Merge remote-tracking branch 'upstream/master' into gerrit-submission (66/6966/2)
author     Anderson Bravalheri <a.bravalheri@bristol.ac.uk>
           Wed, 28 Nov 2018 17:21:26 +0000 (17:21 +0000)
committer  Anderson Bravalheri <a.bravalheri@bristol.ac.uk>
           Wed, 28 Nov 2018 17:21:26 +0000 (17:21 +0000)
Sync with master branch

Change-Id: Ic26d043a84f50f48eeebffb512ccea2eedc053a4
Signed-off-by: Anderson Bravalheri <a.bravalheri@bristol.ac.uk>
54 files changed:
.gitignore-common
database_utils/migrate_mano_db.sh
database_utils/migrations/down/34_remove_wim_tables.sql [new file with mode: 0644]
database_utils/migrations/up/34_add_wim_tables.sql [new file with mode: 0644]
docker/Dockerfile-local
docker/tests.dockerfile [new file with mode: 0644]
docker/tests.yml [new file with mode: 0644]
openmano
openmanod
osm_ro/db_base.py
osm_ro/http_tools/__init__.py [new file with mode: 0644]
osm_ro/http_tools/errors.py [new file with mode: 0644]
osm_ro/http_tools/handler.py [new file with mode: 0644]
osm_ro/http_tools/request_processing.py [new file with mode: 0644]
osm_ro/http_tools/tests/__init__.py [new file with mode: 0644]
osm_ro/http_tools/tests/test_errors.py [new file with mode: 0644]
osm_ro/http_tools/tests/test_handler.py [new file with mode: 0644]
osm_ro/http_tools/tox.ini [new file with mode: 0644]
osm_ro/httpserver.py
osm_ro/nfvo.py
osm_ro/nfvo_db.py
osm_ro/openmano_schemas.py
osm_ro/openmanoclient.py
osm_ro/openmanod.cfg
osm_ro/tests/db_helpers.py [new file with mode: 0644]
osm_ro/tests/helpers.py [new file with mode: 0644]
osm_ro/utils.py
osm_ro/vim_thread.py
osm_ro/vimconn_openstack.py
osm_ro/wim/__init__.py [new file with mode: 0644]
osm_ro/wim/actions.py [new file with mode: 0644]
osm_ro/wim/engine.py [new file with mode: 0644]
osm_ro/wim/errors.py [new file with mode: 0644]
osm_ro/wim/failing_connector.py [new file with mode: 0644]
osm_ro/wim/http_handler.py [new file with mode: 0644]
osm_ro/wim/persistence.py [new file with mode: 0644]
osm_ro/wim/schemas.py [new file with mode: 0644]
osm_ro/wim/tests/__init__.py [new file with mode: 0644]
osm_ro/wim/tests/fixtures.py [new file with mode: 0644]
osm_ro/wim/tests/test_actions.py [new file with mode: 0644]
osm_ro/wim/tests/test_engine.py [new file with mode: 0644]
osm_ro/wim/tests/test_http_handler.py [new file with mode: 0644]
osm_ro/wim/tests/test_persistence.py [new file with mode: 0644]
osm_ro/wim/tests/test_wim_thread.py [new file with mode: 0644]
osm_ro/wim/tox.ini [new file with mode: 0644]
osm_ro/wim/wan_link_actions.py [new file with mode: 0644]
osm_ro/wim/wim_thread.py [new file with mode: 0644]
osm_ro/wim/wimconn.py [new file with mode: 0644]
osm_ro/wim/wimconn_odl.py [new file with mode: 0644]
scripts/install-openmano.sh
setup.py
stdeb.cfg
test/test_RO.py
test/test_openmanoclient.py

diff --git a/.gitignore-common b/.gitignore-common
index 92edf1b..85235b2 100644 (file)
@@ -22,6 +22,9 @@
 # This is a template with common files to be ignored; after cloning, make a copy to .gitignore
 # cp .gitignore-common .gitignore
 
+.tox/
+.coverage
+
 *.pyc
 *.pyo
 
diff --git a/database_utils/migrate_mano_db.sh b/database_utils/migrate_mano_db.sh
index ecd6b11..aa2e718 100755 (executable)
@@ -24,6 +24,7 @@
 #
 #Upgrade/Downgrade openmano database preserving the content
 #
+DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
 DBUSER="mano"
 DBPASS=""
@@ -33,7 +34,7 @@ DBPORT="3306"
 DBNAME="mano_db"
 QUIET_MODE=""
 #TODO update it with the last database version
-LAST_DB_VERSION=33
+LAST_DB_VERSION=34
 
 # Detect paths
 MYSQL=$(which mysql)
@@ -201,6 +202,7 @@ fi
 #[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31  #0.5.61 =>  31
 #[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32  #0.5.70 =>  32
 #[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33  #0.5.82 =>  33
+#[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34  #0.6.00 =>  34
 #TODO ... put next versions here
 
 function upgrade_to_1(){
@@ -222,8 +224,8 @@ function upgrade_to_1(){
 }
 function downgrade_from_1(){
     # echo "    downgrade database from version 0.1 to version 0.0"
-    echo "      DROP TABLE \`schema_version\`"
-    sql "DROP TABLE \`schema_version\`;"
+    echo "      DROP TABLE IF EXISTS \`schema_version\`"
+    sql "DROP TABLE IF EXISTS \`schema_version\`;"
 }
 function upgrade_to_2(){
     # echo "    upgrade database from version 0.1 to version 0.2"
@@ -304,11 +306,11 @@ function downgrade_from_2(){
     echo "      Delete columns 'user/passwd' from 'vim_tenants'"
     sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
     echo "        delete tables 'datacenter_images', 'images'"
-    sql "DROP TABLE \`datacenters_images\`;"
-    sql "DROP TABLE \`images\`;"
+    sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
+    sql "DROP TABLE IF EXISTS \`images\`;"
     echo "        delete tables 'datacenter_flavors', 'flavors'"
-    sql "DROP TABLE \`datacenters_flavors\`;"
-    sql "DROP TABLE \`flavors\`;"
+    sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
+    sql "DROP TABLE IF EXISTS \`flavors\`;"
     sql "DELETE FROM schema_version WHERE version_int='2';"
 }
 
@@ -622,7 +624,7 @@ function upgrade_to_12(){
 function downgrade_from_12(){
     # echo "    downgrade database from version 0.12 to version 0.11"
     echo "      delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
-    sql "DROP TABLE ip_profiles;"
+    sql "DROP TABLE IF EXISTS ip_profiles;"
     sql "ALTER TABLE interfaces DROP COLUMN ip_address;"
     sql "ALTER TABLE sce_interfaces DROP COLUMN ip_address;"
     sql "DELETE FROM schema_version WHERE version_int='12';"
@@ -1006,8 +1008,8 @@ function downgrade_from_26(){
            "REFERENCES scenarios (uuid);"
 
     echo "      Delete table instance_actions"
-    sql "DROP TABLE vim_actions"
-    sql "DROP TABLE instance_actions"
+    sql "DROP TABLE IF EXISTS vim_actions"
+    sql "DROP TABLE IF EXISTS instance_actions"
     sql "DELETE FROM schema_version WHERE version_int='26';"
 }
 
@@ -1220,24 +1222,24 @@ function upgrade_to_28(){
 function downgrade_from_28(){
     echo "      [Undo adding the VNFFG tables]"
     echo "      Dropping instance_sfps"
-    sql "DROP TABLE instance_sfps;"
+    sql "DROP TABLE IF EXISTS instance_sfps;"
     echo "      Dropping sce_classifications"
-    sql "DROP TABLE instance_classifications;"
+    sql "DROP TABLE IF EXISTS instance_classifications;"
     echo "      Dropping instance_sfs"
-    sql "DROP TABLE instance_sfs;"
+    sql "DROP TABLE IF EXISTS instance_sfs;"
     echo "      Dropping instance_sfis"
-    sql "DROP TABLE instance_sfis;"
+    sql "DROP TABLE IF EXISTS instance_sfis;"
     echo "      Dropping sce_classifier_matches"
     echo "      [Undo adding the VNFFG-SFC instance mapping tables]"
-    sql "DROP TABLE sce_classifier_matches;"
+    sql "DROP TABLE IF EXISTS sce_classifier_matches;"
     echo "      Dropping sce_classifiers"
-    sql "DROP TABLE sce_classifiers;"
+    sql "DROP TABLE IF EXISTS sce_classifiers;"
     echo "      Dropping sce_rsp_hops"
-    sql "DROP TABLE sce_rsp_hops;"
+    sql "DROP TABLE IF EXISTS sce_rsp_hops;"
     echo "      Dropping sce_rsps"
-    sql "DROP TABLE sce_rsps;"
+    sql "DROP TABLE IF EXISTS sce_rsps;"
     echo "      Dropping sce_vnffgs"
-    sql "DROP TABLE sce_vnffgs;"
+    sql "DROP TABLE IF EXISTS sce_vnffgs;"
     echo "      [Altering vim_actions table]"
     sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
     sql "DELETE FROM schema_version WHERE version_int='28';"
@@ -1316,6 +1318,19 @@ function downgrade_from_X(){
     echo "      Change back 'datacenter_nets'"
     sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
 }
+
+function upgrade_to_34() {
+    echo "      Create tables required for WIM features"
+    script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
+    sql "source ${script}"
+}
+
+function downgrade_from_34() {
+    echo "      Drop tables required for WIM features"
+    script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
+    sql "source ${script}"
+}
+
 #TODO ... put functions here
 
 # echo "db version = "${DATABASE_VER_NUM}
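Note: unlike the earlier inline upgrade functions, the new upgrade_to_34()/downgrade_from_34() locate their SQL under database_utils/migrations/{up,down} (using the DBUTILS path resolved at the top of the script) and feed it to the database through the script's existing sql helper. A rough Python equivalent of that lookup-and-apply step is sketched below; the mysql command line and the credentials are placeholders, since the sql helper itself is not part of this diff.

import glob
import subprocess

# Pick the newest "34*.sql" under migrations/up (the shell code uses
# `find ... | tail -1` for the same purpose) and pipe it into mysql.
# Host, port, user, password and database name are placeholder values.
script = sorted(glob.glob("database_utils/migrations/up/34*.sql"))[-1]
with open(script) as sql_file:
    subprocess.check_call(
        ["mysql", "-h", "localhost", "-P", "3306", "-u", "mano", "-pmanopw", "mano_db"],
        stdin=sql_file)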
diff --git a/database_utils/migrations/down/34_remove_wim_tables.sql b/database_utils/migrations/down/34_remove_wim_tables.sql
new file mode 100644 (file)
index 0000000..c6fa0b4
--- /dev/null
@@ -0,0 +1,27 @@
+--
+-- Tear down database structure required for integrating OSM with
+-- Wide Area Network Infrastructure Managers
+--
+
+DROP TABLE IF EXISTS wim_port_mappings;
+DROP TABLE IF EXISTS wim_nfvo_tenants;
+DROP TABLE IF EXISTS instance_wim_nets;
+
+ALTER TABLE `vim_wim_actions` DROP FOREIGN KEY `FK_actions_wims`;
+ALTER TABLE `vim_wim_actions` DROP INDEX `FK_actions_wims`;
+ALTER TABLE `vim_wim_actions` DROP INDEX `item_type_id`;
+ALTER TABLE `vim_wim_actions` MODIFY `item` enum(
+  'datacenters_flavors',
+  'datacenter_images',
+  'instance_nets',
+  'instance_vms',
+  'instance_interfaces') NOT NULL
+  COMMENT 'table where the item is stored';
+ALTER TABLE `vim_wim_actions` MODIFY `datacenter_vim_id` varchar(36) NOT NULL;
+ALTER TABLE `vim_wim_actions` DROP `wim_internal_id`, DROP `wim_account_id`;
+ALTER TABLE `vim_wim_actions` RENAME TO `vim_actions`;
+
+DROP TABLE IF EXISTS wim_accounts;
+DROP TABLE IF EXISTS wims;
+
+DELETE FROM schema_version WHERE version_int='34';
diff --git a/database_utils/migrations/up/34_add_wim_tables.sql b/database_utils/migrations/up/34_add_wim_tables.sql
new file mode 100644 (file)
index 0000000..6c6fc33
--- /dev/null
@@ -0,0 +1,165 @@
+--
+-- Setup database structure required for integrating OSM with
+-- Wide Area Network Infrastructure Managers
+--
+
+DROP TABLE IF EXISTS wims;
+CREATE TABLE wims (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) NOT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `type` varchar(36) NOT NULL DEFAULT 'odl',
+  `wim_url` varchar(150) NOT NULL,
+  `config` varchar(4000) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `name` (`name`)
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIMs managed by the NFVO.';
+
+DROP TABLE IF EXISTS wim_accounts;
+CREATE TABLE wim_accounts (
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) DEFAULT NULL,
+  `wim_id` varchar(36) NOT NULL,
+  `created` enum('true','false') NOT NULL DEFAULT 'false',
+  `user` varchar(64) DEFAULT NULL,
+  `password` varchar(64) DEFAULT NULL,
+  `config` varchar(4000) DEFAULT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  UNIQUE KEY `wim_name` (`wim_id`,`name`),
+  KEY `FK_wim_accounts_wims` (`wim_id`),
+  CONSTRAINT `FK_wim_accounts_wims` FOREIGN KEY (`wim_id`)
+    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIM accounts by the user';
+
+DROP TABLE IF EXISTS `wim_nfvo_tenants`;
+CREATE TABLE `wim_nfvo_tenants` (
+  `id` integer NOT NULL AUTO_INCREMENT,
+  `nfvo_tenant_id` varchar(36) NOT NULL,
+  `wim_id` varchar(36) NOT NULL,
+  `wim_account_id` varchar(36) NOT NULL,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `wim_nfvo_tenant` (`wim_id`,`nfvo_tenant_id`),
+  KEY `FK_wims_nfvo_tenants` (`wim_id`),
+  KEY `FK_wim_accounts_nfvo_tenants` (`wim_account_id`),
+  KEY `FK_nfvo_tenants_wim_accounts` (`nfvo_tenant_id`),
+  CONSTRAINT `FK_wims_nfvo_tenants` FOREIGN KEY (`wim_id`)
+    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_wim_accounts_nfvo_tenants` FOREIGN KEY (`wim_account_id`)
+    REFERENCES `wim_accounts` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_nfvo_tenants_wim_accounts` FOREIGN KEY (`nfvo_tenant_id`)
+    REFERENCES `nfvo_tenants` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB AUTO_INCREMENT=86 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+COMMENT='WIM accounts mapping to NFVO tenants';
+
+DROP TABLE IF EXISTS `instance_wim_nets`;
+CREATE TABLE `instance_wim_nets` (
+  `uuid` varchar(36) NOT NULL,
+  `wim_internal_id` varchar(128) DEFAULT NULL
+    COMMENT 'Internal ID used by the WIM to refer to the network',
+  `instance_scenario_id` varchar(36) DEFAULT NULL,
+  `sce_net_id` varchar(36) DEFAULT NULL,
+  `wim_id` varchar(36) DEFAULT NULL,
+  `wim_account_id` varchar(36) NOT NULL,
+  `status` enum(
+    'ACTIVE',
+    'INACTIVE',
+    'DOWN',
+    'BUILD',
+    'ERROR',
+    'WIM_ERROR',
+    'DELETED',
+    'SCHEDULED_CREATION',
+    'SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+  `error_msg` varchar(1024) DEFAULT NULL,
+  `wim_info` text,
+  `multipoint` enum('true','false') NOT NULL DEFAULT 'false',
+  `created` enum('true','false') NOT NULL DEFAULT 'false'
+      COMMENT 'Created or already exists at WIM',
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`uuid`),
+  KEY `FK_instance_wim_nets_instance_scenarios` (`instance_scenario_id`),
+  KEY `FK_instance_wim_nets_sce_nets` (`sce_net_id`),
+  KEY `FK_instance_wim_nets_wims` (`wim_id`),
+  KEY `FK_instance_wim_nets_wim_accounts` (`wim_account_id`),
+  CONSTRAINT `FK_instance_wim_nets_wim_accounts`
+    FOREIGN KEY (`wim_account_id`) REFERENCES `wim_accounts` (`uuid`),
+  CONSTRAINT `FK_instance_wim_nets_wims`
+    FOREIGN KEY (`wim_id`) REFERENCES `wims` (`uuid`),
+  CONSTRAINT `FK_instance_wim_nets_instance_scenarios`
+    FOREIGN KEY (`instance_scenario_id`) REFERENCES `instance_scenarios` (`uuid`)
+    ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_instance_wim_nets_sce_nets`
+    FOREIGN KEY (`sce_net_id`) REFERENCES `sce_nets` (`uuid`)
+    ON DELETE SET NULL ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
+  COMMENT='Instances of wim networks';
+
+ALTER TABLE `vim_actions`
+  RENAME TO `vim_wim_actions`;
+ALTER TABLE `vim_wim_actions`
+  ADD `wim_account_id` varchar(36) DEFAULT NULL AFTER `vim_id`,
+  ADD `wim_internal_id` varchar(64) DEFAULT NULL AFTER `wim_account_id`,
+  MODIFY `datacenter_vim_id` varchar(36) DEFAULT NULL,
+  MODIFY `item` enum(
+    'datacenters_flavors',
+    'datacenter_images',
+    'instance_nets',
+    'instance_vms',
+    'instance_interfaces',
+    'instance_wim_nets') NOT NULL
+  COMMENT 'table where the item is stored';
+ALTER TABLE `vim_wim_actions`
+  ADD INDEX `item_type_id` (`item`, `item_id`);
+ALTER TABLE `vim_wim_actions`
+  ADD INDEX `FK_actions_wims` (`wim_account_id`);
+ALTER TABLE `vim_wim_actions`
+  ADD CONSTRAINT `FK_actions_wims` FOREIGN KEY (`wim_account_id`)
+  REFERENCES `wim_accounts` (`uuid`)
+  ON UPDATE CASCADE ON DELETE CASCADE;
+
+DROP TABLE IF EXISTS `wim_port_mappings`;
+CREATE TABLE `wim_port_mappings` (
+  `id` integer NOT NULL AUTO_INCREMENT,
+  `wim_id` varchar(36) NOT NULL,
+  `datacenter_id` varchar(36) NOT NULL,
+  `pop_switch_dpid` varchar(64) NOT NULL,
+  `pop_switch_port` varchar(64) NOT NULL,
+  `wan_service_endpoint_id` varchar(256) NOT NULL
+      COMMENT 'In case the WIM plugin relies on the wan_service_mapping_info, this field contains a unique identifier used to check the mapping_info consistency',
+      /* In other words, the WIM plugin should be able to derive it:
+       * wan_service_endpoint_id = f(wan_service_mapping_info)
+       * where f is an injective function
+       */
+  `wan_service_mapping_info` text,
+  `created_at` double NOT NULL,
+  `modified_at` double DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `unique_datacenter_port_mapping`
+    (`datacenter_id`, `pop_switch_dpid`, `pop_switch_port`),
+  UNIQUE KEY `unique_wim_port_mapping`
+    (`wim_id`, `wan_service_endpoint_id`),
+  KEY `FK_wims_wim_physical_connections` (`wim_id`),
+  KEY `FK_datacenters_wim_port_mappings` (`datacenter_id`),
+  CONSTRAINT `FK_wims_wim_port_mappings` FOREIGN KEY (`wim_id`)
+    REFERENCES `wims` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE,
+  CONSTRAINT `FK_datacenters_wim_port_mappings` FOREIGN KEY (`datacenter_id`)
+    REFERENCES `datacenters` (`uuid`) ON DELETE CASCADE ON UPDATE CASCADE
+)
+ENGINE=InnoDB DEFAULT CHARSET=utf8
+COMMENT='WIM port mappings managed by the WIM.';
+
+-- Update Schema with DB version
+INSERT INTO schema_version
+VALUES (34, '0.34', '0.6.00', 'Added WIM tables', '2018-09-10');
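For reference, each entry in the new wim_port_mappings table ties one port of the PoP switch to one WAN service endpoint known to the WIM. The dictionary below only illustrates the column names defined above with placeholder values; the request format actually accepted by the API is defined in osm_ro/wim/schemas.py, which is not shown in this diff.

# Illustrative only: keys are the wim_port_mappings columns, values are placeholders.
example_port_mapping_row = {
    "wim_id": "11111111-2222-3333-4444-555555555555",            # uuid of a row in `wims`
    "datacenter_id": "66666666-7777-8888-9999-000000000000",     # uuid of a row in `datacenters`
    "pop_switch_dpid": "00:00:00:00:00:00:00:01",                 # datapath id of the PoP switch
    "pop_switch_port": "1",                                       # PoP switch port facing the WAN
    "wan_service_endpoint_id": "wan-sw01:port-2",                 # unique endpoint id on the WIM side
    "wan_service_mapping_info": '{"mapping_type": "dpid-port"}',  # free-form text used by the WIM plugin
}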
diff --git a/docker/Dockerfile-local b/docker/Dockerfile-local
index 5cc96bb..e7e05ce 100644 (file)
@@ -13,6 +13,7 @@ RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-novaclient python-keystoneclient python-glanceclient python-cinderclient python-neutronclient && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
+    DEBIAN_FRONTEND=noninteractive apt-get -y install python-networkx && \
     DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
     DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client
diff --git a/docker/tests.dockerfile b/docker/tests.dockerfile
new file mode 100644 (file)
index 0000000..fd5a45c
--- /dev/null
@@ -0,0 +1,13 @@
+FROM ubuntu:xenial
+
+VOLUME /opt/openmano
+VOLUME /var/log/osm
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+
+RUN apt-get update && \
+    apt-get -y install python python-pip mysql-client libmysqlclient-dev && \
+    pip install tox
+
+ENTRYPOINT ["tox"]
diff --git a/docker/tests.yml b/docker/tests.yml
new file mode 100644 (file)
index 0000000..a33f0d4
--- /dev/null
@@ -0,0 +1,37 @@
+# This file is intended to be used by developers on their local machine
+# in order to run the tests in isolation
+# To do so, cd into osm_ro and run:
+# docker-compose -f ../docker/tests.yml run --rm tox -c <folder to be tested, e.g. wim>
+version: '2'
+services:
+  test-db:
+    image: mysql:5
+    container_name: test-db
+    restart: always
+    environment:
+      - MYSQL_ROOT_PASSWORD=osm4u
+      - MYSQL_USER=mano
+      - MYSQL_PASSWORD=manopw
+      - MYSQL_DATABASE=mano_db
+  tox:
+    container_name: tox
+    depends_on:
+      - test-db
+    build:
+      context: ../
+      dockerfile: docker/tests.dockerfile
+    restart: always
+    environment:
+      - RO_DB_ROOT_PASSWORD=osm4u
+      - TEST_DB_HOST=test-db
+      - TEST_DB_USER=mano
+      - TEST_DB_PASSWORD=manopw
+      - TEST_DB_DATABASE=mano_db
+    ports:
+      - "9090:9090"
+    volumes:
+      - ..:/opt/openmano
+      - /tmp/osm/openmano/logs:/var/log/osm
+    entrypoint:
+      - tox
+    working_dir: /opt/openmano/osm_ro
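The compose file above passes the database settings to the tox container through the TEST_DB_* variables. A minimal sketch of how a test helper could pick them up is shown below; the defaults mirror the values in the compose file, and how osm_ro/tests/db_helpers.py actually consumes them is not shown here, so treat this as an assumption.

import os

# Defaults mirror docker/tests.yml; the real logic lives in osm_ro/tests/db_helpers.py (not shown).
db_settings = {
    "host": os.environ.get("TEST_DB_HOST", "test-db"),
    "user": os.environ.get("TEST_DB_USER", "mano"),
    "password": os.environ.get("TEST_DB_PASSWORD", "manopw"),
    "database": os.environ.get("TEST_DB_DATABASE", "mano_db"),
}
print "tests will connect to %(user)s@%(host)s/%(database)s" % db_settings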
index 357b91a..1577656 100755 (executable)
--- a/openmano
+++ b/openmano
@@ -24,7 +24,7 @@
 ##
 
 """
-openmano client used to interact with openmano-server (openmanod) 
+openmano client used to interact with openmano-server (openmanod)
 """
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$09-oct-2014 09:09:48$"
@@ -65,6 +65,10 @@ def config(args):
         mano_tenant_name = "None"
         mano_datacenter_id = "None"
         mano_datacenter_name = "None"
+        # WIM additions
+        logger.debug("resolving WIM names")
+        mano_wim_id = "None"
+        mano_wim_name = "None"
         try:
             mano_tenant_id = _get_item_uuid("tenants", mano_tenant)
             URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, mano_tenant_id)
@@ -79,17 +83,35 @@ def config(args):
             if "error" not in content:
                 mano_datacenter_id = content["datacenter"]["uuid"]
                 mano_datacenter_name = content["datacenter"]["name"]
+
+            # WIM
+            URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (
+                mano_host, mano_port, mano_tenant_id, mano_wim)
+            mano_response = requests.get(URLrequest)
+            logger.debug("openmano response: %s", mano_response.text)
+            content = mano_response.json()
+            if "error" not in content:
+                mano_wim_id = content["wim"]["uuid"]
+                mano_wim_name = content["wim"]["name"]
+
         except OpenmanoCLIError:
             pass
         print "OPENMANO_TENANT: %s" %mano_tenant
         print "    Id: %s" %mano_tenant_id
-        print "    Name: %s" %mano_tenant_name 
+        print "    Name: %s" %mano_tenant_name
         print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
         print "    Id: %s" %mano_datacenter_id
-        print "    Name: %s" %mano_datacenter_name 
+        print "    Name: %s" %mano_datacenter_name
+        # WIM
+        print "OPENMANO_WIM: %s" %str (mano_wim)
+        print "    Id: %s" %mano_wim_id
+        print "    Name: %s" %mano_wim_name
+
     else:
         print "OPENMANO_TENANT: %s" %mano_tenant
         print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
+        # WIM
+        print "OPENMANO_WIM: %s" %str (mano_wim)
 
 def _print_verbose(mano_response, verbose_level=0):
     content = mano_response.json()
@@ -98,7 +120,7 @@ def _print_verbose(mano_response, verbose_level=0):
         #print "Non expected format output"
         print str(content)
         return result
-    
+
     val=content.values()[0]
     if type(val)==str:
         print val
@@ -111,7 +133,7 @@ def _print_verbose(mano_response, verbose_level=0):
         #print "Non expected dict/list format output"
         print str(content)
         return result
-    
+
     #print content_list
     if verbose_level==None:
         verbose_level=0
@@ -163,7 +185,7 @@ def parser_json_yaml(file_name):
         f.close()
     except Exception as e:
         return (False, str(e))
-           
+
     #Read and parse file
     if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml' or (file_name[-5:]!='.json' and '\t' not in text):
         try:
@@ -176,7 +198,7 @@ def parser_json_yaml(file_name):
             return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos)
     else: #json
         try:
-            config = json.loads(text) 
+            config = json.loads(text)
         except Exception as e:
             return (False, "Error loading file '"+file_name+"' json format error " + str(e) )
 
@@ -234,7 +256,7 @@ def _get_item_uuid(item, item_name_id, tenant=None):
     elif found > 1:
         raise OpenmanoCLIError("%d %s found with name '%s'. uuid must be used" %(found, item, item_name_id))
     return uuid
-# 
+#
 # def check_valid_uuid(uuid):
 #     id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
 #     try:
@@ -242,7 +264,7 @@ def _get_item_uuid(item, item_name_id, tenant=None):
 #         return True
 #     except js_e.ValidationError:
 #         return False
-    
+
 def _get_tenant(tenant_name_id = None):
     if not tenant_name_id:
         tenant_name_id = mano_tenant
@@ -257,6 +279,14 @@ def _get_datacenter(datacenter_name_id = None, tenant = "any"):
             raise OpenmanoCLIError("neither 'OPENMANO_DATACENTER' environment variable is set nor --datacenter option is used")
     return _get_item_uuid("datacenters", datacenter_name_id, tenant)
 
+# WIM
+def _get_wim(wim_name_id = None, tenant = "any"):
+    if not wim_name_id:
+        wim_name_id = mano_wim
+        if not wim_name_id:
+            raise OpenmanoCLIError("neither 'OPENMANO_WIM' environment variable is set nor --wim option is used")
+    return _get_item_uuid("wims", wim_name_id, tenant)
+
 def vnf_create(args):
     #print "vnf-create",args
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
@@ -357,11 +387,11 @@ def vnf_create(args):
             elif str(e) == 'image checksum':  error_pos= "missing field 'vnf':'VNFC'['image checksum']"
             else:                       error_pos="wrong format"
             print "Wrong VNF descriptor: " + error_pos
-            return -1 
+            return -1
     payload_req = json.dumps(myvnf)
-        
+
     #print payload_req
-        
+
     URLrequest = "http://{}:{}/openmano{}/{}/{token}".format(mano_host, mano_port, api_version, tenant, token=token)
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
@@ -486,7 +516,7 @@ def scenario_create(args):
         nsd['description'] = args.description
     payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False,
                                  encoding='utf-8', allow_unicode=True)
-    
+
     # print payload_req
     URLrequest = "http://{host}:{port}/openmano{api}/{tenant}/{token}".format(
         host=mano_host, port=mano_port, api=api_version, tenant=tenant, token=token)
@@ -607,26 +637,26 @@ def scenario_deploy(args):
 #         action[actionCmd]["datacenter"] = args.datacenter
 #     elif mano_datacenter != None:
 #         action[actionCmd]["datacenter"] = mano_datacenter
-#         
+#
 #     if args.description:
 #         action[actionCmd]["description"] = args.description
 #     payload_req = json.dumps(action, indent=4)
 #     #print payload_req
-# 
+#
 #     URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario)
 #     logger.debug("openmano request: %s", payload_req)
 #     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
 #     logger.debug("openmano response: %s", mano_response.text )
 #     if args.verbose==None:
 #         args.verbose=0
-#     
+#
 #     result = 0 if mano_response.status_code==200 else mano_response.status_code
 #     content = mano_response.json()
 #     #print json.dumps(content, indent=4)
 #     if args.verbose >= 3:
 #         print yaml.safe_dump(content, indent=4, default_flow_style=False)
 #         return result
-# 
+#
 #     if mano_response.status_code == 200:
 #         myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20))
 #         if args.verbose >=1:
@@ -655,7 +685,7 @@ def scenario_verify(args):
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
     logger.debug("openmano response: %s", mano_response.text )
-    
+
     result = 0 if mano_response.status_code==200 else mano_response.status_code
     content = mano_response.json()
     #print json.dumps(content, indent=4)
@@ -708,7 +738,7 @@ def instance_create(args):
                 net_scenario   = net_tuple[0].strip()
                 net_datacenter = net_tuple[1].strip()
                 if net_scenario not in myInstance["instance"]["networks"]:
-                    myInstance["instance"]["networks"][net_scenario] = {} 
+                    myInstance["instance"]["networks"][net_scenario] = {}
                 if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                     myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                 myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-use"] = net_datacenter
@@ -729,7 +759,7 @@ def instance_create(args):
                     print "error at netmap-create. Expected net-scenario=net-datacenter or net-scenario. (%s)?" % net_comma
                     return
                 if net_scenario not in myInstance["instance"]["networks"]:
-                    myInstance["instance"]["networks"][net_scenario] = {} 
+                    myInstance["instance"]["networks"][net_scenario] = {}
                 if "sites" not in myInstance["instance"]["networks"][net_scenario]:
                     myInstance["instance"]["networks"][net_scenario]["sites"] = [ {} ]
                 myInstance["instance"]["networks"][net_scenario]["sites"][0]["netmap-create"] = net_datacenter
@@ -766,7 +796,7 @@ def instance_create(args):
         except Exception as e:
             print "Cannot obtain any public ssh key. Error '{}'. Try not using --keymap-auto".format(str(e))
             return 1
-        
+
         if "cloud-config" not in myInstance["instance"]:
             myInstance["instance"]["cloud-config"] = {}
         cloud_config = myInstance["instance"]["cloud-config"]
@@ -775,8 +805,8 @@ def instance_create(args):
         if user:
             if "users" not in cloud_config:
                 cloud_config["users"] = []
-            cloud_config["users"].append({"name": user, "key-pairs": keys })                    
-                        
+            cloud_config["users"].append({"name": user, "key-pairs": keys })
+
     payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
     logger.debug("openmano request: %s", payload_req)
     URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, tenant)
@@ -784,7 +814,7 @@ def instance_create(args):
     logger.debug("openmano response: %s", mano_response.text )
     if args.verbose==None:
         args.verbose=0
-    
+
     result = 0 if mano_response.status_code==200 else mano_response.status_code
     content = mano_response.json()
     #print json.dumps(content, indent=4)
@@ -933,7 +963,7 @@ def instance_scenario_action(args):
         action["vnfs"] = args.vnf
     if args.vm:
         action["vms"] = args.vm
-    
+
     headers_req = {'content-type': 'application/json'}
     payload_req = json.dumps(action, indent=4)
     URLrequest = "http://%s:%s/openmano/%s/instances/%s/action" %(mano_host, mano_port, tenant, toact)
@@ -969,11 +999,11 @@ def tenant_create(args):
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
     tenant_dict={"name": args.name}
     if args.description!=None:
-        tenant_dict["description"] = args.description 
+        tenant_dict["description"] = args.description
     payload_req = json.dumps( {"tenant": tenant_dict })
-    
+
     #print payload_req
-        
+
     URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port)
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
@@ -1018,7 +1048,7 @@ def datacenter_attach(args):
     tenant = _get_tenant()
     datacenter = _get_datacenter(args.name)
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
-    
+
     datacenter_dict={}
     if args.vim_tenant_id != None:
         datacenter_dict['vim_tenant'] = args.vim_tenant_id
@@ -1034,7 +1064,7 @@ def datacenter_attach(args):
     payload_req = json.dumps( {"datacenter": datacenter_dict })
 
     #print payload_req
-        
+
     URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, datacenter)
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
@@ -1103,11 +1133,11 @@ def datacenter_create(args):
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
     datacenter_dict={"name": args.name, "vim_url": args.url}
     if args.description!=None:
-        datacenter_dict["description"] = args.description 
+        datacenter_dict["description"] = args.description
     if args.type!=None:
-        datacenter_dict["type"] = args.type 
+        datacenter_dict["type"] = args.type
     if args.url!=None:
-        datacenter_dict["vim_url_admin"] = args.url_admin 
+        datacenter_dict["vim_url_admin"] = args.url_admin
     if args.config!=None:
         datacenter_dict["config"] = _load_file_or_yaml(args.config)
     if args.sdn_controller!=None:
@@ -1117,9 +1147,9 @@ def datacenter_create(args):
             datacenter_dict['config'] = {}
         datacenter_dict['config']['sdn-controller'] = sdn_controller
     payload_req = json.dumps( {"datacenter": datacenter_dict })
-    
+
     #print payload_req
-        
+
     URLrequest = "http://%s:%s/openmano/datacenters" %(mano_host, mano_port)
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
@@ -1149,9 +1179,9 @@ def datacenter_delete(args):
 def datacenter_list(args):
     #print "datacenter-list",args
     tenant='any' if args.all else _get_tenant()
-    
+
     if args.name:
-        toshow = _get_item_uuid("datacenters", args.name, tenant) 
+        toshow = _get_item_uuid("datacenters", args.name, tenant)
         URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, toshow)
     else:
         URLrequest = "http://%s:%s/openmano/%s/datacenters" %(mano_host, mano_port, tenant)
@@ -1502,7 +1532,7 @@ def datacenter_net_action(args):
     elif args.action == "net-delete":
         args.netmap = args.net
         args.all = False
-          
+
     args.action = "netmap" + args.action[3:]
     args.vim_name=None
     args.vim_id=None
@@ -1519,13 +1549,13 @@ def datacenter_netmap_action(args):
         args.verbose=0
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
     URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/netmaps" %(mano_host, mano_port, tenant, datacenter)
-        
+
     if args.action=="netmap-list":
         if args.netmap:
             URLrequest += "/" + args.netmap
             args.verbose += 1
         mano_response = requests.get(URLrequest)
-            
+
     elif args.action=="netmap-delete":
         if args.netmap and args.all:
             print "you can not use a netmap name and the option --all at the same time"
@@ -1533,7 +1563,7 @@ def datacenter_netmap_action(args):
         if args.netmap:
             force_text= "Delete default netmap '%s' from datacenter '%s' (y/N)? " % (args.netmap, datacenter)
             URLrequest += "/" + args.netmap
-        elif args.all: 
+        elif args.all:
             force_text="Delete all default netmaps from datacenter '%s' (y/N)? " % (datacenter)
         else:
             print "you must specify a netmap name or the option --all"
@@ -1569,7 +1599,7 @@ def datacenter_netmap_action(args):
             payload["netmap"]["vim_name"] = args.vim_name
         payload_req = json.dumps(payload)
         logger.debug("openmano request: %s", payload_req)
-        
+
         if args.action=="netmap-edit" and not args.force:
             if len(payload["netmap"]) == 0:
                 print "You must supply some parameter to edit"
@@ -1599,7 +1629,7 @@ def element_edit(args):
     if args.element[:-1] not in payload:
         payload = {args.element[:-1]: payload }
     payload_req = json.dumps(payload)
-    
+
     #print payload_req
     if not args.force or (args.name==None and args.filer==None):
         r = raw_input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? ")
@@ -1665,6 +1695,260 @@ def datacenter_edit(args):
     return _print_verbose(mano_response, args.verbose)
 
 
+# WIM
+def wim_account_create(args):
+    tenant = _get_tenant()
+    wim = _get_wim(args.name)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    wim_dict = {}
+    if args.account_name is not None:
+        wim_dict['name'] = args.account_name
+    if args.user is not None:
+        wim_dict['user'] = args.user
+    if args.password is not None:
+        wim_dict['password'] = args.password
+    if args.config is not None:
+        wim_dict["config"] = _load_file_or_yaml(args.config)
+
+    payload_req = json.dumps({"wim_account": wim_dict})
+
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+    # provide additional information on error
+    if mano_response.status_code != 200:
+        content = mano_response.json()
+        if "already in use for  'name'" in content['error']['description'] and \
+                "to database wim_tenants table" in content['error']['description']:
+            print "Try to specify a different name with --account-name"
+    return result
+
+
+def wim_account_delete(args):
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    wim = _get_wim(args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
+    mano_response = requests.delete(URLrequest, headers=headers_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    content = mano_response.json()
+    # print json.dumps(content, indent=4)
+    result = 0 if mano_response.status_code == 200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
+
+def wim_account_edit(args):
+    tenant = _get_tenant()
+    wim = _get_wim(args.name)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    wim_dict = {}
+    if args.account_name:
+        wim_dict['name'] = args.account_name
+    if args.user:
+        wim_dict['user'] = args.user
+    if args.password:
+        wim_dict['password'] = args.password
+    if args.config:
+        wim_dict["config"] = _load_file_or_yaml(args.config)
+
+    payload_req = json.dumps({"wim_account": wim_dict})
+
+    # print payload_req
+
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, wim)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+    # provide additional information on error
+    if mano_response.status_code != 200:
+        content = mano_response.json()
+        if "already in use for  'name'" in content['error']['description'] and \
+                "to database wim_tenants table" in content['error']['description']:
+            print "Try to specify a different name with --account-name"
+    return result
+
+def wim_create(args):
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    wim_dict = {"name": args.name, "wim_url": args.url}
+    if args.description != None:
+        wim_dict["description"] = args.description
+    if args.type != None:
+        wim_dict["type"] = args.type
+    if args.config != None:
+        wim_dict["config"] = _load_file_or_yaml(args.config)
+
+    payload_req = json.dumps({"wim": wim_dict})
+
+    URLrequest = "http://%s:%s/openmano/wims" % (mano_host, mano_port)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    return _print_verbose(mano_response, args.verbose)
+
+
+def wim_edit(args):
+    tenant = _get_tenant()
+    element = _get_item_uuid('wims', args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/wims/%s" % (mano_host, mano_port, element)
+
+    has_arguments = False
+    if args.file != None:
+        has_arguments = True
+        payload = _load_file_or_yaml(args.file)
+    else:
+        payload = {}
+
+    if not has_arguments:
+        raise OpenmanoCLIError("At least one argument must be provided to modify the wim")
+
+    if 'wim' not in payload:
+        payload = {'wim': payload}
+    payload_req = json.dumps(payload)
+
+    # print payload_req
+    if not args.force or (args.name == None and args.file == None):
+        r = raw_input(" Edit wim " + args.name + " (y/N)? ")
+        if len(r) > 0 and r[0].lower() == "y":
+            pass
+        else:
+            return 0
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    if args.verbose == None:
+        args.verbose = 0
+    if args.name != None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
+
+def wim_delete(args):
+    # print "wim-delete",args
+    todelete = _get_item_uuid("wims", args.name, "any")
+    if not args.force:
+        r = raw_input("Delete wim %s (y/N)? " % (args.name))
+        if not (len(r) > 0 and r[0].lower() == "y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/wims/%s" % (mano_host, mano_port, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = 0 if mano_response.status_code == 200 else mano_response.status_code
+    content = mano_response.json()
+    # print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
+
+def wim_list(args):
+    # print "wim-list",args
+    tenant = 'any' if args.all else _get_tenant()
+
+    if args.name:
+        toshow = _get_item_uuid("wims", args.name, tenant)
+        URLrequest = "http://%s:%s/openmano/%s/wims/%s" % (mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/wims" % (mano_host, mano_port, tenant)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    if args.verbose == None:
+        args.verbose = 0
+    if args.name != None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
+
+def wim_port_mapping_set(args):
+    tenant = _get_tenant()
+    wim = _get_wim(args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if not args.file:
+        raise OpenmanoCLIError(
+            "No yaml/json has been provided specifying the WIM port mapping")
+    wim_port_mapping = _load_file_or_yaml(args.file)
+
+    payload_req = json.dumps({"wim_port_mapping": wim_port_mapping})
+
+    # read
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    port_mapping = mano_response.json()
+
+    if mano_response.status_code != 200:
+        raise OpenmanoCLIError(
+            "openmano client error: {}".format(port_mapping['error']['description']))
+    # TODO: check this if statement
+    if len(port_mapping["wim_port_mapping"]) > 0:
+        if not args.force:
+            r = raw_input("WIM %s already contains a port mapping. Overwrite? (y/N)? " % (wim))
+            if not (len(r) > 0 and r[0].lower() == "y"):
+                return 0
+
+        # clear
+        URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
+        mano_response = requests.delete(URLrequest)
+        logger.debug("openmano response: %s", mano_response.text)
+        if mano_response.status_code != 200:
+            return _print_verbose(mano_response, args.verbose)
+
+    # set
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    return _print_verbose(mano_response, 4)
+
+
+def wim_port_mapping_list(args):
+    tenant = _get_tenant()
+    wim = _get_wim(args.name, tenant)
+
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+
+    return _print_verbose(mano_response, 4)
+
+
+def wim_port_mapping_clear(args):
+    tenant = _get_tenant()
+    wim = _get_wim(args.name, tenant)
+
+    if not args.force:
+        r = raw_input("Clear WIM port mapping for wim %s (y/N)? " % (wim))
+        if not (len(r) > 0 and r[0].lower() == "y"):
+            return 0
+
+    URLrequest = "http://%s:%s/openmano/%s/wims/%s/port_mapping" % (mano_host, mano_port, tenant, wim)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    content = mano_response.json()
+    # print json.dumps(content, indent=4)
+    result = 0 if mano_response.status_code == 200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
+
 def version(args):
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
     URLrequest = "http://%s:%s/openmano/version" % (mano_host, mano_port)
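The hunk above adds the client-side WIM commands, which simply call the corresponding REST endpoints of openmanod. A minimal standalone sketch of the two main calls (register a WIM, then attach an account to the operating tenant) follows; the host, port and every uuid/credential are made-up placeholders, and only the URLs and payload keys come from the functions above.

# Sketch only -- mirrors the REST calls issued by wim_create() and wim_account_create().
# The client normally resolves names to uuids through _get_item_uuid()/_get_wim()
# before building the URL; the uuids below are placeholders.
import json
import requests

host, port = "localhost", 9090
tenant_uuid = "00000000-0000-0000-0000-000000000001"   # hypothetical tenant uuid
headers = {'Accept': 'application/json', 'content-type': 'application/json'}

# 1. register a WIM: POST /openmano/wims with a "wim" object
wim = {"name": "example-wim", "type": "odl", "wim_url": "http://10.0.0.10:8181"}
response = requests.post("http://%s:%s/openmano/wims" % (host, port),
                         headers=headers, data=json.dumps({"wim": wim}))
print response.status_code, response.text

# 2. attach an account to the operating tenant:
#    POST /openmano/{tenant}/wims/{wim} with a "wim_account" object
wim_uuid = "11111111-2222-3333-4444-555555555555"       # uuid assigned to the new WIM
account = {"name": "example-account", "user": "admin", "password": "admin"}
response = requests.post("http://%s:%s/openmano/%s/wims/%s" % (host, port, tenant_uuid, wim_uuid),
                         headers=headers, data=json.dumps({"wim_account": account}))
print response.status_code, response.text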
@@ -1679,19 +1963,21 @@ global mano_port
 global mano_tenant
 
 if __name__=="__main__":
-    
+
     mano_tenant = os.getenv('OPENMANO_TENANT', None)
     mano_host = os.getenv('OPENMANO_HOST',"localhost")
     mano_port = os.getenv('OPENMANO_PORT',"9090")
     mano_datacenter = os.getenv('OPENMANO_DATACENTER',None)
-    
+    # WIM env variable for default WIM
+    mano_wim = os.getenv('OPENMANO_WIM', None)
+
     main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)')
     main_parser.add_argument('--version', action='version', help="get version of this client",
                             version='%(prog)s client version ' + __version__ +
                                     " (Note: use '%(prog)s version' to get server version)")
 
     subparsers = main_parser.add_subparsers(help='commands')
-    
+
     parent_parser = argparse.ArgumentParser(add_help=False)
     parent_parser.add_argument('--verbose', '-v', action='count', help="increase verbosity level. Use several times")
     parent_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
@@ -1717,13 +2003,13 @@ if __name__=="__main__":
     vnf_list_parser.add_argument("-a", "--all", action="store_true", help="shows all vnfs, not only the owned or public ones")
     #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true")
     vnf_list_parser.set_defaults(func=vnf_list)
-    
+
     vnf_delete_parser = subparsers.add_parser('vnf-delete', parents=[parent_parser], help="deletes a vnf from the catalogue")
     vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted")
     vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
     vnf_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
     vnf_delete_parser.set_defaults(func=vnf_delete)
-    
+
     scenario_create_parser = subparsers.add_parser('scenario-create', parents=[parent_parser], help="adds a scenario into the OPENMANO DB")
     scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario").completer = FilesCompleter
     scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)")
@@ -1735,7 +2021,7 @@ if __name__=="__main__":
     #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true")
     scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all scenarios, not only the owned or public ones")
     scenario_list_parser.set_defaults(func=scenario_list)
-    
+
     scenario_delete_parser = subparsers.add_parser('scenario-delete', parents=[parent_parser], help="deletes a scenario from the OPENMANO DB")
     scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted")
     scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
@@ -1749,12 +2035,12 @@ if __name__=="__main__":
     scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
     scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance")
     scenario_deploy_parser.set_defaults(func=scenario_deploy)
-    
+
     scenario_deploy_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)")
     scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified")
     scenario_deploy_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
     scenario_deploy_parser.set_defaults(func=scenario_verify)
-    
+
     instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', parents=[parent_parser], help="deploys a scenario")
     instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text")
     instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed")
@@ -1778,7 +2064,7 @@ if __name__=="__main__":
     instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
     instance_scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
     instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
-    
+
     instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', parents=[parent_parser], help="invoke an action over part or the whole scenario instance")
     instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
     instance_scenario_action_parser.add_argument("action", action="store", type=str, \
@@ -1798,7 +2084,7 @@ if __name__=="__main__":
     #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance")
     #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
     #instance_scenario_status_parser.set_defaults(func=instance_scenario_status)
-    
+
     tenant_create_parser = subparsers.add_parser('tenant-create', parents=[parent_parser], help="creates a new tenant")
     tenant_create_parser.add_argument("name", action="store", help="name for the tenant")
     tenant_create_parser.add_argument("--description", action="store", help="description of the tenant")
@@ -1958,6 +2244,128 @@ if __name__=="__main__":
     sdn_controller_delete_parser.set_defaults(func=sdn_controller_delete)
     # =======================
 
+    # WIM ======================= WIM section==================
+
+    # WIM create
+    wim_create_parser = subparsers.add_parser('wim-create',
+                                              parents=[parent_parser], help="creates a new wim")
+    wim_create_parser.add_argument("name", action="store",
+                                   help="name for the wim")
+    wim_create_parser.add_argument("url", action="store",
+                                   help="url for the wim")
+    wim_create_parser.add_argument("--type", action="store",
+                                   help="wim type: tapi, onos or odl (default)")
+    wim_create_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_create_parser.add_argument("--description", action="store",
+                                   help="description of the wim")
+    wim_create_parser.set_defaults(func=wim_create)
+
+    # WIM delete
+    wim_delete_parser = subparsers.add_parser('wim-delete',
+                                              parents=[parent_parser], help="deletes a wim from the catalogue")
+    wim_delete_parser.add_argument("name", action="store",
+                                   help="name or uuid of the wim to be deleted")
+    wim_delete_parser.add_argument("-f", "--force", action="store_true",
+                                   help="forces deletion without asking")
+    wim_delete_parser.set_defaults(func=wim_delete)
+
+    # WIM edit
+    wim_edit_parser = subparsers.add_parser('wim-edit',
+                                            parents=[parent_parser], help="edits a wim")
+    wim_edit_parser.add_argument("name", help="name or uuid of the wim")
+    wim_edit_parser.add_argument("--file",
+                                 help="json/yaml text or file with the changes")\
+                                .completer = FilesCompleter
+    wim_edit_parser.add_argument("-f", "--force", action="store_true",
+                                 help="do not prompt for confirmation")
+    wim_edit_parser.set_defaults(func=wim_edit)
+
+    # WIM list
+    wim_list_parser = subparsers.add_parser('wim-list',
+                                            parents=[parent_parser],
+                                            help="lists information about registered wims")
+    wim_list_parser.add_argument("name", nargs='?',
+                                 help="name or uuid of the wim")
+    wim_list_parser.add_argument("-a", "--all", action="store_true",
+                                 help="shows all wims, not only wims attached to tenant")
+    wim_list_parser.set_defaults(func=wim_list)
+
+    # WIM account create
+    wim_attach_parser = subparsers.add_parser('wim-account-create', parents=[parent_parser],
+                                              help="associates a wim account to the operating tenant")
+    wim_attach_parser.add_argument("name", help="name or uuid of the wim")
+    wim_attach_parser.add_argument('--account-name', action='store',
+                                   help="specify a name for the wim account.")
+    wim_attach_parser.add_argument("--user", action="store",
+                                   help="user credentials for the wim account")
+    wim_attach_parser.add_argument("--password", action="store",
+                                   help="password credentials for the wim account")
+    wim_attach_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_attach_parser.set_defaults(func=wim_account_create)
+
+    # WIM account delete
+    wim_detach_parser = subparsers.add_parser('wim-account-delete',
+                                        parents=[parent_parser],
+                                        help="removes the association "
+                                                "between a wim account and the operating tenant")
+    wim_detach_parser.add_argument("name", help="name or uuid of the wim")
+    wim_detach_parser.add_argument("-a", "--all", action="store_true",
+                                   help="removes all associations from this wim")
+    wim_detach_parser.add_argument("-f", "--force", action="store_true",
+                                   help="forces delete without asking")
+    wim_detach_parser.set_defaults(func=wim_account_delete)
+
+    # WIM account edit
+    wim_attach_edit_parser = subparsers.add_parser('wim-account-edit', parents=[parent_parser],
+                                                   help="modifies the association of a wim account to the operating tenant")
+    wim_attach_edit_parser.add_argument("name", help="name or uuid of the wim")
+    wim_attach_edit_parser.add_argument('--account-name', action='store',
+                                   help="specify a name for the wim account.")
+    wim_attach_edit_parser.add_argument("--user", action="store",
+                                   help="user credentials for the wim account")
+    wim_attach_edit_parser.add_argument("--password", action="store",
+                                   help="password credentials for the wim account")
+    wim_attach_edit_parser.add_argument("--config", action="store",
+                                   help="additional configuration in json/yaml format")
+    wim_attach_edit_parser.set_defaults(func=wim_account_edit)
+
+    # WIM port mapping set
+    wim_port_mapping_set_parser = subparsers.add_parser('wim-port-mapping-set',
+                                                        parents=[parent_parser],
+                                                        help="Load a file with the mappings "
+                                                                "of ports of a WAN switch that is "
+                                                                "connected to a PoP and the ports "
+                                                                "of the switch controlled by the PoP")
+    wim_port_mapping_set_parser.add_argument("name", action="store",
+                                             help="specifies the wim")
+    wim_port_mapping_set_parser.add_argument("file",
+                                             help="json/yaml text or file with the wim port mapping")\
+        .completer = FilesCompleter
+    wim_port_mapping_set_parser.add_argument("-f", "--force",
+                                             action="store_true", help="forces overwriting without asking")
+    wim_port_mapping_set_parser.set_defaults(func=wim_port_mapping_set)
+
+    # WIM port mapping list
+    wim_port_mapping_list_parser = subparsers.add_parser('wim-port-mapping-list',
+            parents=[parent_parser], help="Show the port mappings for a wim")
+    wim_port_mapping_list_parser.add_argument("name", action="store",
+                                              help="specifies the wim")
+    wim_port_mapping_list_parser.set_defaults(func=wim_port_mapping_list)
+
+    # WIM port mapping clear
+    wim_port_mapping_clear_parser = subparsers.add_parser('wim-port-mapping-clear',
+            parents=[parent_parser], help="Clear the port mappings of a wim")
+    wim_port_mapping_clear_parser.add_argument("name", action="store",
+                                               help="specifies the wim")
+    wim_port_mapping_clear_parser.add_argument("-f", "--force",
+                                               action="store_true",
+                                               help="forces clearing without asking")
+    wim_port_mapping_clear_parser.set_defaults(func=wim_port_mapping_clear)
+
+    # =======================================================
+
     action_dict={'net-update': 'retrieves external networks from datacenter',
                  'net-edit': 'edits an external network',
                  'net-delete': 'deletes an external network',
@@ -2062,7 +2470,7 @@ if __name__=="__main__":
             vim_item_create_parser.set_defaults(func=vim_action, item=item, action="create")
 
     argcomplete.autocomplete(main_parser)
-    
+
     try:
         args = main_parser.parse_args()
         #logging info
@@ -2088,7 +2496,7 @@ if __name__=="__main__":
     except OpenmanoCLIError as e:
         print str(e)
         result = -5
-    
+
     #print result
     exit(result)
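
All the wim-* subcommands added above follow the same argparse dispatch pattern as the
rest of the CLI: each subparser registers its handler via set_defaults(func=...) and,
after parsing, args.func(args) is invoked. A minimal, self-contained sketch of that
pattern (the handler body is illustrative only, not the real wim_list implementation):

    import argparse

    def wim_list(args):
        # Illustrative handler: the real CLI issues an HTTP request to the RO server.
        scope = "all wims" if args.all else "wims attached to the current tenant"
        print("listing {} (filter: {})".format(scope, args.name or "<none>"))
        return 0

    main_parser = argparse.ArgumentParser(prog="openmano")
    subparsers = main_parser.add_subparsers(dest="command")

    wim_list_parser = subparsers.add_parser("wim-list",
                                            help="lists information about registered wims")
    wim_list_parser.add_argument("name", nargs='?', help="name or uuid of the wim")
    wim_list_parser.add_argument("-a", "--all", action="store_true",
                                 help="shows all wims, not only wims attached to tenant")
    wim_list_parser.set_defaults(func=wim_list)

    args = main_parser.parse_args(["wim-list", "--all"])
    exit(args.func(args))
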
 
index c04d66a..5e9cc92 100755 (executable)
--- a/openmanod
+++ b/openmanod
@@ -27,7 +27,7 @@ openmano server.
 Main program that implements a reference NFVO (Network Functions Virtualisation Orchestrator).
 It interfaces with an NFV VIM through its API and offers a northbound interface, based on REST (openmano API),
 where NFV services are offered including the creation and deletion of VNF templates, VNF instances,
-network service templates and network service instances. 
+network service templates and network service instances.
 
 It loads the configuration file and launches the http_server thread that will listen requests using openmano API.
 """
@@ -44,14 +44,15 @@ import socket
 from osm_ro import httpserver, nfvo, nfvo_db
 from osm_ro.openmano_schemas import config_schema
 from osm_ro.db_base import db_base_Exception
+from osm_ro.wim.engine import WimEngine
+from osm_ro.wim.persistence import WimPersistence
 import osm_ro
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.5.84-r594"
+__version__ = "0.6.00"
 version_date = "Nov 2018"
-database_version = 33      # expected database schema version
-
+database_version = 34      # expected database schema version
 
 global global_config
 global logger
@@ -106,7 +107,7 @@ def load_configuration(configuration_file):
 
 
 def console_port_iterator():
-    '''this iterator deals with the http_console_ports 
+    '''this iterator deals with the http_console_ports
     returning the ports one by one
     '''
     index = 0
@@ -296,7 +297,7 @@ if __name__=="__main__":
         logger.critical("Starting openmano server version: '%s %s' command: '%s'",
                          __version__, version_date, " ".join(sys.argv))
 
-        for log_module in ("nfvo", "http", "vim", "db", "console", "ovim"):
+        for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim"):
             log_level_module = "log_level_" + log_module
             log_file_module = "log_file_" + log_module
             logger_module = logging.getLogger('openmano.' + log_module)
@@ -343,9 +344,18 @@ if __name__=="__main__":
                     pass  # if tenant exist (NfvoException error 409), ignore
                 else:     # otherwise print and error and continue
                     logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))
-        nfvo.start_service(mydb)
 
-        httpthread = httpserver.httpserver(mydb, False, global_config['http_host'], global_config['http_port'])
+        # WIM module
+        wim_persistence = WimPersistence(mydb)
+        wim_engine = WimEngine(wim_persistence)
+        # ---
+        nfvo.start_service(mydb, wim_persistence, wim_engine)
+
+        httpthread = httpserver.httpserver(
+            mydb, False,
+            global_config['http_host'], global_config['http_port'],
+            wim_persistence, wim_engine
+        )
 
         httpthread.start()
         if 'http_admin_port' in global_config:
index 2a6cd8c..7b48f43 100644 (file)
@@ -37,14 +37,7 @@ import logging
 import datetime
 from jsonschema import validate as js_v, exceptions as js_e
 
-HTTP_Bad_Request = 400
-HTTP_Unauthorized = 401 
-HTTP_Not_Found = 404 
-HTTP_Method_Not_Allowed = 405 
-HTTP_Request_Timeout = 408
-HTTP_Conflict = 409
-HTTP_Service_Unavailable = 503 
-HTTP_Internal_Server_Error = 500 
+from .http_tools import errors as httperrors
 
 def _check_valid_uuid(uuid):
     id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
@@ -68,7 +61,7 @@ def _convert_datetime2str(var):
         for k,v in var.items():
             if type(v) is datetime.datetime:
                 var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
-            elif type(v) is dict or type(v) is list or type(v) is tuple: 
+            elif type(v) is dict or type(v) is list or type(v) is tuple:
                 _convert_datetime2str(v)
         if len(var) == 0: return True
     elif type(var) is list or type(var) is tuple:
@@ -76,7 +69,7 @@ def _convert_datetime2str(var):
             _convert_datetime2str(v)
 
 def _convert_bandwidth(data, reverse=False, logger=None):
-    '''Check the field bandwidth recursivelly and when found, it removes units and convert to number 
+    '''Check the field bandwidth recursively and, when found, remove the units and convert it to a number
     It assumes that bandwidth is well formed
     Attributes:
         'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
@@ -111,7 +104,7 @@ def _convert_bandwidth(data, reverse=False, logger=None):
                 _convert_bandwidth(k, reverse, logger)
 
 def _convert_str2boolean(data, items):
-    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean 
+    '''Check recursively the content of data, and if a key contained in items is found, convert its value from string to boolean
     Done recursively
     Attributes:
         'data': dictionary variable to be checked. None or empty is considered valid
@@ -135,16 +128,15 @@ def _convert_str2boolean(data, items):
             if type(k) is dict or type(k) is tuple or type(k) is list:
                 _convert_str2boolean(k, items)
 
-class db_base_Exception(Exception):
+class db_base_Exception(httperrors.HttpMappedError):
     '''Common Exception for all database exceptions'''
-    
-    def __init__(self, message, http_code=HTTP_Bad_Request):
-        Exception.__init__(self, message)
-        self.http_code = http_code
+
+    def __init__(self, message, http_code=httperrors.Bad_Request):
+        super(db_base_Exception, self).__init__(message, http_code)
 
 class db_base():
     tables_with_created_field=()
-    
+
     def __init__(self, host=None, user=None, passwd=None, database=None, log_name='db', log_level=None):
         self.host = host
         self.user = user
@@ -155,9 +147,9 @@ class db_base():
         self.logger = logging.getLogger(log_name)
         if self.log_level:
             self.logger.setLevel( getattr(logging, log_level) )
-        
+
     def connect(self, host=None, user=None, passwd=None, database=None):
-        '''Connect to specific data base. 
+        '''Connect to specific data base.
         The first time a valid host, user, passwd and database must be provided,
         Following calls can skip this parameters
         '''
@@ -172,8 +164,16 @@ class db_base():
         except mdb.Error as e:
             raise db_base_Exception("Cannot connect to DataBase '{}' at '{}@{}' Error {}: {}".format(
                                     self.database, self.user, self.host, e.args[0], e.args[1]),
-                                    http_code = HTTP_Unauthorized )
-        
+                                    http_code = httperrors.Unauthorized )
+
+    def escape(self, value):
+        return self.con.escape(value)
+
+
+    def escape_string(self, value):
+        return self.con.escape_string(value)
+
+
     def get_db_version(self):
         ''' Obtain the database schema version.
         Return: (negative, text) if error or version 0.0 where schema_version table is missing
@@ -212,10 +212,10 @@ class db_base():
             if e[0][-5:] == "'con'":
                 self.logger.warn("while disconnecting from DB: Error %d: %s",e.args[0], e.args[1])
                 return
-            else: 
+            else:
                 raise
 
-    def _format_error(self, e, tries=1, command=None, extra=None): 
+    def _format_error(self, e, tries=1, command=None, extra=None, table=None):
         '''Creates a text error base on the produced exception
             Params:
                 e: mdb exception
@@ -227,7 +227,8 @@ class db_base():
                 HTTP error in negative, formatted error text
         '''
         if isinstance(e,AttributeError ):
-            raise db_base_Exception("DB Exception " + str(e), HTTP_Internal_Server_Error)
+            self.logger.debug(str(e), exc_info=True)
+            raise db_base_Exception("DB Exception " + str(e), httperrors.Internal_Server_Error)
         if e.args[0]==2006 or e.args[0]==2013 : #MySQL server has gone away (((or)))    Exception 2013: Lost connection to MySQL server during query
             if tries>1:
                 self.logger.warn("DB Exception '%s'. Retry", str(e))
@@ -235,32 +236,45 @@ class db_base():
                 self.connect()
                 return
             else:
-                raise db_base_Exception("Database connection timeout Try Again", HTTP_Request_Timeout)
-        
+                raise db_base_Exception("Database connection timeout Try Again", httperrors.Request_Timeout)
+
         fk=e.args[1].find("foreign key constraint fails")
         if fk>=0:
             if command=="update":
-                raise db_base_Exception("tenant_id '{}' not found.".format(extra), HTTP_Not_Found)
+                raise db_base_Exception("tenant_id '{}' not found.".format(extra), httperrors.Not_Found)
             elif command=="delete":
-                raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), HTTP_Conflict)
+                raise db_base_Exception("Resource is not free. There are {} that prevent deleting it.".format(extra), httperrors.Conflict)
         de = e.args[1].find("Duplicate entry")
         fk = e.args[1].find("for key")
         uk = e.args[1].find("Unknown column")
         wc = e.args[1].find("in 'where clause'")
         fl = e.args[1].find("in 'field list'")
         #print de, fk, uk, wc,fl
+        table_info = ' (table `{}`)'.format(table) if table else ''
         if de>=0:
             if fk>=0: #error 1062
-                raise db_base_Exception("Value {} already in use for {}".format(e.args[1][de+15:fk], e.args[1][fk+7:]), HTTP_Conflict)
+                raise db_base_Exception(
+                    "Value {} already in use for {}{}".format(
+                        e.args[1][de+15:fk], e.args[1][fk+7:], table_info),
+                    httperrors.Conflict)
         if uk>=0:
             if wc>=0:
-                raise db_base_Exception("Field {} can not be used for filtering".format(e.args[1][uk+14:wc]), HTTP_Bad_Request)
+                raise db_base_Exception(
+                    "Field {} can not be used for filtering{}".format(
+                        e.args[1][uk+14:wc], table_info),
+                    httperrors.Bad_Request)
             if fl>=0:
-                raise db_base_Exception("Field {} does not exist".format(e.args[1][uk+14:wc]), HTTP_Bad_Request)
-        raise db_base_Exception("Database internal Error {}: {}".format(e.args[0], e.args[1]), HTTP_Internal_Server_Error)
-    
+                raise db_base_Exception(
+                    "Field {} does not exist{}".format(
+                        e.args[1][uk+14:wc], table_info),
+                    httperrors.Bad_Request)
+        raise db_base_Exception(
+                "Database internal Error{} {}: {}".format(
+                    table_info, e.args[0], e.args[1]),
+                httperrors.Internal_Server_Error)
+
     def __str2db_format(self, data):
-        '''Convert string data to database format. 
+        '''Convert string data to database format.
         If data is None it returns the 'Null' text,
         otherwise it returns the text surrounded by quotes ensuring internal quotes are escaped.
         '''
@@ -270,10 +284,10 @@ class db_base():
             return json.dumps(data)
         else:
             return json.dumps(str(data))
-    
+
     def __tuple2db_format_set(self, data):
         """Compose the needed text for a SQL SET, parameter 'data' is a pair tuple (A,B),
-        and it returns the text 'A="B"', where A is a field of a table and B is the value 
+        and it returns the text 'A="B"', where A is a field of a table and B is the value
         If B is None it returns the 'A=Null' text, without surrounding Null by quotes
         If B is not None it returns the text "A='B'" or 'A="B"' where B is surrounded by quotes,
         and it ensures internal quotes of B are escaped.
@@ -287,10 +301,10 @@ class db_base():
         elif isinstance(data[1], dict):
             if "INCREMENT" in data[1]:
                 return "{A}={A}{N:+d}".format(A=data[0], N=data[1]["INCREMENT"])
-            raise db_base_Exception("Format error for UPDATE field")
+            raise db_base_Exception("Format error for UPDATE field: {!r}".format(data[0]))
         else:
             return str(data[0]) + '=' + json.dumps(str(data[1]))
-    
+
     def __create_where(self, data, use_or=None):
         """
         Compose the needed text for a SQL WHERE, parameter 'data' can be a dict or a list of dict. By default lists are
@@ -347,9 +361,9 @@ class db_base():
         '''remove single quotes ' of any string content of data dictionary'''
         for k,v in data.items():
             if type(v) == str:
-                if "'" in v: 
+                if "'" in v:
                     data[k] = data[k].replace("'","_")
-    
+
     def _update_rows(self, table, UPDATE, WHERE, modified_time=0):
         """ Update one or several rows of a table.
         :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
@@ -369,7 +383,7 @@ class db_base():
             values += ",modified_at={:f}".format(modified_time)
         cmd= "UPDATE " + table + " SET " + values + " WHERE " + self.__create_where(WHERE)
         self.logger.debug(cmd)
-        self.cur.execute(cmd) 
+        self.cur.execute(cmd)
         return self.cur.rowcount
 
     def _new_uuid(self, root_uuid=None, used_table=None, created_time=0):
@@ -398,7 +412,7 @@ class db_base():
 
     def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0, confidential_data=False):
         ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
-        Attribute 
+        Attribute
             INSERT: dictionary with the key:value to insert
             table: table where to insert
             add_uuid: if True, it will create an uuid key entry at INSERT if not provided
@@ -411,7 +425,7 @@ class db_base():
             #create uuid if not provided
             if 'uuid' not in INSERT:
                 uuid = INSERT['uuid'] = str(myUuid.uuid1()) # create_uuid
-            else: 
+            else:
                 uuid = str(INSERT['uuid'])
         else:
             uuid=None
@@ -429,7 +443,7 @@ class db_base():
             self.cur.execute(cmd)
         #insertion
         cmd= "INSERT INTO " + table +" SET " + \
-            ",".join(map(self.__tuple2db_format_set, INSERT.iteritems() )) 
+            ",".join(map(self.__tuple2db_format_set, INSERT.iteritems() ))
         if created_time:
             cmd += ",created_at=%f" % created_time
         if confidential_data:
@@ -448,16 +462,16 @@ class db_base():
         self.cur.execute(cmd)
         rows = self.cur.fetchall()
         return rows
-    
+
     def new_row(self, table, INSERT, add_uuid=False, created_time=0, confidential_data=False):
         ''' Add one row into a table.
-        Attribute 
+        Attribute
             INSERT: dictionary with the key: value to insert
             table: table where to insert
             tenant_id: only useful for logs. If provided, logs will use this tenant_id
             add_uuid: if True, it will create an uuid key entry at INSERT if not provided
         It checks presence of uuid and add one automatically otherwise
-        Return: (result, uuid) where result can be 0 if error, or 1 if ok
+        Return: uuid
         '''
         if table in self.tables_with_created_field and created_time==0:
             created_time=time.time()
@@ -467,9 +481,9 @@ class db_base():
                 with self.con:
                     self.cur = self.con.cursor()
                     return self._new_row_internal(table, INSERT, add_uuid, None, created_time, confidential_data)
-                    
+
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table)
             tries -= 1
 
     def update_rows(self, table, UPDATE, WHERE, modified_time=0):
@@ -493,10 +507,10 @@ class db_base():
             try:
                 with self.con:
                     self.cur = self.con.cursor()
-                    return self._update_rows(table, UPDATE, WHERE)
-                    
+                    return self._update_rows(
+                        table, UPDATE, WHERE, modified_time)
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table)
             tries -= 1
 
     def _delete_row_by_id_internal(self, table, uuid):
@@ -519,7 +533,8 @@ class db_base():
                     self.cur = self.con.cursor()
                     return self._delete_row_by_id_internal(table, uuid)
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries, "delete", "dependencies")
+                self._format_error(
+                    e, tries, "delete", "dependencies", table=table)
             tries -= 1
 
     def delete_row(self, **sql_dict):
@@ -567,9 +582,9 @@ class db_base():
                     rows = self.cur.fetchall()
                     return rows
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table)
             tries -= 1
-    
+
     def get_rows(self, **sql_dict):
         """ Obtain rows from a table.
         :param SELECT: list or tuple of fields to retrieve) (by default all)
@@ -581,7 +596,7 @@ class db_base():
                 keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
                 The special keys "OR", "AND" with a dict value is used to create a nested WHERE
             If a list, each item will be a dictionary that will be concatenated with OR
-        :param LIMIT: limit the number of obtianied entries (Optional)
+        :param LIMIT: limit the number of obtained entries (Optional)
         :param ORDER_BY:  list or tuple of fields to order, add ' DESC' to each item if inverse order is required
         :return: a list with dictionaries at each row, raises exception upon error
         """
@@ -628,10 +643,10 @@ class db_base():
         Attribute:
             table: string of table name
             uuid_name: name or uuid. If not uuid format is found, it is considered a name
-            allow_severeral: if False return ERROR if more than one row are founded 
-            error_item_text: in case of error it identifies the 'item' name for a proper output text 
+            allow_serveral: if False, raise an error when more than one row is found
+            error_item_text: in case of error, it identifies the 'item' name for a proper output text
             'WHERE_OR': dict of key:values, translated to key=value OR ... (Optional)
+            'WHERE_AND_OR': str 'AND' or 'OR' (by default), marks the priority: 'WHERE AND (WHERE_OR)' or '(WHERE) OR WHERE_OR' (Optional)
+            'WHERE_AND_OR: str 'AND' or 'OR'(by default) mark the priority to 'WHERE AND (WHERE_OR)' or (WHERE) OR WHERE_OR' (Optional
         Return: if allow_several==False, a dictionary with this row, or error if no item is found or more than one is found
                 if allow_several==True, a list of dictionaries with the row or rows, error if no item is found
         '''
@@ -656,16 +671,16 @@ class db_base():
                     self.cur.execute(cmd)
                     number = self.cur.rowcount
                     if number == 0:
-                        raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=HTTP_Not_Found)
+                        raise db_base_Exception("No {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Not_Found)
                     elif number > 1 and not allow_serveral:
-                        raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=HTTP_Conflict)
+                        raise db_base_Exception("More than one {} found with {} '{}'".format(error_item_text, what, uuid_name), http_code=httperrors.Conflict)
                     if allow_serveral:
                         rows = self.cur.fetchall()
                     else:
                         rows = self.cur.fetchone()
                     return rows
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table)
             tries -= 1
 
     def get_uuid(self, uuid):
@@ -684,7 +699,7 @@ class db_base():
 
     def get_uuid_from_name(self, table, name):
         '''Searchs in table the name and returns the uuid
-        ''' 
+        '''
         tries = 2
         while tries:
             try:
@@ -699,6 +714,6 @@ class db_base():
                         return self.cur.rowcount, "More than one VNF with name %s found in table %s" %(name, table)
                     return self.cur.rowcount, rows[0]["uuid"]
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table)
             tries -= 1
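
With db_base_Exception now deriving from HttpMappedError (defined in the new
osm_ro/http_tools/errors.py below), every database error carries an http_code attribute
that callers can forward directly. A minimal sketch of an illustrative caller (the
callback and the table name are examples, not part of this patch):

    import bottle

    from osm_ro.db_base import db_base_Exception

    def get_tenant(mydb, tenant_id):
        # Illustrative callback: database errors already carry the proper HTTP code,
        # set by _format_error (Not_Found, Conflict, ..., Bad_Request by default).
        try:
            return mydb.get_rows(FROM='nfvo_tenants', WHERE={'uuid': tenant_id})
        except db_base_Exception as exc:
            bottle.abort(exc.http_code, str(exc))
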
 
diff --git a/osm_ro/http_tools/__init__.py b/osm_ro/http_tools/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/osm_ro/http_tools/errors.py b/osm_ro/http_tools/errors.py
new file mode 100644 (file)
index 0000000..552e85b
--- /dev/null
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+import logging
+from functools import wraps
+
+import bottle
+import yaml
+
+Bad_Request = 400
+Unauthorized = 401
+Not_Found = 404
+Forbidden = 403
+Method_Not_Allowed = 405
+Not_Acceptable = 406
+Request_Timeout = 408
+Conflict = 409
+Service_Unavailable = 503
+Internal_Server_Error = 500
+
+
+class HttpMappedError(Exception):
+    """Base class for a new exception hierarchy that translates HTTP error
+    codes to Python exceptions
+
+    This class accepts an extra argument ``http_code`` (integer
+    representing HTTP error codes).
+    """
+
+    def __init__(self, message, http_code=Internal_Server_Error):
+        Exception.__init__(self, message)
+        self.http_code = http_code
+
+
+class ErrorHandler(object):
+    """Defines a default strategy for handling HttpMappedError.
+
+    This class implements a wrapper (which can also be used as a decorator)
+    that watches for different exceptions and logs them accordingly.
+
+    Arguments:
+        logger(logging.Logger): logger object to be used to report errors
+    """
+    def __init__(self, logger=None):
+        self.logger = logger or logging.getLogger('openmano.http')
+
+    def __call__(self, function):
+        @wraps(function)
+        def _wraped(*args, **kwargs):
+            try:
+                return function(*args, **kwargs)
+            except bottle.HTTPError:
+                raise
+            except HttpMappedError as ex:
+                self.logger.error(
+                    "%s error %s",
+                    function.__name__, ex.http_code, exc_info=True)
+                bottle.abort(ex.http_code, str(ex))
+            except yaml.YAMLError as ex:
+                self.logger.error(
+                    "YAML error while trying to serialize/unserialize fields",
+                    exc_info=True)
+                bottle.abort(Bad_Request, type(ex).__name__ + ": " + str(ex))
+            except Exception as ex:
+                self.logger.error("Unexpected exception: ", exc_info=True)
+                bottle.abort(Internal_Server_Error,
+                             type(ex).__name__ + ": " + str(ex))
+
+        return _wraped
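
A minimal sketch of how this handler is meant to wrap Bottle callbacks, either directly
as a decorator or as a route plugin (the route, logger name and error message are
illustrative):

    import logging

    import bottle

    from osm_ro.http_tools.errors import ErrorHandler, HttpMappedError, Not_Found

    error_handler = ErrorHandler(logging.getLogger('openmano.http'))

    @bottle.route('/openmano/items/<item_id>')
    @error_handler                      # converts HttpMappedError into bottle.abort()
    def get_item(item_id):
        if item_id != '42':
            raise HttpMappedError("item '{}' not found".format(item_id), Not_Found)
        return {'item': item_id}

    # bottle.run(host='localhost', port=9090)   # uncomment to serve the example
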
diff --git a/osm_ro/http_tools/handler.py b/osm_ro/http_tools/handler.py
new file mode 100644 (file)
index 0000000..49249a8
--- /dev/null
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from types import MethodType
+
+from bottle import Bottle
+
+
+class route(object):
+    """Decorator that stores route information, so creating the routes can be
+    postponed.
+
+    This allows methods (OOP) with bottle.
+
+    Arguments:
+        method: HTTP verb (e.g. ``'get'``, ``'post'``, ``'put'``, ...)
+        path: URL path that will be handled by the callback
+    """
+    def __init__(self, method, path, **kwargs):
+        kwargs['method'] = method.upper()
+        self.route_info = (path, kwargs)
+
+    def __call__(self, function):
+        function.route_info = self.route_info
+        return function
+
+
+class BaseHandler(object):
+    """Base class that allows isolated webapp implementation using Bottle,
+    when used in conjunction with the ``route`` decorator.
+
+    In this context, a ``Handler`` is meant to be a collection of Bottle
+    routes/callbacks related to a specific topic.
+
+    A ``Handler`` instance can produce a WSGI app that can be mounted or merged
+    inside another more general bottle app.
+
+    Example:
+
+        from http_tools.handler import BaseHandler, route
+        from http_tools.errors import ErrorHandler
+
+        class MyHandler(BaseHandler):
+            plugins = [ErrorHandler()]
+            url_base = '/my/url/base'
+
+            @route('GET', '/some/path/<var>')
+            def get_var(self, var):
+                return var
+
+        app = MyHandler().wsgi_app
+        # ^  Webapp with a `GET /my/url/base/some/path/<var>` route
+    """
+    _wsgi_app = None
+
+    url_base = ''
+    """String representing a path fragment to be prepended to the routes"""
+
+    plugins = []
+    """Bottle plugins to be installed when creating the WSGI app"""
+
+    @property
+    def wsgi_app(self):
+        """Create a WSGI app based on the implemented callbacks"""
+
+        if self._wsgi_app:
+            # Return if cached
+            return self._wsgi_app
+
+        app = Bottle()
+
+        members = (getattr(self, m) for m in dir(self) if m != 'wsgi_app')
+        callbacks = (m for m in members
+                     if isinstance(m, MethodType) and hasattr(m, 'route_info'))
+
+        for callback in callbacks:
+            path, kwargs = callback.route_info
+            kwargs.update(callback=callback, apply=self.plugins)
+            app.route(self.url_base + path, **kwargs)
+
+        self._wsgi_app = app
+
+        return app
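
The WSGI apps produced this way are merged into the main Bottle application, which is
exactly what the httpserver changes later in this patch do with WimHandler. A minimal
sketch with an illustrative handler (the /openmano/ping prefix and the callback are
examples only):

    import bottle

    from osm_ro.http_tools.errors import ErrorHandler
    from osm_ro.http_tools.handler import BaseHandler, route

    class PingHandler(BaseHandler):
        plugins = [ErrorHandler()]
        url_base = '/openmano/ping'

        @route('GET', '/<token>')
        def ping(self, token):
            return {'pong': token}

    parent_app = bottle.app()                 # the default bottle application
    parent_app.merge(PingHandler().wsgi_app)  # adds GET /openmano/ping/<token>
    # bottle.run(app=parent_app, host='localhost', port=9090)
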
diff --git a/osm_ro/http_tools/request_processing.py b/osm_ro/http_tools/request_processing.py
new file mode 100644 (file)
index 0000000..0b8a6ca
--- /dev/null
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+
+#
+# Util functions previously in `httpserver`
+#
+
+__author__ = "Alfonso Tierno, Gerardo Garcia"
+
+import json
+import logging
+
+import bottle
+import yaml
+from jsonschema import exceptions as js_e
+from jsonschema import validate as js_v
+
+from . import errors as httperrors
+
+logger = logging.getLogger('openmano.http')
+
+
+def remove_clear_passwd(data):
+    """
+    Removes clear passwords from the data received
+    :param data: data with clear password
+    :return: data without the password information
+    """
+
+    passw = ['password: ', 'passwd: ']
+
+    for pattern in passw:
+        init = data.find(pattern)
+        while init != -1:
+            end = data.find('\n', init)
+            data = data[:init] + '{}******'.format(pattern) + data[end:]
+            init += 1
+            init = data.find(pattern, init)
+    return data
+
+
+def change_keys_http2db(data, http_db, reverse=False):
+    '''Change the keys of dictionary 'data' according to the 'http_db' mapping.
+    This allows changing from HTTP interface names to database names.
+    When reverse is True, the change is done in the opposite direction.
+    Attributes:
+        data: can be a dictionary or a list
+        http_db: dictionary with HTTP names as keys and database names as values
+        reverse: by default the change is done from HTTP API naming to database naming.
+            If True, the change is done in the opposite direction.
+    Return: None, but data is modified in place'''
+    if type(data) is tuple or type(data) is list:
+        for d in data:
+            change_keys_http2db(d, http_db, reverse)
+    elif type(data) is dict or type(data) is bottle.FormsDict:
+        if reverse:
+            for k,v in http_db.items():
+                if v in data: data[k]=data.pop(v)
+        else:
+            for k,v in http_db.items():
+                if k in data: data[v]=data.pop(k)
+
+
+def format_out(data):
+    '''Return a string with the dictionary 'data' serialized in the format
+    requested via the Accept header (yaml or json; json by default)
+    '''
+    logger.debug("OUT: " + yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) )
+    accept = bottle.request.headers.get('Accept')
+    if accept and 'application/yaml' in accept:
+        bottle.response.content_type='application/yaml'
+        return yaml.safe_dump(
+                data, explicit_start=True, indent=4, default_flow_style=False,
+                tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
+    else: #by default json
+        bottle.response.content_type='application/json'
+        #return data #json no style
+        return json.dumps(data, indent=4) + "\n"
+
+
+def format_in(default_schema, version_fields=None, version_dict_schema=None, confidential_data=False):
+    """
+    Parse the content of HTTP request against a json_schema
+
+    :param default_schema: The schema to be parsed by default
+        if no version field is found in the client data.
+        If None, no validation is done
+    :param version_fields: If provided it contains a tuple or list with the
+        fields to iterate across the client data to obtain the version
+    :param version_dict_schema: It contains a dictionary with the version as key,
+        and json schema to apply as value.
+        It can contain None as a key; that schema is applied
+        if the client data version does not match any key
+    :return:  user_data, used_schema: if the data is successfully decoded and
+        matches the schema.
+
+    Launch a bottle abort if fails
+    """
+    #print "HEADERS :" + str(bottle.request.headers.items())
+    try:
+        error_text = "Invalid header format "
+        format_type = bottle.request.headers.get('Content-Type', 'application/json')
+        if 'application/json' in format_type:
+            error_text = "Invalid json format "
+            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
+            client_data = json.load(bottle.request.body)
+            #client_data = bottle.request.json()
+        elif 'application/yaml' in format_type:
+            error_text = "Invalid yaml format "
+            client_data = yaml.load(bottle.request.body)
+        elif 'application/xml' in format_type:
+            bottle.abort(501, "Content-Type: application/xml not supported yet.")
+        else:
+            logger.warning('Content-Type ' + str(format_type) + ' not supported.')
+            bottle.abort(httperrors.Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
+            return
+        # if client_data == None:
+        #    bottle.abort(httperrors.Bad_Request, "Content error, empty")
+        #    return
+        if confidential_data:
+            logger.debug('IN: %s', remove_clear_passwd (yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
+                                              tags=False, encoding='utf-8', allow_unicode=True)))
+        else:
+            logger.debug('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
+                                              tags=False, encoding='utf-8', allow_unicode=True) )
+        # look for the client provider version
+        error_text = "Invalid content "
+        if not default_schema and not version_fields:
+            return client_data, None
+        client_version = None
+        used_schema = None
+        if version_fields != None:
+            client_version = client_data
+            for field in version_fields:
+                if field in client_version:
+                    client_version = client_version[field]
+                else:
+                    client_version=None
+                    break
+        if client_version == None:
+            used_schema = default_schema
+        elif version_dict_schema != None:
+            if client_version in version_dict_schema:
+                used_schema = version_dict_schema[client_version]
+            elif None in version_dict_schema:
+                used_schema = version_dict_schema[None]
+        if used_schema==None:
+            bottle.abort(httperrors.Bad_Request, "Invalid schema version or missing version field")
+
+        js_v(client_data, used_schema)
+        return client_data, used_schema
+    except (TypeError, ValueError, yaml.YAMLError) as exc:
+        error_text += str(exc)
+        logger.error(error_text, exc_info=True)
+        bottle.abort(httperrors.Bad_Request, error_text)
+    except js_e.ValidationError as exc:
+        logger.error(
+            "validate_in error, jsonschema exception", exc_info=True)
+        error_pos = ""
+        if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
+        bottle.abort(httperrors.Bad_Request, error_text + exc.message + error_pos)
+    #except:
+    #    bottle.abort(httperrors.Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
+    #    raise
+
+def filter_query_string(qs, http2db, allowed):
+    '''Process the query string (qs), checking that it contains only valid tokens, to avoid SQL injection
+    Attributes:
+        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
+        'http2db': dictionary mapping HTTP API naming (keys) to database naming (values)
+        'allowed': list of allowed string tokens (HTTP API naming). All the keys of 'qs' must be in 'allowed'
+    Return: A tuple (select, where, limit) to be used in a database query, all of them translated to the database naming
+        select: list of items to retrieve, filtered by the query string 'field=token'. If no 'field' is present, the allowed list is returned
+        where: dictionary with key, value taken from the query string token=value. Empty if nothing is provided
+        limit: limit dictated by the user with the query string 'limit'. 100 by default
+    Aborts with bottle.abort if a token is not permitted
+    '''
+    where={}
+    limit=100
+    select=[]
+    #if type(qs) is not bottle.FormsDict:
+    #    bottle.abort(httperrors.Internal_Server_Error, '!!!!!!!!!!!!!!invalid query string not a dictionary')
+    #    #bottle.abort(httperrors.Internal_Server_Error, "call programmer")
+    for k in qs:
+        if k=='field':
+            select += qs.getall(k)
+            for v in select:
+                if v not in allowed:
+                    bottle.abort(httperrors.Bad_Request, "Invalid query string at 'field="+v+"'")
+        elif k=='limit':
+            try:
+                limit=int(qs[k])
+            except:
+                bottle.abort(httperrors.Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
+        else:
+            if k not in allowed:
+                bottle.abort(httperrors.Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
+            if qs[k]!="null":  where[k]=qs[k]
+            else: where[k]=None
+    if len(select)==0: select += allowed
+    #change from http api to database naming
+    for i in range(0,len(select)):
+        k=select[i]
+        if http2db and k in http2db:
+            select[i] = http2db[k]
+    if http2db:
+        change_keys_http2db(where, http2db)
+    #print "filter_query_string", select,where,limit
+
+    return select,where,limit
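
Two of the helpers above (remove_clear_passwd and change_keys_http2db) are pure
functions and can be exercised outside of a request context; format_in, format_out and
filter_query_string need an active bottle request, so they are not shown. The field
mapping below is illustrative, not taken from the RO API:

    from osm_ro.http_tools.request_processing import (
        change_keys_http2db,
        remove_clear_passwd,
    )

    # Translate HTTP API field names into database column names, and back
    http2db_wim = {'wim': 'name', 'url': 'wim_url'}      # illustrative mapping
    data = {'wim': 'my-wim', 'url': 'http://wim.example.com'}
    change_keys_http2db(data, http2db_wim)
    print(data)   # {'name': 'my-wim', 'wim_url': 'http://wim.example.com'}
    change_keys_http2db(data, http2db_wim, reverse=True)
    print(data)   # back to the HTTP naming

    # Mask credentials before logging request bodies
    print(remove_clear_passwd("user: admin\npassword: secret\n"))
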
diff --git a/osm_ro/http_tools/tests/__init__.py b/osm_ro/http_tools/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/osm_ro/http_tools/tests/test_errors.py b/osm_ro/http_tools/tests/test_errors.py
new file mode 100644 (file)
index 0000000..a968e76
--- /dev/null
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+import bottle
+
+from .. import errors as httperrors
+from ...tests.helpers import TestCaseWithLogging
+
+
+class TestHttpErrors(TestCaseWithLogging):
+    def test_http_error_base(self):
+        # When an error code is passed as argument
+        ex = httperrors.HttpMappedError(http_code=1226324)
+        # then it should be set in the exception object
+        self.assertEqual(ex.http_code, 1226324)
+        # When an error code is not passed as argument
+        ex = httperrors.HttpMappedError()
+        # then the default error code (internal server error) should be used
+        self.assertEqual(ex.http_code, httperrors.Internal_Server_Error)
+
+    def test_error_handler_should_log_unexpected_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an unexpected error
+        @error_handler
+        def _throw():
+            raise AttributeError('some error')
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError):
+            _throw()
+        logs = self.caplog.getvalue()
+        # then the exception should be contained by bottle
+        # and a proper message should be logged
+        assert "Unexpected exception:" in logs
+
+    def test_error_handler_should_log_http_based_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an error that is considered by the
+        # application
+        @error_handler
+        def _throw():
+            raise httperrors.HttpMappedError(http_code=404)
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError):
+            _throw()
+        logs = self.caplog.getvalue()
+        # then the exception should be contained by bottle
+        # and a proper message should be logged
+        assert "_throw error 404" in logs
+
+    def test_error_handler_should_ignore_bottle_errors(self):
+        # Given a error handler wraps a function
+        error_handler = httperrors.ErrorHandler(self.logger)
+
+        # and the function raises an error that is considered by the
+        # application
+        exception = bottle.HTTPError()
+
+        @error_handler
+        def _throw():
+            raise exception
+
+        # when the function is called
+        with self.assertRaises(bottle.HTTPError) as context:
+            _throw()
+        # then the exception should bypass the error handler
+        self.assertEqual(context.exception, exception)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/http_tools/tests/test_handler.py b/osm_ro/http_tools/tests/test_handler.py
new file mode 100644 (file)
index 0000000..af32545
--- /dev/null
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+from mock import MagicMock, patch
+from webtest import TestApp
+
+from .. import handler
+from ..handler import BaseHandler, route
+
+
+class TestIntegration(unittest.TestCase):
+    def test_wsgi_app(self):
+        # Given a Handler class that implements a route
+        some_plugin = MagicMock()
+
+        class MyHandler(BaseHandler):
+            url_base = '/42'
+            plugins = [some_plugin]
+
+            @route('get', '/some/path')
+            def callback(self):
+                return 'some content'
+
+        route_mock = MagicMock()
+        with patch(handler.__name__+'.Bottle.route', route_mock):
+            # When we try to access wsgi_app for the first time
+            my_handler = MyHandler()
+            assert my_handler.wsgi_app
+            # then bottle.route should be called with the right arguments
+            route_mock.assert_called_once_with('/42/some/path', method='GET',
+                                               callback=my_handler.callback,
+                                               apply=[some_plugin])
+
+            # When we try to access wsgi_app for the second time
+            assert my_handler.wsgi_app
+            # then the result should be cached
+            # and bottle.route should not be called again
+            self.assertEqual(route_mock.call_count, 1)
+
+    def test_route_created(self):
+        # Given a Handler class, as in the example documentation
+        class MyHandler(BaseHandler):
+            def __init__(self):
+                self.value = 42
+
+            @route('GET', '/some/path/<param>')
+            def callback(self, param):
+                return '{} + {}'.format(self.value, param)
+
+        # when this class is used to generate a webapp
+        app = TestApp(MyHandler().wsgi_app)
+
+        # then the defined URLs should be available
+        response = app.get('/some/path/0')
+        self.assertEqual(response.status_code, 200)
+        # and the callbacks should have access to ``self``
+        response.mustcontain('42 + 0')
+
+    def test_url_base(self):
+        # Given a Handler class that allows url_base customization
+        class MyHandler(BaseHandler):
+            def __init__(self, url_base):
+                self.url_base = url_base
+
+            @route('GET', '/some/path/<param>')
+            def callback(self, param):
+                return param
+
+        # when this class is used to generate a webapp
+        app = TestApp(MyHandler('/prefix').wsgi_app)
+
+        # then the prefixed URLs should be available
+        response = app.get('/prefix/some/path/content')
+        self.assertEqual(response.status_code, 200)
+        response.mustcontain('content')
+
+    def test_starting_param(self):
+        # Given a Handler class with a route beginning with a param
+        class MyHandler(BaseHandler):
+            @route('GET', '/<param>/some/path')
+            def callback(self, param):
+                return '**{}**'.format(param)
+
+        # is used to generate a webapp
+        app = TestApp(MyHandler().wsgi_app)
+
+        # when the defined URLs is accessed
+        response = app.get('/42/some/path')
+        # Then no error should happen
+        self.assertEqual(response.status_code, 200)
+        response.mustcontain('**42**')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/http_tools/tox.ini b/osm_ro/http_tools/tox.ini
new file mode 100644 (file)
index 0000000..43055c2
--- /dev/null
@@ -0,0 +1,49 @@
+# This tox file allows the devs to run unit tests only for this subpackage.
+# In order to do so, cd into the directory and run `tox`
+
+[tox]
+minversion = 1.8
+envlist = py27,py36,flake8,radon
+skipsdist = True
+
+[testenv]
+changedir = {toxinidir}
+commands =
+    nosetests -d --with-coverage --cover-package=. {posargs:tests}
+deps =
+    WebTest
+    bottle
+    coverage
+    mock
+    nose
+    six
+    PyYaml
+
+[testenv:flake8]
+changedir = {toxinidir}
+deps = flake8
+commands = flake8 {posargs:.}
+
+[testenv:radon]
+changedir = {toxinidir}
+deps = radon
+commands =
+    radon cc --show-complexity --total-average {posargs:.}
+    radon mi -s {posargs:.}
+
+[coverage:run]
+branch = True
+source = {toxinidir}
+omit =
+    tests
+    tests/*
+    */test_*
+    .tox/*
+
+[coverage:report]
+show_missing = True
+
+[flake8]
+exclude =
+    request_processing.py
+    .tox
index 374676e..613fb08 100644 (file)
 '''
 HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
 and will use the nfvo.py module to run the appropriate method.
-Every YAML/JSON file is checked against a schema in openmano_schemas.py module.  
+Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
 '''
 __author__="Alfonso Tierno, Gerardo Garcia"
 __date__ ="$17-sep-2014 09:07:15$"
 
 import bottle
 import yaml
-import json
 import threading
-import time
 import logging
 
-from jsonschema import validate as js_v, exceptions as js_e
 from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
                             nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
                             scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
@@ -45,6 +42,14 @@ from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
                             object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
                             sdn_port_mapping_schema, sdn_external_port_schema
 
+from .http_tools import errors as httperrors
+from .http_tools.request_processing import (
+    format_out,
+    format_in,
+    filter_query_string
+)
+from .wim.http_handler import WimHandler
+
 import nfvo
 import utils
 from db_base import db_base_Exception
@@ -56,42 +61,6 @@ global logger
 url_base="/openmano"
 logger = None
 
-HTTP_Bad_Request =          400
-HTTP_Unauthorized =         401 
-HTTP_Not_Found =            404 
-HTTP_Forbidden =            403
-HTTP_Method_Not_Allowed =   405 
-HTTP_Not_Acceptable =       406
-HTTP_Service_Unavailable =  503 
-HTTP_Internal_Server_Error= 500 
-
-def delete_nulls(var):
-    if type(var) is dict:
-        for k in var.keys():
-            if var[k] is None: del var[k]
-            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple: 
-                if delete_nulls(var[k]): del var[k]
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for k in var:
-            if type(k) is dict: delete_nulls(k)
-        if len(var) == 0: return True
-    return False
-
-def convert_datetime2str(var):
-    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
-    It enters recursively in the dict var finding this kind of variables
-    '''
-    if type(var) is dict:
-        for k,v in var.items():
-            if type(v) is float and k in ("created_at", "modified_at"):
-                var[k] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(v) )
-            elif type(v) is dict or type(v) is list or type(v) is tuple: 
-                convert_datetime2str(v)
-        if len(var) == 0: return True
-    elif type(var) is list or type(var) is tuple:
-        for v in var:
-            convert_datetime2str(v)
 
 def log_to_logger(fn):
     '''
@@ -102,15 +71,16 @@ def log_to_logger(fn):
     def _log_to_logger(*args, **kwargs):
         actual_response = fn(*args, **kwargs)
         # modify this to log exactly what you need:
-        logger.info('FROM %s %s %s %s' % (bottle.request.remote_addr,
+        logger.info('FROM %s %s %s %s', bottle.request.remote_addr,
                                         bottle.request.method,
                                         bottle.request.url,
-                                        bottle.response.status))
+                                        bottle.response.status)
         return actual_response
     return _log_to_logger
 
 class httpserver(threading.Thread):
-    def __init__(self, db, admin=False, host='localhost', port=9090):
+    def __init__(self, db, admin=False, host='localhost', port=9090,
+                 wim_persistence=None, wim_engine=None):
         #global url_base
         global mydb
         global logger
@@ -127,186 +97,37 @@ class httpserver(threading.Thread):
             #self.url_preffix = 'http://' + host + ':' + str(port) + url_base
             mydb = db
         #self.first_usable_connection_index = 10
-        #self.next_connection_index = self.first_usable_connection_index #The next connection index to be used 
+        #self.next_connection_index = self.first_usable_connection_index #The next connection index to be used
         #Ensure that when the main program exits the thread will also exit
+
+        self.handlers = [
+            WimHandler(db, wim_persistence, wim_engine, url_base)
+        ]
+
         self.daemon = True
         self.setDaemon(True)
-         
-    def run(self):
+
+    def run(self, debug=False, quiet=True):
         bottle.install(log_to_logger)
-        bottle.run(host=self.host, port=self.port, debug=False, quiet=True)
-           
+        default_app = bottle.app()
+
+        for handler in self.handlers:
+            default_app.merge(handler.wsgi_app)
+
+        bottle.run(host=self.host, port=self.port, debug=debug, quiet=quiet)
+
+
 def run_bottle(db, host_='localhost', port_=9090):
-    '''used for launching in main thread, so that it can be debugged'''
-    global mydb
-    mydb = db
-    bottle.run(host=host_, port=port_, debug=True) #quiet=True
-    
+    '''Used for launching in main thread, so that it can be debugged'''
+    server = httpserver(db, host=host_, port=port_)
+    server.run(debug=True)  # quiet=True
+
 
 @bottle.route(url_base + '/', method='GET')
 def http_get():
-    #print 
+    #print
     return 'works' #TODO: to be completed
 
-#
-# Util functions
-#
-
-def change_keys_http2db(data, http_db, reverse=False):
-    '''Change keys of dictionary data acording to the key_dict values
-    This allow change from http interface names to database names.
-    When reverse is True, the change is otherwise
-    Attributes:
-        data: can be a dictionary or a list
-        http_db: is a dictionary with hhtp names as keys and database names as value
-        reverse: by default change is done from http api to database. If True change is done otherwise
-    Return: None, but data is modified'''
-    if type(data) is tuple or type(data) is list:
-        for d in data:
-            change_keys_http2db(d, http_db, reverse)
-    elif type(data) is dict or type(data) is bottle.FormsDict:
-        if reverse:
-            for k,v in http_db.items():
-                if v in data: data[k]=data.pop(v)
-        else:
-            for k,v in http_db.items():
-                if k in data: data[v]=data.pop(k)
-
-def format_out(data):
-    '''return string of dictionary data according to requested json, yaml, xml. By default json'''
-    logger.debug("OUT: " + yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) )
-    if 'application/yaml' in bottle.request.headers.get('Accept'):
-        bottle.response.content_type='application/yaml'
-        return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
-    else: #by default json
-        bottle.response.content_type='application/json'
-        #return data #json no style
-        return json.dumps(data, indent=4) + "\n"
-
-def format_in(default_schema, version_fields=None, version_dict_schema=None, confidential_data=False):
-    """
-    Parse the content of HTTP request against a json_schema
-    :param default_schema: The schema to be parsed by default if no version field is found in the client data. In None
-        no validation is done
-    :param version_fields: If provided it contains a tuple or list with the fields to iterate across the client data to
-        obtain the version
-    :param version_dict_schema: It contains a dictionary with the version as key, and json schema to apply as value.
-        It can contain a None as key, and this is apply if the client data version does not match any key
-    :return:  user_data, used_schema: if the data is successfully decoded and matches the schema.
-        Launch a bottle abort if fails
-    """
-    #print "HEADERS :" + str(bottle.request.headers.items())
-    try:
-        error_text = "Invalid header format "
-        format_type = bottle.request.headers.get('Content-Type', 'application/json')
-        if 'application/json' in format_type:
-            error_text = "Invalid json format "
-            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
-            client_data = json.load(bottle.request.body)
-            #client_data = bottle.request.json()
-        elif 'application/yaml' in format_type:
-            error_text = "Invalid yaml format "
-            client_data = yaml.load(bottle.request.body)
-        elif 'application/xml' in format_type:
-            bottle.abort(501, "Content-Type: application/xml not supported yet.")
-        else:
-            logger.warning('Content-Type ' + str(format_type) + ' not supported.')
-            bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
-            return
-        # if client_data == None:
-        #    bottle.abort(HTTP_Bad_Request, "Content error, empty")
-        #    return
-        if confidential_data:
-            logger.debug('IN: %s', remove_clear_passwd (yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
-                                              tags=False, encoding='utf-8', allow_unicode=True)))
-        else:
-            logger.debug('IN: %s', yaml.safe_dump(client_data, explicit_start=True, indent=4, default_flow_style=False,
-                                              tags=False, encoding='utf-8', allow_unicode=True) )
-        # look for the client provider version
-        error_text = "Invalid content "
-        if not default_schema and not version_fields:
-            return client_data, None
-        client_version = None
-        used_schema = None
-        if version_fields != None:
-            client_version = client_data
-            for field in version_fields:
-                if field in client_version:
-                    client_version = client_version[field]
-                else:
-                    client_version=None
-                    break
-        if client_version == None:
-            used_schema = default_schema
-        elif version_dict_schema != None:
-            if client_version in version_dict_schema:
-                used_schema = version_dict_schema[client_version]
-            elif None in version_dict_schema:
-                used_schema = version_dict_schema[None]
-        if used_schema==None:
-            bottle.abort(HTTP_Bad_Request, "Invalid schema version or missing version field")
-            
-        js_v(client_data, used_schema)
-        return client_data, used_schema
-    except (TypeError, ValueError, yaml.YAMLError) as exc:
-        error_text += str(exc)
-        logger.error(error_text) 
-        bottle.abort(HTTP_Bad_Request, error_text)
-    except js_e.ValidationError as exc:
-        logger.error("validate_in error, jsonschema exception at '%s' '%s' ", str(exc.path), str(exc.message))
-        error_pos = ""
-        if len(exc.path)>0: error_pos=" at " + ":".join(map(json.dumps, exc.path))
-        bottle.abort(HTTP_Bad_Request, error_text + exc.message + error_pos)
-    #except:
-    #    bottle.abort(HTTP_Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
-    #    raise
-
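The interesting part of format_in is how it chooses the schema to validate against; that selection logic can be replayed in isolation (the schema values below are mere placeholders):

def pick_schema(client_data, default_schema, version_fields, version_dict_schema):
    # walk version_fields into client_data; fall back to default_schema when absent
    version = client_data
    for field in version_fields or ():
        if isinstance(version, dict) and field in version:
            version = version[field]
        else:
            version = None
            break
    if version is None:
        return default_schema
    return version_dict_schema.get(version, version_dict_schema.get(None))

schemas = {"0.2": "schema_v02", None: "schema_v01"}
assert pick_schema({"schema_version": "0.2"}, "schema_v01", ("schema_version",), schemas) == "schema_v02"
assert pick_schema({"vnf": {}}, "schema_v01", ("schema_version",), schemas) == "schema_v01"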
-def filter_query_string(qs, http2db, allowed):
-    '''Process the query string (qs), checking that it contains only valid tokens, to avoid SQL injection
-    Attributes:
-        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
-        'http2db': dictionary mapping http API naming (dictionary key) to database naming (dictionary value)
-        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
-    Return: A tuple (select, where, limit) to be used in a database query, all of them transformed to the database naming
-        select: list of items to retrieve, filtered by the query string 'field=token'. If no 'field' is present, the allowed list is returned
-        where: dictionary with key, value pairs taken from the query string token=value. Empty if nothing is provided
-        limit: limit dictated by the user with the query string 'limit'. 100 by default
-    Aborts if not permitted, using bottle.abort
-    '''
-    where={}
-    limit=100
-    select=[]
-    #if type(qs) is not bottle.FormsDict:
-    #    bottle.abort(HTTP_Internal_Server_Error, '!!!!!!!!!!!!!!invalid query string not a dictionary')
-    #    #bottle.abort(HTTP_Internal_Server_Error, "call programmer")
-    for k in qs:
-        if k=='field':
-            select += qs.getall(k)
-            for v in select:
-                if v not in allowed:
-                    bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field="+v+"'")
-        elif k=='limit':
-            try:
-                limit=int(qs[k])
-            except:
-                bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
-        else:
-            if k not in allowed:
-                bottle.abort(HTTP_Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
-            if qs[k]!="null":  where[k]=qs[k]
-            else: where[k]=None 
-    if len(select)==0: select += allowed
-    #change from http api to database naming
-    for i in range(0,len(select)):
-        k=select[i]
-        if http2db and k in http2db: 
-            select[i] = http2db[k]
-    if http2db:
-        change_keys_http2db(where, http2db)
-    #print "filter_query_string", select,where,limit
-    
-    return select,where,limit
-
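Most GET handlers below still build their database query through filter_query_string; exercising the helper shown above with a made-up request gives a feel for the tuple it returns (field names are hypothetical, and bottle FormsDict append/getall behaviour is assumed):

from bottle import FormsDict

qs = FormsDict(limit='10', description='null')
qs.append('field', 'name')
qs.append('field', 'uuid')

select, where, limit = filter_query_string(qs, None, ('uuid', 'name', 'description'))
# select == ['name', 'uuid']        only values listed in 'allowed' pass
# where  == {'description': None}   the literal 'null' maps to SQL NULL
# limit  == 10                      100 when the query string omits it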
 @bottle.hook('after_request')
 def enable_cors():
     '''Don't know yet if really needed. Keep it just in case'''
@@ -327,7 +148,7 @@ def http_get_tenants():
     try:
         tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_,WHERE=where_,LIMIT=limit_)
         #change_keys_http2db(content, http2db_tenant, reverse=True)
-        convert_datetime2str(tenants)
+        utils.convert_float_timestamp2str(tenants)
         data={'tenants' : tenants}
         return format_out(data)
     except bottle.HTTPError:
@@ -337,7 +158,7 @@ def http_get_tenants():
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
@@ -354,10 +175,10 @@ def http_get_tenant_id(tenant_id):
         tenants = mydb.get_rows(FROM=from_, SELECT=select_,WHERE=where_)
         #change_keys_http2db(content, http2db_tenant, reverse=True)
         if len(tenants) == 0:
-            bottle.abort(HTTP_Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
+            bottle.abort(httperrors.Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
         elif len(tenants) > 1:
-            bottle.abort(HTTP_Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
-        convert_datetime2str(tenants[0])
+            bottle.abort(httperrors.Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
+        utils.convert_float_timestamp2str(tenants[0])
         data = {'tenant': tenants[0]}
         return format_out(data)
     except bottle.HTTPError:
@@ -367,7 +188,7 @@ def http_get_tenant_id(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/tenants', method='POST')
@@ -379,7 +200,7 @@ def http_post_tenants():
     r = utils.remove_extra_items(http_content, tenant_schema)
     if r:
         logger.debug("Remove received extra items %s", str(r))
-    try: 
+    try:
         data = nfvo.new_tenant(mydb, http_content['tenant'])
         return http_get_tenant_id(data)
     except bottle.HTTPError:
@@ -389,7 +210,7 @@ def http_post_tenants():
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
@@ -401,11 +222,11 @@ def http_edit_tenant_id(tenant_id):
     r = utils.remove_extra_items(http_content, tenant_edit_schema)
     if r:
         logger.debug("Remove received extra items %s", str(r))
-    
+
     #obtain data, check that only one exist
-    try: 
+    try:
         tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
-        #edit data 
+        #edit data
         tenant_id = tenant['uuid']
         where={'uuid': tenant['uuid']}
         mydb.update_rows('nfvo_tenants', http_content['tenant'], where)
@@ -417,7 +238,7 @@ def http_edit_tenant_id(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
@@ -434,7 +255,7 @@ def http_delete_tenant_id(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
@@ -458,7 +279,7 @@ def http_get_datacenters(tenant_id):
             datacenters = mydb.get_rows(FROM='datacenters',
                                           SELECT=select_,WHERE=where_,LIMIT=limit_)
         #change_keys_http2db(content, http2db_tenant, reverse=True)
-        convert_datetime2str(datacenters)
+        utils.convert_float_timestamp2str(datacenters)
         data={'datacenters' : datacenters}
         return format_out(data)
     except bottle.HTTPError:
@@ -468,7 +289,7 @@ def http_get_datacenters(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vim_accounts', method='GET')
@@ -542,11 +363,11 @@ def http_get_datacenter_id(tenant_id, datacenter_id):
                     SELECT=select_,
                     FROM=from_,
                     WHERE=where_)
-    
+
         if len(datacenters)==0:
-            bottle.abort( HTTP_Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
-        elif len(datacenters)>1: 
-            bottle.abort( HTTP_Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
+            bottle.abort( httperrors.Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
+        elif len(datacenters)>1:
+            bottle.abort( httperrors.Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
         datacenter = datacenters[0]
         if tenant_id != 'any':
             #get vim tenant info
@@ -586,7 +407,7 @@ def http_get_datacenter_id(tenant_id, datacenter_id):
             except Exception as e:
                 logger.error("Exception '%s' while trying to load config information", str(e))
         #change_keys_http2db(content, http2db_datacenter, reverse=True)
-        convert_datetime2str(datacenter)
+        utils.convert_float_timestamp2str(datacenter)
         data={'datacenter' : datacenter}
         return format_out(data)
     except bottle.HTTPError:
@@ -596,7 +417,7 @@ def http_get_datacenter_id(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/datacenters', method='POST')
@@ -618,7 +439,7 @@ def http_post_datacenters():
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
@@ -630,7 +451,7 @@ def http_edit_datacenter_id(datacenter_id_name):
     r = utils.remove_extra_items(http_content, datacenter_edit_schema)
     if r:
         logger.debug("Remove received extra items %s", str(r))
-    
+
     try:
         datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, http_content['datacenter'])
         return http_get_datacenter_id('any', datacenter_id)
@@ -641,7 +462,7 @@ def http_edit_datacenter_id(datacenter_id_name):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
 def http_post_sdn_controller(tenant_id):
@@ -662,7 +483,7 @@ def http_post_sdn_controller(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
 def http_put_sdn_controller_update(tenant_id, controller_id):
@@ -687,7 +508,7 @@ def http_put_sdn_controller_update(tenant_id, controller_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
 def http_get_sdn_controller(tenant_id):
@@ -704,7 +525,7 @@ def http_get_sdn_controller(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
 def http_get_sdn_controller_id(tenant_id, controller_id):
@@ -720,7 +541,7 @@ def http_get_sdn_controller_id(tenant_id, controller_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
 def http_delete_sdn_controller_id(tenant_id, controller_id):
@@ -736,7 +557,7 @@ def http_delete_sdn_controller_id(tenant_id, controller_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
 def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
@@ -757,7 +578,7 @@ def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
 def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
@@ -774,7 +595,7 @@ def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
 def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
@@ -790,7 +611,7 @@ def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET')  #deprecated
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@@ -800,7 +621,7 @@ def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
     #obtain data
     try:
-        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter") 
+        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
         where_= {"datacenter_id":datacenter_dict['uuid']}
         if netmap_id:
             if utils.check_valid_uuid(netmap_id):
@@ -809,14 +630,14 @@ def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
                 where_["name"] = netmap_id
         netmaps =mydb.get_rows(FROM='datacenter_nets',
                                         SELECT=('name','vim_net_id as vim_id', 'uuid', 'type','multipoint','shared','description', 'created_at'),
-                                        WHERE=where_ ) 
-        convert_datetime2str(netmaps)
+                                        WHERE=where_ )
+        utils.convert_float_timestamp2str(netmaps)
         utils.convert_str2boolean(netmaps, ('shared', 'multipoint') )
         if netmap_id and len(netmaps)==1:
             data={'netmap' : netmaps[0]}
         elif netmap_id and len(netmaps)==0:
-            bottle.abort(HTTP_Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.iteritems())) )
-            return 
+            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.iteritems())) )
+            return
         else:
             data={'netmaps' : netmaps}
         return format_out(data)
@@ -827,7 +648,7 @@ def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@@ -837,7 +658,7 @@ def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
     #obtain data
     try:
-        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter") 
+        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
         where_= {"datacenter_id":datacenter_dict['uuid']}
         if netmap_id:
             if utils.check_valid_uuid(netmap_id):
@@ -845,9 +666,9 @@ def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
             else:
                 where_["name"] = netmap_id
         #change_keys_http2db(content, http2db_tenant, reverse=True)
-        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE= where_) 
+        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE= where_)
         if deleted == 0 and netmap_id:
-            bottle.abort(HTTP_Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.iteritems())) )
+            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), where_.iteritems())) )
         if netmap_id:
             return format_out({"result": "netmap %s deleted" % netmap_id})
         else:
@@ -859,7 +680,7 @@ def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
@@ -867,7 +688,7 @@ def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
     try:
         netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
-        convert_datetime2str(netmaps)
+        utils.convert_float_timestamp2str(netmaps)
         utils.convert_str2boolean(netmaps, ('shared', 'multipoint') )
         data={'netmaps' : netmaps}
         return format_out(data)
@@ -878,7 +699,7 @@ def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
@@ -893,7 +714,7 @@ def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
     try:
         #obtain data, check that only one exist
         netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, http_content)
-        convert_datetime2str(netmaps)
+        utils.convert_float_timestamp2str(netmaps)
         utils.convert_str2boolean(netmaps, ('shared', 'multipoint') )
         data={'netmaps' : netmaps}
         return format_out(data)
@@ -904,7 +725,7 @@ def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
@@ -916,7 +737,7 @@ def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
     r = utils.remove_extra_items(http_content, netmap_edit_schema)
     if r:
         logger.debug("Remove received extra items %s", str(r))
-    
+
     #obtain data, check that only one exist
     try:
         nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, http_content)
@@ -928,8 +749,8 @@ def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
-    
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
 def http_action_datacenter_id(tenant_id, datacenter_id):
@@ -954,13 +775,13 @@ def http_action_datacenter_id(tenant_id, datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
 def http_delete_datacenter_id( datacenter_id):
     '''delete a datacenter from the database; either its uuid or name can be used'''
-    
+
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
     try:
         data = nfvo.delete_datacenter(mydb, datacenter_id)
@@ -972,7 +793,7 @@ def http_delete_datacenter_id( datacenter_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
@@ -996,7 +817,7 @@ def http_associate_datacenters(tenant_id, datacenter_id=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='PUT')
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
@@ -1019,7 +840,7 @@ def http_vim_account_edit(tenant_id, vim_account_id=None, datacenter_id=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
@@ -1037,7 +858,7 @@ def http_deassociate_datacenters(tenant_id, datacenter_id=None, vim_account_id=N
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/attach', method='POST')
 def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
@@ -1053,7 +874,7 @@ def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach', method='DELETE')
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach/<port_id>', method='DELETE')
@@ -1069,8 +890,8 @@ def http_delete_vim_net_sdn_detach(tenant_id, datacenter_id, network_id, port_id
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
-       
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
 def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
@@ -1085,7 +906,7 @@ def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
@@ -1101,7 +922,7 @@ def http_del_vim_items(tenant_id, datacenter_id, item, name):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
@@ -1118,7 +939,7 @@ def http_post_vim_items(tenant_id, datacenter_id, item):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
@@ -1135,7 +956,7 @@ def http_get_vnfs(tenant_id):
         vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, LIMIT=limit_)
         # change_keys_http2db(content, http2db_vnf, reverse=True)
         utils.convert_str2boolean(vnfs, ('public',))
-        convert_datetime2str(vnfs)
+        utils.convert_float_timestamp2str(vnfs)
         data={'vnfs': vnfs}
         return format_out(data)
     except bottle.HTTPError:
@@ -1145,7 +966,7 @@ def http_get_vnfs(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
@@ -1155,7 +976,7 @@ def http_get_vnf_id(tenant_id,vnf_id):
     try:
         vnf = nfvo.get_vnf_id(mydb,tenant_id,vnf_id)
         utils.convert_str2boolean(vnf, ('public',))
-        convert_datetime2str(vnf)
+        utils.convert_float_timestamp2str(vnf)
         return format_out(vnf)
     except bottle.HTTPError:
         raise
@@ -1164,7 +985,7 @@ def http_get_vnf_id(tenant_id,vnf_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
@@ -1187,7 +1008,7 @@ def http_post_vnfs(tenant_id):
             vnf_id = nfvo.new_vnf_v02(mydb,tenant_id,http_content)
         else:
             logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
-            bottle.abort(HTTP_Bad_Request, "Invalid schema version")
+            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
         return http_get_vnf_id(tenant_id, vnf_id)
     except bottle.HTTPError:
         raise
@@ -1196,7 +1017,7 @@ def http_post_vnfs(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/v3/<tenant_id>/vnfd', method='POST')
@@ -1214,7 +1035,7 @@ def http_post_vnfs_v3(tenant_id):
         for vnfd_uuid in vnfd_uuid_list:
             vnf = nfvo.get_vnf_id(mydb, tenant_id, vnfd_uuid)
             utils.convert_str2boolean(vnf, ('public',))
-            convert_datetime2str(vnf)
+            utils.convert_float_timestamp2str(vnf)
             vnfd_list.append(vnf["vnf"])
         return format_out({"vnfd": vnfd_list})
     except bottle.HTTPError:
@@ -1224,13 +1045,13 @@ def http_post_vnfs_v3(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
 def http_delete_vnf_id(tenant_id, vnf_id):
     '''delete a vnf from the database, and its images and flavors in the VIM when appropriate; either uuid or name can be used'''
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    #check valid tenant_id and deletes the vnf, including images, 
+    #check valid tenant_id and deletes the vnf, including images,
     try:
         data = nfvo.delete_vnf(mydb,tenant_id,vnf_id)
         #print json.dumps(data, indent=4)
@@ -1242,7 +1063,7 @@ def http_delete_vnf_id(tenant_id, vnf_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 #@bottle.route(url_base + '/<tenant_id>/hosts/topology', method='GET')
@@ -1258,12 +1079,12 @@ def http_get_hosts(tenant_id, datacenter):
         else:
             #openmano-gui is using a hardcoded value for the datacenter
             result, data = nfvo.get_hosts_info(mydb, tenant_id) #, datacenter)
-        
+
         if result < 0:
             #print "http_get_hosts error %d %s" % (-result, data)
             bottle.abort(-result, data)
         else:
-            convert_datetime2str(data)
+            utils.convert_float_timestamp2str(data)
             #print json.dumps(data, indent=4)
             return format_out(data)
     except bottle.HTTPError:
@@ -1273,7 +1094,7 @@ def http_get_hosts(tenant_id, datacenter):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<path:path>', method='OPTIONS')
@@ -1297,7 +1118,7 @@ def http_post_deploy(tenant_id):
     #r = utils.remove_extra_items(http_content, used_schema)
     #if r is not None: print "http_post_deploy: Warning: remove extra items ", r
     #print "http_post_deploy input: ",  http_content
-    
+
     try:
         scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
         instance = nfvo.start_scenario(mydb, tenant_id, scenario_id, http_content['name'], http_content['name'])
@@ -1310,7 +1131,7 @@ def http_post_deploy(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
@@ -1319,7 +1140,7 @@ def http_post_verify(tenant_id):
 #    '''post topology verify'''
 #    print "http_post_verify by tenant " + tenant_id + ' datacenter ' + datacenter
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
-    return 
+    return
 
 #
 # SCENARIOS
@@ -1342,7 +1163,7 @@ def http_post_scenarios(tenant_id):
             scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, "0.3")
         else:
             logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
-            bottle.abort(HTTP_Bad_Request, "Invalid schema version")
+            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
         #print json.dumps(data, indent=4)
         #return format_out(data)
         return http_get_scenario_id(tenant_id, scenario_id)
@@ -1353,7 +1174,7 @@ def http_post_scenarios(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/v3/<tenant_id>/nsd', method='POST')
 def http_post_nsds_v3(tenant_id):
@@ -1369,7 +1190,7 @@ def http_post_nsds_v3(tenant_id):
         nsd_list = []
         for nsd_uuid in nsd_uuid_list:
             scenario = mydb.get_scenario(nsd_uuid, tenant_id)
-            convert_datetime2str(scenario)
+            utils.convert_float_timestamp2str(scenario)
             nsd_list.append(scenario)
         data = {'nsd': nsd_list}
         return format_out(data)
@@ -1380,7 +1201,7 @@ def http_post_nsds_v3(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
@@ -1424,7 +1245,7 @@ def http_post_scenario_action(tenant_id, scenario_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
@@ -1434,14 +1255,14 @@ def http_get_scenarios(tenant_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         #obtain data
         s,w,l=filter_query_string(bottle.request.query, None,
                                   ('uuid', 'name', 'osm_id', 'description', 'tenant_id', 'created_at', 'public'))
         if tenant_id != "any":
             w["OR"] = {"tenant_id": tenant_id, "public": True}
         scenarios = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='scenarios')
-        convert_datetime2str(scenarios)
+        utils.convert_float_timestamp2str(scenarios)
         utils.convert_str2boolean(scenarios, ('public',) )
         data={'scenarios':scenarios}
         #print json.dumps(scenarios, indent=4)
@@ -1453,7 +1274,7 @@ def http_get_scenarios(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
@@ -1463,10 +1284,10 @@ def http_get_scenario_id(tenant_id, scenario_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         #obtain data
         scenario = mydb.get_scenario(scenario_id, tenant_id)
-        convert_datetime2str(scenario)
+        utils.convert_float_timestamp2str(scenario)
         data={'scenario' : scenario}
         return format_out(data)
     except bottle.HTTPError:
@@ -1476,7 +1297,7 @@ def http_get_scenario_id(tenant_id, scenario_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
@@ -1498,7 +1319,7 @@ def http_delete_scenario_id(tenant_id, scenario_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
@@ -1521,7 +1342,7 @@ def http_put_scenario_id(tenant_id, scenario_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/instances', method='POST')
 def http_post_instances(tenant_id):
@@ -1535,17 +1356,17 @@ def http_post_instances(tenant_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         data = nfvo.create_instance(mydb, tenant_id, http_content["instance"])
         return format_out(data)
     except bottle.HTTPError:
         raise
     except (nfvo.NfvoException, db_base_Exception) as e:
-        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)))
+        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)), exc_info=True)
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 #
 # INSTANCES
@@ -1556,13 +1377,13 @@ def http_get_instances(tenant_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         #obtain data
         s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
         if tenant_id != "any":
             w['tenant_id'] = tenant_id
         instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
-        convert_datetime2str(instances)
+        utils.convert_float_timestamp2str(instances)
         utils.convert_str2boolean(instances, ('public',) )
         data={'instances':instances}
         return format_out(data)
@@ -1573,7 +1394,7 @@ def http_get_instances(tenant_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
@@ -1584,7 +1405,7 @@ def http_get_instance_id(tenant_id, instance_id):
 
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         if tenant_id == "any":
             tenant_id = None
 
@@ -1598,7 +1419,7 @@ def http_get_instance_id(tenant_id, instance_id):
                         index = iface["ip_address"].find(";")
                         if index >= 0:
                             iface["ip_address"] = iface["ip_address"][:index]
-        convert_datetime2str(instance)
+        utils.convert_float_timestamp2str(instance)
         # print json.dumps(instance, indent=4)
         return format_out(instance)
     except bottle.HTTPError:
@@ -1608,7 +1429,7 @@ def http_get_instance_id(tenant_id, instance_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
@@ -1618,7 +1439,7 @@ def http_delete_instance_id(tenant_id, instance_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
         if tenant_id == "any":
             tenant_id = None
         #obtain data
@@ -1631,7 +1452,7 @@ def http_delete_instance_id(tenant_id, instance_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
@@ -1651,13 +1472,13 @@ def http_post_instance_scenario_action(tenant_id, instance_id):
     try:
         #check valid tenant_id
         if tenant_id != "any":
-            nfvo.check_tenant(mydb, tenant_id) 
+            nfvo.check_tenant(mydb, tenant_id)
 
         #print "http_post_instance_scenario_action input: ", http_content
         #obtain data
         instance = mydb.get_instance_scenario(instance_id, tenant_id)
         instance_id = instance["uuid"]
-        
+
         data = nfvo.instance_action(mydb, tenant_id, instance_id, http_content)
         return format_out(data)
     except bottle.HTTPError:
@@ -1667,7 +1488,7 @@ def http_post_instance_scenario_action(tenant_id, instance_id):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 
 @bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='GET')
@@ -1693,34 +1514,17 @@ def http_get_instance_scenario_action(tenant_id, instance_id, action_id=None):
         bottle.abort(e.http_code, str(e))
     except Exception as e:
         logger.error("Unexpected exception: ", exc_info=True)
-        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
-
-def remove_clear_passwd(data):
-    """
-    Removes clear passwords from the data received
-    :param data: data with clear password
-    :return: data without the password information
-    """
-
-    passw = ['password: ', 'passwd: ']
+        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
-    for pattern in passw:
-        init = data.find(pattern)
-        while init != -1:
-            end = data.find('\n', init)
-            data = data[:init] + '{}******'.format(pattern) + data[end:]
-            init += 1
-            init = data.find(pattern, init)
-    return data
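The removed password-scrubbing helper masks everything after a 'password: ' or 'passwd: ' label up to the end of that line, so a confidential dump such as the hypothetical one below is logged with the secret blanked out:

dump = "user: admin\npassword: s3cret\nvim_url: http://10.0.0.1:5000/v2.0\n"
# remove_clear_passwd(dump)
# -> "user: admin\npassword: ******\nvim_url: http://10.0.0.1:5000/v2.0\n"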
 
 @bottle.error(400)
-@bottle.error(401) 
-@bottle.error(404) 
+@bottle.error(401)
+@bottle.error(404)
 @bottle.error(403)
-@bottle.error(405) 
+@bottle.error(405)
 @bottle.error(406)
 @bottle.error(409)
-@bottle.error(503) 
+@bottle.error(503)
 @bottle.error(500)
 def error400(error):
     e={"error":{"code":error.status_code, "type":error.status, "description":error.body}}
index 0c8cef6..f625b4f 100644 (file)
@@ -28,13 +28,11 @@ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$16-sep-2014 22:05:01$"
 
 # import imp
-import json
+import json
 import yaml
 import utils
 from utils import deprecated
 import vim_thread
-from db_base import HTTP_Unauthorized, HTTP_Bad_Request, HTTP_Internal_Server_Error, HTTP_Not_Found,\
-    HTTP_Conflict, HTTP_Method_Not_Allowed
 import console_proxy_thread as cli
 import vimconn
 import logging
@@ -56,8 +54,22 @@ from pyangbind.lib.serialise import pybindJSONDecoder
 from copy import deepcopy
 
 
+# WIM
+import wim.wimconn as wimconn
+import wim.wim_thread as wim_thread
+from .http_tools import errors as httperrors
+from .wim.engine import WimEngine
+from .wim.persistence import WimPersistence
+from copy import deepcopy
+#
+
 global global_config
 global vimconn_imported
+# WIM
+global wim_engine
+wim_engine  = None
+global wimconn_imported
+#
 global logger
 global default_volume_size
 default_volume_size = '5' #size in GB
@@ -68,16 +80,21 @@ global_config = None
 vimconn_imported = {}   # dictionary with VIM type as key, loaded module as value
 vim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-VIMs
 vim_persistent_info = {}
+# WIM
+wimconn_imported = {}   # dictionary with WIM type as key, loaded module as value
+wim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-WIMs
+wim_persistent_info = {}
+#
+
 logger = logging.getLogger('openmano.nfvo')
 task_lock = Lock()
 last_task_id = 0.0
 db = None
 db_lock = Lock()
 
-class NfvoException(Exception):
-    def __init__(self, message, http_code):
-        self.http_code = http_code
-        Exception.__init__(self, message)
+
+class NfvoException(httperrors.HttpMappedError):
+    """Common Class for NFVO errors"""
 
 
 def get_task_id():
@@ -116,13 +133,32 @@ def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id
     vim_threads["names"].append(name)
     return name
 
+# -- Move
+def get_non_used_wim_name(wim_name, wim_id, tenant_name, tenant_id):
+    name = wim_name[:16]
+    if name not in wim_threads["names"]:
+        wim_threads["names"].append(name)
+        return name
+    name = wim_name[:16] + "." + tenant_name[:16]
+    if name not in wim_threads["names"]:
+        wim_threads["names"].append(name)
+        return name
+    name = wim_id + "-" + tenant_id
+    wim_threads["names"].append(name)
+    return name
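Like its VIM counterpart above, the helper cycles through three naming schemes to keep WIM thread names unique (identifiers below are made up):

# wim_threads["names"] == ["TNet"]                      already taken
# get_non_used_wim_name("TNet", "wim-uuid-1", "osm", "tenant-uuid-1")
#     -> "TNet.osm"                                     name.tenant still free
# a second call with the same arguments
#     -> "wim-uuid-1-tenant-uuid-1"                     falls back to the raw ids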
 
-def start_service(mydb):
+
+def start_service(mydb, persistence=None, wim=None):
     global db, global_config
     db = nfvo_db.nfvo_db()
     db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
     global ovim
 
+    if persistence:
+        persistence.lock = db_lock
+    else:
+        persistence = WimPersistence(db, lock=db_lock)
+
     # Initialize openvim for SDN control
     # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
     # TODO: review ovim.py to delete not needed configuration
@@ -143,9 +179,14 @@ def start_service(mydb):
     try:
         # starts ovim library
         ovim = ovim_module.ovim(ovim_configuration)
+
+        global wim_engine
+        wim_engine = wim or WimEngine(persistence)
+        wim_engine.ovim = ovim
+
         ovim.start_service()
 
-        #delete old unneeded vim_actions
+        #delete old unneeded vim_wim_actions
         clean_db(mydb)
 
         # starts vim_threads
@@ -176,13 +217,13 @@ def start_service(mydb):
                     # if module_info and module_info[0]:
                     #    file.close(module_info[0])
                     raise NfvoException("Unknown vim type '{}'. Cannot open file '{}.py'; {}: {}".format(
-                        vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)
+                        vim["type"], module, type(e).__name__, str(e)), httperrors.Bad_Request)
 
             thread_id = vim['datacenter_tenant_id']
             vim_persistent_info[thread_id] = {}
             try:
                 #if not tenant:
-                #    return -HTTP_Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
+                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
                 myvim = vimconn_imported[ vim["type"] ].vimconnector(
                     uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                     tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
@@ -196,13 +237,15 @@ def start_service(mydb):
                                                                                vim['datacenter_id'], e))
             except Exception as e:
                 raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
-                                    HTTP_Internal_Server_Error)
+                                    httperrors.Internal_Server_Error)
             thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'],
                                                 vim['vim_tenant_id'])
             new_thread = vim_thread.vim_thread(task_lock, thread_name, vim['datacenter_name'],
                                                vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
             new_thread.start()
             vim_threads["running"][thread_id] = new_thread
+
+        wim_engine.start_threads()
     except db_base_Exception as e:
         raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
     except ovim_module.ovimException as e:
@@ -213,17 +256,21 @@ def start_service(mydb):
                             msg=message[22:-3], dbname=global_config["db_ovim_name"],
                             dbuser=global_config["db_ovim_user"], dbpass=global_config["db_ovim_passwd"],
                             ver=message[-3:-1], dbhost=global_config["db_ovim_host"])
-        raise NfvoException(message, HTTP_Bad_Request)
+        raise NfvoException(message, httperrors.Bad_Request)
 
 
 def stop_service():
     global ovim, global_config
     if ovim:
         ovim.stop_service()
-    for thread_id,thread in vim_threads["running"].items():
+    for thread_id, thread in vim_threads["running"].items():
         thread.insert_task("exit")
         vim_threads["deleting"][thread_id] = thread
     vim_threads["running"] = {}
+
+    if wim_engine:
+        wim_engine.stop_threads()
+
     if global_config and global_config.get("console_thread"):
         for thread in global_config["console_thread"]:
             thread.terminate = True
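Taken together, these hunks let callers inject the WIM collaborators instead of having start_service build them. A minimal sketch of both paths, assuming mydb is an already connected nfvo_db connector, global_config has been loaded as usual, and the installed package layout matches the module names used here (constructor arguments beyond those visible in this change are not covered):

from osm_ro import nfvo
from osm_ro.wim.engine import WimEngine
from osm_ro.wim.persistence import WimPersistence

# Default path: start_service builds WimPersistence(db, lock=db_lock) and
# WimEngine(persistence) itself, then calls wim_engine.start_threads():
#     nfvo.start_service(mydb)

# Injection path (e.g. unit tests): pre-built collaborators are honoured and
# start_service merely attaches its db_lock to the given persistence object.
persistence = WimPersistence(mydb)      # hypothetical: only the db argument is shown in this change
engine = WimEngine(persistence)
nfvo.start_service(mydb, persistence=persistence, wim=engine)

# ... serve requests ...

nfvo.stop_service()                     # now also stops the WIM threads via wim_engine.stop_threads()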
@@ -238,21 +285,21 @@ def clean_db(mydb):
     :param mydb: database connector
     :return: None
     """
-    # get and delete unused vim_actions: all elements deleted, one week before, instance not present
+    # get and delete unused vim_wim_actions: all elements deleted, one week before, instance not present
     now = t.time()-3600*24*7
     instance_action_id = None
     nb_deleted = 0
     while True:
         actions_to_delete = mydb.get_rows(
             SELECT=("item", "item_id", "instance_action_id"),
-            FROM="vim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
+            FROM="vim_wim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
                     "left join instance_scenarios as i on ia.instance_id=i.uuid",
             WHERE={"va.action": "DELETE", "va.modified_at<": now, "i.uuid": None,
                    "va.status": ("DONE", "SUPERSEDED")},
             LIMIT=100
         )
         for to_delete in actions_to_delete:
-            mydb.delete_row(FROM="vim_actions", WHERE=to_delete)
+            mydb.delete_row(FROM="vim_wim_actions", WHERE=to_delete)
             if instance_action_id != to_delete["instance_action_id"]:
                 instance_action_id = to_delete["instance_action_id"]
                 mydb.delete_row(FROM="instance_actions", WHERE={"uuid": instance_action_id})
@@ -260,8 +307,7 @@ def clean_db(mydb):
         if len(actions_to_delete) < 100:
             break
     if nb_deleted:
-        logger.debug("Removed {} unused vim_actions".format(nb_deleted))
-
+        logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
 
 
 def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
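clean_db purges the renamed vim_wim_actions table in batches of 100 so that a large backlog never becomes a single huge transaction. Stripped of the project-specific mydb helpers, the pattern is roughly this generic sketch:

    def purge_in_batches(fetch_batch, delete_row, batch_size=100):
        """Delete rows returned by fetch_batch() until a batch comes back short."""
        removed = 0
        while True:
            batch = fetch_batch(limit=batch_size)
            for row in batch:
                delete_row(row)
                removed += 1
            if len(batch) < batch_size:
                return removed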
@@ -357,7 +403,7 @@ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, da
                                             vim["type"], module, type(e).__name__, str(e)))
                         continue
                     raise NfvoException("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
-                                            vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)
+                                            vim["type"], module, type(e).__name__, str(e)), httperrors.Bad_Request)
 
             try:
                 if 'datacenter_tenant_id' in vim:
@@ -368,7 +414,7 @@ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, da
                 else:
                     persistent_info = {}
                 #if not tenant:
-                #    return -HTTP_Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
+                #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
                 vim_dict[ vim['datacenter_id'] ] = vimconn_imported[ vim["type"] ].vimconnector(
                                 uuid=vim['datacenter_id'], name=vim['datacenter_name'],
                                 tenant_id=vim.get('vim_tenant_id',vim_tenant),
@@ -381,7 +427,7 @@ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, da
                 if ignore_errors:
                     logger.error("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)))
                     continue
-                http_code = HTTP_Internal_Server_Error
+                http_code = httperrors.Internal_Server_Error
                 if isinstance(e, vimconn.vimconnException):
                     http_code = e.http_code
                 raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), http_code)
@@ -446,7 +492,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                     raise NfvoException(
                         "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
                             vnfc["name"], interface["name"]),
-                        HTTP_Bad_Request)
+                        httperrors.Bad_Request)
                 name_dict[ interface["name"] ] = "underlay"
         #bridge interfaces
         for interface in vnfc.get("bridge-ifaces",() ):
@@ -454,7 +500,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                 raise NfvoException(
                     "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
                         vnfc["name"], interface["name"]),
-                    HTTP_Bad_Request)
+                    httperrors.Bad_Request)
             name_dict[ interface["name"] ] = "overlay"
         vnfc_interfaces[ vnfc["name"] ] = name_dict
         # check bood-data info
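The checks in check_vnf_descriptor build, per VNFC, a map from interface name to "underlay" (numa interfaces) or "overlay" (bridge-ifaces) and reject any name used twice. The duplicate detection on its own reduces to something like this illustrative helper (not project code):

    def assert_unique(names, owner):
        seen = set()
        for name in names:
            if name in seen:
                raise ValueError("interface name '{}' already used in {}".format(name, owner))
            seen.add(name)
        return seen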
@@ -463,7 +509,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
         #     if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
         #         raise NfvoException(
         #             "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
-        #             HTTP_Bad_Request)
+        #             httperrors.Bad_Request)
 
     #check if the info in external_connections matches with the one in the vnfcs
     name_list=[]
@@ -472,20 +518,20 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
             raise NfvoException(
                 "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
                     external_connection["name"]),
-                HTTP_Bad_Request)
+                httperrors.Bad_Request)
         name_list.append(external_connection["name"])
         if external_connection["VNFC"] not in vnfc_interfaces:
             raise NfvoException(
                 "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
                     external_connection["name"], external_connection["VNFC"]),
-                HTTP_Bad_Request)
+                httperrors.Bad_Request)
 
         if external_connection["local_iface_name"] not in vnfc_interfaces[ external_connection["VNFC"] ]:
             raise NfvoException(
                 "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                     external_connection["name"],
                     external_connection["local_iface_name"]),
-                HTTP_Bad_Request )
+                httperrors.Bad_Request )
 
     #check if the info in internal_connections matches with the one in the vnfcs
     name_list=[]
@@ -494,7 +540,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
             raise NfvoException(
                 "Error at vnf:internal-connections:name, value '%s' already used as an internal-connection".format(
                     internal_connection["name"]),
-                HTTP_Bad_Request)
+                httperrors.Bad_Request)
         name_list.append(internal_connection["name"])
         #We should check that internal-connections of type "ptp" have only 2 elements
 
@@ -504,7 +550,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                     internal_connection["name"],
                     'ptp' if vnf_descriptor_version==1 else 'e-line',
                     'data' if vnf_descriptor_version==1 else "e-lan"),
-                HTTP_Bad_Request)
+                httperrors.Bad_Request)
         for port in internal_connection["elements"]:
             vnf = port["VNFC"]
             iface = port["local_iface_name"]
@@ -512,13 +558,13 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                 raise NfvoException(
                     "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
                         internal_connection["name"], vnf),
-                    HTTP_Bad_Request)
+                    httperrors.Bad_Request)
             if iface not in vnfc_interfaces[ vnf ]:
                 raise NfvoException(
                     "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
                         internal_connection["name"], iface),
-                    HTTP_Bad_Request)
-                return -HTTP_Bad_Request,
+                    httperrors.Bad_Request)
+                return -httperrors.Bad_Request,
             if vnf_descriptor_version==1 and "type" not in internal_connection:
                 if vnfc_interfaces[vnf][iface] == "overlay":
                     internal_connection["type"] = "bridge"
@@ -536,7 +582,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                         internal_connection["name"],
                         iface, 'bridge' if vnf_descriptor_version==1 else 'overlay',
                         'data' if vnf_descriptor_version==1 else 'underlay'),
-                    HTTP_Bad_Request)
+                    httperrors.Bad_Request)
             if (internal_connection.get("type") == "bridge" or internal_connection.get("implementation") == "overlay") and \
                 vnfc_interfaces[vnf][iface] == "underlay":
                 raise NfvoException(
@@ -544,7 +590,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                         internal_connection["name"], iface,
                         'data' if vnf_descriptor_version==1 else 'underlay',
                         'bridge' if vnf_descriptor_version==1 else 'overlay'),
-                    HTTP_Bad_Request)
+                    httperrors.Bad_Request)
 
 
 def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
@@ -589,7 +635,7 @@ def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vi
                 vim_images = vim.get_image_list(filter_dict)
                 #logger.debug('>>>>>>>> VIM images: %s', str(vim_images))
                 if len(vim_images) > 1:
-                    raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), HTTP_Conflict)
+                    raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), httperrors.Conflict)
                 elif len(vim_images) == 0:
                     raise vimconn.vimconnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
                 else:
@@ -845,7 +891,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
         try:
             pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True)
         except Exception as e:
-            raise NfvoException("Error. Invalid VNF descriptor format " + str(e), HTTP_Bad_Request)
+            raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
         db_vnfs = []
         db_nets = []
         db_vms = []
@@ -929,7 +975,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
                                             "'{}'. Reference to a non-existing 'ip_profiles'".format(
                                                 str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
                 else:  #check no ip-address has been defined
                     for icp in vld.get("internal-connection-point").itervalues():
@@ -937,7 +983,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                             raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' "
                                             "contains an ip-address but no ip-profile has been defined at VLD".format(
                                                 str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
 
             # connection points vaiable declaration
             cp_name2iface_uuid = {}
@@ -1086,7 +1132,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
                                             "-interface':'type':'{}'. Interface type is not supported".format(
                                                 vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
 
                     if iface.get("mgmt-interface"):
                         db_interface["type"] = "mgmt"
@@ -1120,7 +1166,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                                                 " at connection-point".format(
                                                     vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                     cp=iface.get("vnfd-connection-point-ref")),
-                                                HTTP_Bad_Request)
+                                                httperrors.Bad_Request)
                     elif iface.get("internal-connection-point-ref"):
                         try:
                             for icp_descriptor in vdu_descriptor["internal-connection-point"]:
@@ -1155,7 +1201,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                                                 " {msg}".format(
                                                     vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                     cp=iface.get("internal-connection-point-ref"), msg=str(e)),
-                                                HTTP_Bad_Request)
+                                                httperrors.Bad_Request)
                     if iface.get("position"):
                         db_interface["created_at"] = int(iface.get("position")) * 50
                     if iface.get("mac-address"):
@@ -1240,12 +1286,12 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
                                             "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
                                                 vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     if vdu_id2db_table_index[vdu_id]:
                         db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
                     # TODO consider the case of isolation and not colocation
                     # if pg.get("strategy") == "ISOLATION":
-                    
+
             # VNF mgmt configuration
             mgmt_access = {}
             if vnfd["mgmt-interface"].get("vdu-id"):
@@ -1254,7 +1300,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
                                         "'{vdu}'. Reference to a non-existing vdu".format(
                                             vnf=vnfd_id, vdu=mgmt_vdu_id),
-                                        HTTP_Bad_Request)
+                                        httperrors.Bad_Request)
                 mgmt_access["vm_id"] = vdu_id2uuid[vnfd["mgmt-interface"]["vdu-id"]]
                 # if only one cp is defined by this VDU, mark this interface as of type "mgmt"
                 if vdu_id2cp_name.get(mgmt_vdu_id):
@@ -1268,7 +1314,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
                                         "Reference to a non-existing connection-point".format(
                                             vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
-                                        HTTP_Bad_Request)
+                                        httperrors.Bad_Request)
                 mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
                 mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
                 # mark this interface as of type mgmt
@@ -1307,7 +1353,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
         raise
     except Exception as e:
         logger.error("Exception {}".format(e))
-        raise  # NfvoException("Exception {}".format(e), HTTP_Bad_Request)
+        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
 
 
 @deprecated("Use new_vnfd_v3")
@@ -1323,7 +1369,7 @@ def new_vnf(mydb, tenant_id, vnf_descriptor):
         if "tenant_id" in vnf_descriptor["vnf"]:
             if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                 raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
-                                    HTTP_Unauthorized)
+                                    httperrors.Unauthorized)
         else:
             vnf_descriptor['vnf']['tenant_id'] = tenant_id
         # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
@@ -1381,9 +1427,9 @@ def new_vnf(mydb, tenant_id, vnf_descriptor):
             #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
             #    if result2:
             #        print "Error creating flavor: unknown processor model. Rollback successful."
-            #        return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
             #    else:
-            #        return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
             myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done
 
             if 'numas' in vnfc and len(vnfc['numas'])>0:
@@ -1440,7 +1486,7 @@ def new_vnf(mydb, tenant_id, vnf_descriptor):
             error_text = "Exception at database"
         elif isinstance(e, KeyError):
             error_text = "KeyError exception "
-            e.http_code = HTTP_Internal_Server_Error
+            e.http_code = httperrors.Internal_Server_Error
         else:
             error_text = "Exception at VIM"
         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
@@ -1461,7 +1507,7 @@ def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
         if "tenant_id" in vnf_descriptor["vnf"]:
             if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
                 raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
-                                    HTTP_Unauthorized)
+                                    httperrors.Unauthorized)
         else:
             vnf_descriptor['vnf']['tenant_id'] = tenant_id
         # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
@@ -1518,9 +1564,9 @@ def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
             #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
             #    if result2:
             #        print "Error creating flavor: unknown processor model. Rollback successful."
-            #        return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
             #    else:
-            #        return -HTTP_Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+            #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
             myflavorDict['extended']['processor_ranking'] = 100  #Hardcoded value, while we decide when the mapping is done
 
             if 'numas' in vnfc and len(vnfc['numas'])>0:
@@ -1576,7 +1622,7 @@ def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
             error_text = "Exception at database"
         elif isinstance(e, KeyError):
             error_text = "KeyError exception "
-            e.http_code = HTTP_Internal_Server_Error
+            e.http_code = httperrors.Internal_Server_Error
         else:
             error_text = "Exception at VIM"
         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
@@ -1605,7 +1651,7 @@ def get_vnf_id(mydb, tenant_id, vnf_id):
                     'boot_data'),
             WHERE={'vnfs.uuid': vnf_id} )
     if len(content) != 0:
-        #raise NfvoException("vnf '{}' not found".format(vnf_id), HTTP_Not_Found)
+        #raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
     # change boot_data into boot-data
         for vm in content:
             if vm.get("boot_data"):
@@ -1629,7 +1675,7 @@ def get_vnf_id(mydb, tenant_id, vnf_id):
         if len(ipprofiles)==1:
             net["ip_profile"] = ipprofiles[0]
         elif len(ipprofiles)>1:
-            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), HTTP_Bad_Request)
+            raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
 
 
     #TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address) and include them in the output.
@@ -1673,7 +1719,7 @@ def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
 
     deleted = mydb.delete_row_by_id('vnfs', vnf_id)
     if deleted == 0:
-        raise NfvoException("vnf '{}' not found".format(vnf_id), HTTP_Not_Found)
+        raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
 
     undeletedItems = []
     for flavor in flavorList:
@@ -1755,7 +1801,7 @@ def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
     if result < 0:
         return result, vims
     elif result == 0:
-        return -HTTP_Not_Found, "datacenter '%s' not found" % datacenter_name
+        return -httperrors.Not_Found, "datacenter '%s' not found" % datacenter_name
     myvim = vims.values()[0]
     result,servers =  myvim.get_hosts_info()
     if result < 0:
@@ -1767,10 +1813,10 @@ def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
 def get_hosts(mydb, nfvo_tenant_id):
     vims = get_vim(mydb, nfvo_tenant_id)
     if len(vims) == 0:
-        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), HTTP_Not_Found)
+        raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), httperrors.Not_Found)
     elif len(vims)>1:
         #print "nfvo.datacenter_action() error. Several datacenters found"
-        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
+        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
     myvim = vims.values()[0]
     try:
         hosts =  myvim.get_hosts()
@@ -1812,7 +1858,7 @@ def new_scenario(mydb, tenant_id, topo):
         if "tenant_id" in topo:
             if topo["tenant_id"] != tenant_id:
                 raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
-                                    HTTP_Unauthorized)
+                                    httperrors.Unauthorized)
     else:
         tenant_id=None
 
@@ -1844,15 +1890,15 @@ def new_scenario(mydb, tenant_id, topo):
             error_text += " 'VNF model' " +  vnf['VNF model']
             where['name'] = vnf['VNF model']
         if len(where) == 1:
-            raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, HTTP_Bad_Request)
+            raise NfvoException("Descriptor need a 'vnf_id' or 'VNF model' field at " + error_pos, httperrors.Bad_Request)
 
         vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
                                FROM='vnfs',
                                WHERE=where)
         if len(vnf_db)==0:
-            raise NfvoException("unknown" + error_text + " at " + error_pos, HTTP_Not_Found)
+            raise NfvoException("unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
         elif len(vnf_db)>1:
-            raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", HTTP_Conflict)
+            raise NfvoException("more than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
         vnf['uuid']=vnf_db[0]['uuid']
         vnf['description']=vnf_db[0]['description']
         #get external interfaces
@@ -1878,7 +1924,7 @@ def new_scenario(mydb, tenant_id, topo):
         con_type = conections[k].get("type", "link")
         if con_type != "link":
             if k in other_nets:
-                raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), HTTP_Bad_Request)
+                raise NfvoException("Format error. Reapeted network name at 'topology':'connections':'{}'".format(str(k)), httperrors.Bad_Request)
             other_nets[k] = {'external': False}
             if conections[k].get("graph"):
                 other_nets[k]["graph"] =   conections[k]["graph"]
@@ -1901,10 +1947,10 @@ def new_scenario(mydb, tenant_id, topo):
         for iface in ifaces_list:
             if iface[0] not in vnfs and iface[0] not in other_nets :
                 raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
-                                                                                        str(k), iface[0]), HTTP_Not_Found)
+                                                                                        str(k), iface[0]), httperrors.Not_Found)
             if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
                 raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
-                                                                                        str(k), iface[0], iface[1]), HTTP_Not_Found)
+                                                                                        str(k), iface[0], iface[1]), httperrors.Not_Found)
 
 #1.5 unify connections from the pair list to a consolidated list
     index=0
@@ -1941,13 +1987,13 @@ def new_scenario(mydb, tenant_id, topo):
             if 'name' not in net:
                 net['name']=k
             if 'model' not in net:
-                raise NfvoException("needed a 'model' at " + error_pos, HTTP_Bad_Request)
+                raise NfvoException("needed a 'model' at " + error_pos, httperrors.Bad_Request)
             if net['model']=='bridge_net':
                 net['type']='bridge';
             elif net['model']=='dataplane_net':
                 net['type']='data';
             else:
-                raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, HTTP_Not_Found)
+                raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, httperrors.Not_Found)
         else: #external
 #IF we do not want to check that external network exist at datacenter
             pass
@@ -1961,17 +2007,17 @@ def new_scenario(mydb, tenant_id, topo):
 #                 error_text += " 'model' " +  net['model']
 #                 WHERE_['name'] = net['model']
 #             if len(WHERE_) == 0:
-#                 return -HTTP_Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
+#                 return -httperrors.Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
 #             r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
 #                 FROM='datacenter_nets', WHERE=WHERE_ )
 #             if r<0:
 #                 print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
 #             elif r==0:
 #                 print "nfvo.new_scenario Error" +error_text+ " is not present at database"
-#                 return -HTTP_Bad_Request, "unknown " +error_text+ " at " + error_pos
+#                 return -httperrors.Bad_Request, "unknown " +error_text+ " at " + error_pos
 #             elif r>1:
 #                 print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
-#                 return -HTTP_Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
+#                 return -httperrors.Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
 #             other_nets[k].update(net_db[0])
 #ENDIF
     net_list={}
@@ -1988,7 +2034,7 @@ def new_scenario(mydb, tenant_id, topo):
                     if other_net_index>=0:
                         error_text="There is some interface connected both to net '%s' and net '%s'" % (con[other_net_index][0], net_key)
                         #print "nfvo.new_scenario " + error_text
-                        raise NfvoException(error_text, HTTP_Bad_Request)
+                        raise NfvoException(error_text, httperrors.Bad_Request)
                     else:
                         other_net_index = index
                         net_target = net_key
@@ -2012,7 +2058,7 @@ def new_scenario(mydb, tenant_id, topo):
 #                     if type_=='data' and other_nets[net_target]['type']=="ptp":
 #                         error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
 #                         print "nfvo.new_scenario " + error_text
-#                         return -HTTP_Bad_Request, error_text
+#                         return -httperrors.Bad_Request, error_text
 #ENDIF
                 for iface in con:
                     vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
@@ -2034,7 +2080,7 @@ def new_scenario(mydb, tenant_id, topo):
                 if net_type_bridge and net_type_data:
                     error_text = "Error connection interfaces of bridge type with data type. Firs node %s, iface %s" % (iface[0], iface[1])
                     #print "nfvo.new_scenario " + error_text
-                    raise NfvoException(error_text, HTTP_Bad_Request)
+                    raise NfvoException(error_text, httperrors.Bad_Request)
                 elif net_type_bridge:
                     type_='bridge'
                 else:
@@ -2045,7 +2091,7 @@ def new_scenario(mydb, tenant_id, topo):
             error_text = "Error connection node %s : %s does not match any VNF or interface" % (iface[0], iface[1])
             #print "nfvo.new_scenario " + error_text
             #raise e
-            raise NfvoException(error_text, HTTP_Bad_Request)
+            raise NfvoException(error_text, httperrors.Bad_Request)
 
 #1.8: Connect to management net all not already connected interfaces of type 'mgmt'
     #1.8.1 obtain management net
@@ -2092,7 +2138,7 @@ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
             if scenario["tenant_id"] != tenant_id:
                 # print "nfvo.new_scenario_v02() tenant '%s' not found" % tenant_id
                 raise NfvoException("VNF can not have a different tenant owner '{}', must be '{}'".format(
-                                                    scenario["tenant_id"], tenant_id), HTTP_Unauthorized)
+                                                    scenario["tenant_id"], tenant_id), httperrors.Unauthorized)
     else:
         tenant_id=None
 
@@ -2108,14 +2154,14 @@ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
             error_text += " 'vnf_name' " + vnf['vnf_name']
             where['name'] = vnf['vnf_name']
         if len(where) == 1:
-            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, HTTP_Bad_Request)
+            raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, httperrors.Bad_Request)
         vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
                                FROM='vnfs',
                                WHERE=where)
         if len(vnf_db) == 0:
-            raise NfvoException("Unknown" + error_text + " at " + error_pos, HTTP_Not_Found)
+            raise NfvoException("Unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
         elif len(vnf_db) > 1:
-            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", HTTP_Conflict)
+            raise NfvoException("More than one" + error_text + " at " + error_pos + " Concrete with 'vnf_id'", httperrors.Conflict)
         vnf['uuid'] = vnf_db[0]['uuid']
         vnf['description'] = vnf_db[0]['description']
         vnf['ifaces'] = {}
@@ -2143,17 +2189,17 @@ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
                     error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' not match any VNF at 'vnfs'".format(
                         net_name, vnf)
                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, HTTP_Not_Found)
+                    raise NfvoException(error_text, httperrors.Not_Found)
                 if iface not in scenario["vnfs"][vnf]['ifaces']:
                     error_text = "Error at 'networks':'{}':'interfaces':'{}' interface not match any VNF interface"\
                         .format(net_name, iface)
                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, HTTP_Bad_Request)
+                    raise NfvoException(error_text, httperrors.Bad_Request)
                 if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
                     error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network"\
                                  "'{}'".format(net_name, iface,scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
-                    raise NfvoException(error_text, HTTP_Bad_Request)
+                    raise NfvoException(error_text, httperrors.Bad_Request)
                 scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
                 scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
                 iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
@@ -2166,7 +2212,7 @@ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
             error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
                 .format(net_name)
             # logger.debug("nfvo.new_scenario " + error_text)
-            raise NfvoException(error_text, HTTP_Bad_Request)
+            raise NfvoException(error_text, httperrors.Bad_Request)
         elif net_type_bridge:
             type_ = 'bridge'
         else:
@@ -2177,19 +2223,19 @@ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
                 error_text = "Error connecting interfaces of data type to a network declared as 'underlay' at "\
                              "'network':'{}'".format(net_name)
                 # logger.debug(error_text)
-                raise NfvoException(error_text, HTTP_Bad_Request)
+                raise NfvoException(error_text, httperrors.Bad_Request)
             elif type_ != "bridge" and net["implementation"] == "overlay":
                 error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
                              "'network':'{}'".format(net_name)
                 # logger.debug(error_text)
-                raise NfvoException(error_text, HTTP_Bad_Request)
+                raise NfvoException(error_text, httperrors.Bad_Request)
             net.pop("implementation")
         if "type" in net and version == "0.3":   # for v0.3
             if type_ == "data" and net["type"] == "e-line":
                 error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
                              "'e-line' at 'network':'{}'".format(net_name)
                 # logger.debug(error_text)
-                raise NfvoException(error_text, HTTP_Bad_Request)
+                raise NfvoException(error_text, httperrors.Bad_Request)
             elif type_ == "ptp" and net["type"] == "e-lan":
                 type_ = "data"
 
@@ -2217,7 +2263,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
         try:
             pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd)
         except Exception as e:
-            raise NfvoException("Error. Invalid NS descriptor format: " + str(e), HTTP_Bad_Request)
+            raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
         db_scenarios = []
         db_sce_nets = []
         db_sce_vnfs = []
@@ -2260,7 +2306,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                     raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'constituent-vnfd':'vnfd-id-ref':"
                                         "'{}'. Reference to a non-existing VNFD in the catalog".format(
                                             str(nsd["id"]), str(vnf["vnfd-id-ref"])[:255]),
-                                        HTTP_Bad_Request)
+                                        httperrors.Bad_Request)
                 sce_vnf_uuid = str(uuid4())
                 uuid_list.append(sce_vnf_uuid)
                 db_sce_vnf = {
@@ -2329,7 +2375,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                         raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'ip-profile-ref':'{}'."
                                             " Reference to a non-existing 'ip_profiles'".format(
                                                 str(nsd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["sce_net_id"] = sce_net_uuid
                 elif vld.get("vim-network-name"):
                     db_sce_net["vim_network_name"] = get_str(vld, "vim-network-name", 255)
@@ -2343,7 +2389,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                             "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                             "'nsd':'constituent-vnfd'".format(
                                                 str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
 
                     existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
                                                     FROM="interfaces as i join vms on i.vm_id=vms.uuid",
@@ -2356,7 +2402,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                             "connection-point name at VNFD '{}'".format(
                                                 str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
                                                 str(iface.get("vnfd-id-ref"))[:255]),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     interface_uuid = existing_ifaces[0]["uuid"]
                     if existing_ifaces[0]["iface_type"] == "data" and not db_sce_net["type"]:
                         db_sce_net["type"] = "data"
@@ -2411,7 +2457,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                                 "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                                 "'nsd':'constituent-vnfd'".format(
                                                     str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
-                                                HTTP_Bad_Request)
+                                                httperrors.Bad_Request)
 
                         existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
                                                         FROM="interfaces as i join vms on i.vm_id=vms.uuid",
@@ -2424,7 +2470,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                                 "connection-point name at VNFD '{}'".format(
                                                     str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
                                                     str(iface.get("vnfd-id-ref"))[:255]),
-                                                HTTP_Bad_Request)
+                                                httperrors.Bad_Request)
                         interface_uuid = existing_ifaces[0]["uuid"]
                         sce_rsp_hop_uuid = str(uuid4())
                         uuid_list.append(sce_rsp_hop_uuid)
@@ -2450,7 +2496,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                             "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
                                             "'nsd':'constituent-vnfd'".format(
                                                 str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
                                                     FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                     WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
@@ -2462,7 +2508,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                             "connection-point name at VNFD '{}'".format(
                                                 str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
                                                 str(iface.get("vnfd-id-ref"))[:255]),
-                                            HTTP_Bad_Request)
+                                            httperrors.Bad_Request)
                     interface_uuid = existing_ifaces[0]["uuid"]
 
                     db_sce_classifier = {
@@ -2518,7 +2564,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
         raise
     except Exception as e:
         logger.error("Exception {}".format(e))
-        raise  # NfvoException("Exception {}".format(e), HTTP_Bad_Request)
+        raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
 
 
 def edit_scenario(mydb, tenant_id, scenario_id, data):
@@ -2579,7 +2625,7 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
                     error_text = "Error, datacenter '%s' does not have external network '%s'." % (datacenter_name, sce_net['name'])
                     _, message = rollback(mydb, vims, rollbackList)
                     logger.error("nfvo.start_scenario: %s", error_text)
-                    raise NfvoException(error_text, HTTP_Bad_Request)
+                    raise NfvoException(error_text, httperrors.Bad_Request)
                 logger.debug("Using existent VIM network for scenario %s. Network id %s", scenarioDict['name'],sce_net['vim_id'])
                 auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']
 
@@ -2626,7 +2672,7 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
             # check if there is enough availability zones available at vim level.
             if myvims[datacenter_id].availability_zone and vnf_availability_zones:
                 if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
-                    raise NfvoException('No enough availability zones at VIM for this deployment', HTTP_Bad_Request)
+                    raise NfvoException('Not enough availability zones at VIM for this deployment', httperrors.Bad_Request)
 
             for vm in sce_vnf['vms']:
                 i += 1
@@ -2682,9 +2728,9 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
                         e_text = "Cannot determine the interface type PF or VF of VNF '%s' VM '%s' iface '%s'" %(sce_vnf['name'], vm['name'], iface['internal_name'])
                         if flavor_dict.get('extended')==None:
                             raise NfvoException(e_text  + "After database migration some information is not available. \
-                                    Try to delete and create the scenarios and VNFs again", HTTP_Conflict)
+                                    Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
                         else:
-                            raise NfvoException(e_text, HTTP_Internal_Server_Error)
+                            raise NfvoException(e_text, httperrors.Internal_Server_Error)
                     if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
                         netDict["type"]="virtual"
                     if "vpci" in iface and iface["vpci"] is not None:
@@ -2861,12 +2907,12 @@ def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_i
                      "join datacenters as d on d.uuid=dt.datacenter_id",
                 WHERE=where_)
             if len(datacenters) > 1:
-                raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
+                raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
             elif datacenters:
                 thread_id = datacenters[0]["datacenter_tenant_id"]
                 thread = vim_threads["running"].get(thread_id)
         if not thread:
-            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
+            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
         return thread_id, thread
     except db_base_Exception as e:
         raise NfvoException("{} {}".format(type(e).__name__ , str(e)), e.http_code)
@@ -2887,10 +2933,10 @@ def get_datacenter_uuid(mydb, tenant_id, datacenter_id_name):
         from_ = 'datacenters as d'
     vimaccounts = mydb.get_rows(FROM=from_, SELECT=("d.uuid as uuid, d.name as name",), WHERE=WHERE_dict )
     if len(vimaccounts) == 0:
-        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
+        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
     elif len(vimaccounts)>1:
         #print "nfvo.datacenter_action() error. Several datacenters found"
-        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
+        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
     return vimaccounts[0]["uuid"], vimaccounts[0]["name"]
 
 
@@ -2904,10 +2950,10 @@ def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extr
             datacenter_name = datacenter_id_name
     vims = get_vim(mydb, tenant_id, datacenter_id, datacenter_name, **extra_filter)
     if len(vims) == 0:
-        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
+        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
     elif len(vims)>1:
         #print "nfvo.datacenter_action() error. Several datacenters found"
-        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
+        raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
     return vims.keys()[0], vims.values()[0]
 
 
@@ -3018,7 +3064,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     break
             else:
                 raise NfvoException("Invalid scenario network name or id '{}' at instance:networks".format(net_name),
-                                    HTTP_Bad_Request)
+                                    httperrors.Bad_Request)
             if "sites" not in net_instance_desc:
                 net_instance_desc["sites"] = [ {} ]
             site_without_datacenter_field = False
@@ -3034,7 +3080,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                 else:
                     if site_without_datacenter_field:
                         raise NfvoException("Found more than one entries without datacenter field at "
-                                            "instance:networks:{}:sites".format(net_name), HTTP_Bad_Request)
+                                            "instance:networks:{}:sites".format(net_name), httperrors.Bad_Request)
                     site_without_datacenter_field = True
                     site["datacenter"] = default_datacenter_id   # change name to id
 
@@ -3043,7 +3089,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                 if vnf_name == scenario_vnf['member_vnf_index'] or vnf_name == scenario_vnf['uuid'] or vnf_name == scenario_vnf['name']:
                     break
             else:
-                raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), HTTP_Bad_Request)
+                raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), httperrors.Bad_Request)
             if "datacenter" in vnf_instance_desc:
                 # Add this datacenter to myvims
                 vnf_instance_desc["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
@@ -3058,7 +3104,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     if net_id == scenario_net['osm_id'] or net_id == scenario_net['uuid'] or net_id == scenario_net["name"]:
                         break
                 else:
-                    raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), HTTP_Bad_Request)
+                    raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
                 if net_instance_desc.get("vim-network-name"):
                     scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
                 if net_instance_desc.get("name"):
@@ -3075,7 +3121,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     if vdu_id == scenario_vm['osm_id'] or vdu_id == scenario_vm["name"]:
                         break
                 else:
-                    raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), HTTP_Bad_Request)
+                    raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
                 scenario_vm["instance_parameters"] = vdu_instance_desc
                 for iface_id, iface_instance_desc in vdu_instance_desc.get("interfaces", {}).iteritems():
                     for scenario_interface in scenario_vm['interfaces']:
@@ -3083,7 +3129,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                             scenario_interface.update(iface_instance_desc)
                             break
                     else:
-                        raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), HTTP_Bad_Request)
+                        raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
 
         # 0.1 parse cloud-config parameters
         cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
@@ -3198,7 +3244,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         if number_mgmt_networks > 1:
                             raise NfvoException("Found several VLD of type mgmt. "
                                                 "You must concrete what vim-network must be use for each one",
-                                                HTTP_Bad_Request)
+                                                httperrors.Bad_Request)
                         create_network = False
                         lookfor_network = True
                         if vim["config"].get("management_network_id"):
@@ -3450,10 +3496,17 @@ def create_instance(mydb, tenant_id, instance_dict):
                 }
                 task_index += 1
                 db_vim_actions.append(db_vim_action)
+        db_instance_action["number_tasks"] = task_index
+
+        # --> WIM
+        wan_links = wim_engine.derive_wan_links(db_instance_nets, tenant_id)
+        wim_actions = wim_engine.create_actions(wan_links)
+        wim_actions, db_instance_action = (
+            wim_engine.incorporate_actions(wim_actions, db_instance_action))
+        # <-- WIM
 
         scenarioDict["datacenter2tenant"] = myvim_threads_id
 
-        db_instance_action["number_tasks"] = task_index
         db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
         db_instance_scenario['datacenter_id'] = default_datacenter_id
         db_tables=[
@@ -3468,7 +3521,8 @@ def create_instance(mydb, tenant_id, instance_dict):
             {"instance_sfs": db_instance_sfs},
             {"instance_classifications": db_instance_classifications},
             {"instance_sfps": db_instance_sfps},
-            {"vim_actions": db_vim_actions}
+            {"instance_wim_nets": wan_links},
+            {"vim_wim_actions": db_vim_actions + wim_actions}
         ]
 
         logger.debug("create_instance done DB tables: %s",
@@ -3477,6 +3531,8 @@ def create_instance(mydb, tenant_id, instance_dict):
         for myvim_thread_id in myvim_threads_id.values():
             vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
 
+        wim_engine.dispatch(wim_actions)
+
         returned_instance = mydb.get_instance_scenario(instance_uuid)
         returned_instance["action_id"] = instance_action_id
         return returned_instance
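The WIM integration in create_instance follows the strict order established by the hunks above: derive the WAN links implied by the instance networks, turn them into actions, fold those actions into the same instance_action bookkeeping used for VIM tasks, persist everything (instance_wim_nets plus the merged vim_wim_actions rows), and only then dispatch to the worker threads. Condensed, with the wim_engine signatures inferred solely from their use in this diff (treat them as assumptions):

    wan_links = wim_engine.derive_wan_links(db_instance_nets, tenant_id)
    wim_actions = wim_engine.create_actions(wan_links)
    wim_actions, db_instance_action = wim_engine.incorporate_actions(wim_actions, db_instance_action)
    # rows persisted through db_tables: {"instance_wim_nets": wan_links},
    #                                   {"vim_wim_actions": db_vim_actions + wim_actions}
    wim_engine.dispatch(wim_actions)   # hand the persisted tasks to the WIM threads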
@@ -3490,6 +3546,7 @@ def create_instance(mydb, tenant_id, instance_dict):
             error_text = "Exception"
         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
         # logger.error("create_instance: %s", error_text)
+        logger.exception(e)
         raise NfvoException(error_text, e.http_code)
 
 
@@ -3607,7 +3664,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
     # check if there is enough availability zones available at vim level.
     if myvims[datacenter_id].availability_zone and vnf_availability_zones:
         if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
-            raise NfvoException('No enough availability zones at VIM for this deployment', HTTP_Bad_Request)
+            raise NfvoException('Not enough availability zones at VIM for this deployment', httperrors.Bad_Request)
 
     if sce_vnf.get("datacenter"):
         vim = myvims[sce_vnf["datacenter"]]
@@ -3667,7 +3724,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
         extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',),
                                              WHERE={'vim_id': flavor_id})
         if not extended_flavor_dict:
-            raise NfvoException("flavor '{}' not found".format(flavor_id), HTTP_Not_Found)
+            raise NfvoException("flavor '{}' not found".format(flavor_id), httperrors.Not_Found)
 
         # extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0])
         myVMDict['disks'] = None
@@ -3718,9 +3775,9 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
                     sce_vnf['name'], vm['name'], iface['internal_name'])
                 if flavor_dict.get('extended') == None:
                     raise NfvoException(e_text + "After database migration some information is not available. \
-                            Try to delete and create the scenarios and VNFs again", HTTP_Conflict)
+                            Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
                 else:
-                    raise NfvoException(e_text, HTTP_Internal_Server_Error)
+                    raise NfvoException(e_text, httperrors.Internal_Server_Error)
             if netDict["use"] == "mgmt":
                 is_management_vm = True
                 netDict["type"] = "virtual"
@@ -3864,6 +3921,14 @@ def delete_instance(mydb, tenant_id, instance_id):
     instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
     # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
     tenant_id = instanceDict["tenant_id"]
+
+    # --> WIM
+    # We need to retrieve the WIM actions now, before the instance_scenario is
+    # deleted, because the ON CASCADE rules would also delete the related
+    # instance_wim_nets records from the database
+    wim_actions = wim_engine.delete_actions(instance_scenario_id=instance_id)
+    # <-- WIM
+
     # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
     # 1. Delete from Database
     message = mydb.delete_instance_scenario(instance_id, tenant_id)
@@ -4136,9 +4201,15 @@ def delete_instance(mydb, tenant_id, instance_id):
         db_vim_actions.append(db_vim_action)
 
     db_instance_action["number_tasks"] = task_index
+
+    # --> WIM
+    wim_actions, db_instance_action = (
+        wim_engine.incorporate_actions(wim_actions, db_instance_action))
+    # <-- WIM
+
     db_tables = [
         {"instance_actions": db_instance_action},
-        {"vim_actions": db_vim_actions}
+        {"vim_wim_actions": db_vim_actions + wim_actions}
     ]
 
     logger.debug("delete_instance done DB tables: %s",
@@ -4147,6 +4218,8 @@ def delete_instance(mydb, tenant_id, instance_id):
     for myvim_thread_id in vimthread_affected.keys():
         vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
 
+    wim_engine.dispatch(wim_actions)
+
     if len(error_msg) > 0:
         return 'action_id={} instance {} deleted but some elements could not be deleted, or already deleted '\
                '(error: 404) from VIM: {}'.format(instance_action_id, message, error_msg)
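
A self-contained sketch (all names invented) of the ordering constraint that the WIM comment above describes: wim_engine.delete_actions() has to read instance_wim_nets before mydb.delete_instance_scenario() lets the ON CASCADE rules wipe those rows.

class _Db(object):
    """Stand-in for mydb with a single instance_wim_nets row."""
    def __init__(self):
        self.instance_wim_nets = [{"uuid": "wimnet-1", "instance_scenario_id": "inst-1"}]

    def delete_instance_scenario(self, instance_id, tenant_id):
        # simulate the ON CASCADE rule: dependent rows vanish with the scenario
        self.instance_wim_nets = [row for row in self.instance_wim_nets
                                  if row["instance_scenario_id"] != instance_id]
        return "instance {} deleted".format(instance_id)


class _WimEngine(object):
    def __init__(self, db):
        self.db = db

    def delete_actions(self, instance_scenario_id):
        return [{"action": "DELETE", "item": "instance_wim_nets", "item_id": row["uuid"]}
                for row in self.db.instance_wim_nets
                if row["instance_scenario_id"] == instance_scenario_id]


db = _Db()
wim_engine = _WimEngine(db)
wim_actions = wim_engine.delete_actions(instance_scenario_id="inst-1")   # before the delete
message = db.delete_instance_scenario("inst-1", "tenant-x")
assert wim_actions                                                       # derived while the rows still existed
assert not wim_engine.delete_actions(instance_scenario_id="inst-1")      # too late afterwards
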
@@ -4347,7 +4420,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
     #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
     vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
     if len(vims) == 0:
-        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), HTTP_Not_Found)
+        raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), httperrors.Not_Found)
     myvim = vims.values()[0]
     vm_result = {}
     vm_error = 0
@@ -4381,7 +4454,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                     ORDER_BY="vms.created_at"
                 )
                 if not target_vms:
-                    raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), HTTP_Not_Found)
+                    raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
             else:
                 if not osm_vdu_id and not member_vnf_index:
                     raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
@@ -4395,7 +4468,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                     ORDER_BY="ivms.created_at"
                 )
                 if not target_vms:
-                    raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), HTTP_Not_Found)
+                    raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), httperrors.Not_Found)
                 vdu_id = target_vms[-1]["uuid"]
             target_vm = target_vms[-1]
             datacenter = target_vm["datacenter_id"]
@@ -4436,7 +4509,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
 
                 vim_action_to_clone = mydb.get_rows(FROM="vim_actions", WHERE=where)
                 if not vim_action_to_clone:
-                    raise NfvoException("Cannot find the vim_action at database with {}".format(where), HTTP_Internal_Server_Error)
+                    raise NfvoException("Cannot find the vim_action at database with {}".format(where), httperrors.Internal_Server_Error)
                 vim_action_to_clone = vim_action_to_clone[0]
                 extra = yaml.safe_load(vim_action_to_clone["extra"])
 
@@ -4569,13 +4642,13 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                                                           password=password, ro_key=priv_RO_key)
                             else:
                                 raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
-                                                    HTTP_Internal_Server_Error)
+                                                    httperrors.Internal_Server_Error)
                         except KeyError:
                             raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
-                                                HTTP_Internal_Server_Error)
+                                                httperrors.Internal_Server_Error)
                     else:
                         raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
-                                            HTTP_Internal_Server_Error)
+                                            httperrors.Internal_Server_Error)
                 else:
                     data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
                     if "console" in action_dict:
@@ -4590,7 +4663,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                                                     }
                             vm_ok +=1
                         elif data["server"]=="127.0.0.1" or data["server"]=="localhost":
-                            vm_result[ vm['uuid'] ] = {"vim_result": -HTTP_Unauthorized,
+                            vm_result[ vm['uuid'] ] = {"vim_result": -httperrors.Unauthorized,
                                                        "description": "this console is only reachable by local interface",
                                                        "name":vm['name']
                                                     }
@@ -4635,9 +4708,9 @@ def instance_action_get(mydb, nfvo_tenant, instance_id, action_id):
     rows = mydb.get_rows(FROM="instance_actions", WHERE=filter)
     if action_id:
         if not rows:
-            raise NfvoException("Not found any action with this criteria", HTTP_Not_Found)
-        vim_actions = mydb.get_rows(FROM="vim_actions", WHERE={"instance_action_id": action_id})
-        rows[0]["vim_actions"] = vim_actions
+            raise NfvoException("Not found any action with this criteria", httperrors.Not_Found)
+        vim_wim_actions = mydb.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": action_id})
+        rows[0]["vim_wim_actions"] = vim_wim_actions
     return {"actions": rows}
 
 
@@ -4662,15 +4735,15 @@ def create_or_use_console_proxy_thread(console_server, console_port):
             #port used, try with another
             continue
         except cli.ConsoleProxyException as e:
-            raise NfvoException(str(e), HTTP_Bad_Request)
-    raise NfvoException("Not found any free 'http_console_ports'", HTTP_Conflict)
+            raise NfvoException(str(e), httperrors.Bad_Request)
+    raise NfvoException("Not found any free 'http_console_ports'", httperrors.Conflict)
 
 
 def check_tenant(mydb, tenant_id):
     '''check that tenant exists at database'''
     tenant = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
     if not tenant:
-        raise NfvoException("tenant '{}' not found".format(tenant_id), HTTP_Not_Found)
+        raise NfvoException("tenant '{}' not found".format(tenant_id), httperrors.Not_Found)
     return
 
 def new_tenant(mydb, tenant_dict):
@@ -4713,7 +4786,7 @@ def new_datacenter(mydb, datacenter_descriptor):
         #    file.close(module_info[0])
         raise NfvoException("Incorrect datacenter type '{}'. Plugin '{}.py' not installed".format(datacenter_type,
                                                                                                   module),
-                            HTTP_Bad_Request)
+                            httperrors.Bad_Request)
 
     datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
     if sdn_port_mapping:
@@ -4758,7 +4831,7 @@ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
                 for k in to_delete:
                     del config_dict[k]
             except Exception as e:
-                raise NfvoException("Bad format at datacenter:config " + str(e), HTTP_Bad_Request)
+                raise NfvoException("Bad format at datacenter:config " + str(e), httperrors.Bad_Request)
         if config_dict:
             datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
         else:
@@ -4767,7 +4840,7 @@ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
             try:
                 datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
             except ovimException as e:
-                raise NfvoException("Error deleting datacenter-port-mapping " + str(e), HTTP_Conflict)
+                raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)
 
     mydb.update_rows('datacenters', datacenter_descriptor, where)
     if new_sdn_port_mapping:
@@ -4776,7 +4849,7 @@ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
         except ovimException as e:
             # Rollback
             mydb.update_rows('datacenters', datacenter, where)
-            raise NfvoException("Error adding datacenter-port-mapping " + str(e), HTTP_Conflict)
+            raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
     return datacenter_id
 
 
@@ -4797,7 +4870,7 @@ def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None,
     try:
         if not datacenter_id:
             if not vim_id:
-                raise NfvoException("You must provide 'vim_id", http_code=HTTP_Bad_Request)
+                raise NfvoException("You must provide 'vim_id", http_code=httperrors.Bad_Request)
             datacenter_id = vim_id
         datacenter_id, datacenter_name = get_datacenter_uuid(mydb, None, datacenter_id)
 
@@ -4812,7 +4885,7 @@ def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None,
         # #check that this association does not exist before
         # tenants_datacenters = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
         # if len(tenants_datacenters)>0:
-        #     raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(datacenter_id, tenant_dict['uuid']), HTTP_Conflict)
+        #     raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Conflict)
 
         vim_tenant_id_exist_atdb=False
         if not create_vim_tenant:
@@ -4838,7 +4911,7 @@ def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None,
                 datacenter_name = myvim["name"]
                 vim_tenant = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
             except vimconn.vimconnException as e:
-                raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant, str(e)), HTTP_Internal_Server_Error)
+                raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_id, str(e)), httperrors.Internal_Server_Error)
             datacenter_tenants_dict = {}
             datacenter_tenants_dict["created"]="true"
 
@@ -4872,7 +4945,7 @@ def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None,
         vim_threads["running"][thread_id] = new_thread
         return thread_id
     except vimconn.vimconnException as e:
-        raise NfvoException(str(e), HTTP_Bad_Request)
+        raise NfvoException(str(e), httperrors.Bad_Request)
 
 
 def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None, name=None, vim_tenant=None,
@@ -4887,9 +4960,9 @@ def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None
         where_["dt.datacenter_id"] = datacenter_id
     vim_accounts = mydb.get_rows(SELECT="dt.uuid as uuid, config", FROM=from_, WHERE=where_)
     if not vim_accounts:
-        raise NfvoException("vim_account not found for this tenant", http_code=HTTP_Not_Found)
+        raise NfvoException("vim_account not found for this tenant", http_code=httperrors.Not_Found)
     elif len(vim_accounts) > 1:
-        raise NfvoException("found more than one vim_account for this tenant", http_code=HTTP_Conflict)
+        raise NfvoException("found more than one vim_account for this tenant", http_code=httperrors.Conflict)
     datacenter_tenant_id = vim_accounts[0]["uuid"]
     original_config = vim_accounts[0]["config"]
 
@@ -4933,7 +5006,7 @@ def delete_vim_account(mydb, tenant_id, vim_account_id, datacenter=None):
         tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
     tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
     if len(tenant_datacenter_list)==0 and tenant_uuid:
-        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(datacenter_id, tenant_dict['uuid']), HTTP_Not_Found)
+        raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Not_Found)
 
     #delete this association
     mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
@@ -4975,7 +5048,7 @@ def datacenter_action(mydb, tenant_id, datacenter, action_dict):
             #print content
         except vimconn.vimconnException as e:
             #logger.error("nfvo.datacenter_action() Not possible to get_network_list from VIM: %s ", str(e))
-            raise NfvoException(str(e), HTTP_Internal_Server_Error)
+            raise NfvoException(str(e), httperrors.Internal_Server_Error)
         #update nets Change from VIM format to NFVO format
         net_list=[]
         for net in nets:
@@ -5004,7 +5077,7 @@ def datacenter_action(mydb, tenant_id, datacenter, action_dict):
         return result
 
     else:
-        raise NfvoException("Unknown action " + str(action_dict), HTTP_Bad_Request)
+        raise NfvoException("Unknown action " + str(action_dict), httperrors.Bad_Request)
 
 
 def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
@@ -5034,11 +5107,11 @@ def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
         vim_nets = myvim.get_network_list(filter_dict=filter_dict)
     except vimconn.vimconnException as e:
         #logger.error("nfvo.datacenter_new_netmap() Not possible to get_network_list from VIM: %s ", str(e))
-        raise NfvoException(str(e), HTTP_Internal_Server_Error)
+        raise NfvoException(str(e), httperrors.Internal_Server_Error)
     if len(vim_nets)>1 and action_dict:
-        raise NfvoException("more than two networks found, specify with vim_id", HTTP_Conflict)
+        raise NfvoException("more than two networks found, specify with vim_id", httperrors.Conflict)
     elif len(vim_nets)==0: # and action_dict:
-        raise NfvoException("Not found a network at VIM with " + str(filter_dict), HTTP_Not_Found)
+        raise NfvoException("Not found a network at VIM with " + str(filter_dict), httperrors.Not_Found)
     net_list=[]
     for net in vim_nets:
         net_nfvo={'datacenter_id': datacenter_id}
@@ -5079,11 +5152,11 @@ def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
     # ensure the network is defined
     if len(network) == 0:
         raise NfvoException("Network {} is not present in the system".format(network_id),
-                            HTTP_Bad_Request)
+                            httperrors.Bad_Request)
 
     # ensure there is only one network with the provided name
     if len(network) > 1:
-        raise NfvoException("Multiple networks present in vim identified by {}".format(network_id), HTTP_Bad_Request)
+        raise NfvoException("Multiple networks present in vim identified by {}".format(network_id), httperrors.Bad_Request)
 
     # ensure it is a dataplane network
     if network[0]['type'] != 'data':
@@ -5116,7 +5189,7 @@ def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
         return sdn_net_id
     else:
         raise NfvoException("More than one SDN network is associated to vim network {}".format(
-            network_id), HTTP_Internal_Server_Error)
+            network_id), httperrors.Internal_Server_Error)
 
 def get_sdn_controller_id(mydb, datacenter):
     # Obtain sdn controller id
@@ -5130,12 +5203,12 @@ def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
     try:
         sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
         if not sdn_network_id:
-            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id), HTTP_Internal_Server_Error)
+            raise NfvoException("No SDN network is associated to vim-network {}".format(network_id), httperrors.Internal_Server_Error)
 
         #Obtain sdn controller id
         controller_id = get_sdn_controller_id(mydb, datacenter)
         if not controller_id:
-            raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter), HTTP_Internal_Server_Error)
+            raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter), httperrors.Internal_Server_Error)
 
         #Obtain sdn controller info
         sdn_controller = ovim.show_of_controller(controller_id)
@@ -5156,7 +5229,7 @@ def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
         result = ovim.new_port(port_data)
     except ovimException as e:
         raise NfvoException("ovimException attaching SDN network {} to vim network {}".format(
-            sdn_network_id, network_id) + str(e), HTTP_Internal_Server_Error)
+            sdn_network_id, network_id) + str(e), httperrors.Internal_Server_Error)
     except db_base_Exception as e:
         raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
             network_id) + str(e), e.http_code)
@@ -5170,7 +5243,7 @@ def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
         sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
         if not sdn_network_id:
             raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
-                                HTTP_Internal_Server_Error)
+                                httperrors.Internal_Server_Error)
         #in case no port_id is specified only ports marked as 'external_port' will be detached
         filter = {'name': 'external_port', 'net_id': sdn_network_id}
 
@@ -5178,11 +5251,11 @@ def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
         port_list = ovim.get_ports(columns={'uuid'}, filter=filter)
     except ovimException as e:
         raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
-                            HTTP_Internal_Server_Error)
+                            httperrors.Internal_Server_Error)
 
     if len(port_list) == 0:
         raise NfvoException("No ports attached to the network {} were found with the requested criteria".format(network_id),
-                            HTTP_Bad_Request)
+                            httperrors.Bad_Request)
 
     port_uuid_list = []
     for port in port_list:
@@ -5190,7 +5263,7 @@ def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
             port_uuid_list.append(port['uuid'])
             ovim.delete_port(port['uuid'])
         except ovimException as e:
-            raise NfvoException("ovimException deleting port {} for net {}. ".format(port['uuid'], network_id) + str(e), HTTP_Internal_Server_Error)
+            raise NfvoException("ovimException deleting port {} for net {}. ".format(port['uuid'], network_id) + str(e), httperrors.Internal_Server_Error)
 
     return 'Detached ports uuid: {}'.format(','.join(port_uuid_list))
 
@@ -5210,7 +5283,7 @@ def vim_action_get(mydb, tenant_id, datacenter, item, name):
 
             if len(content) == 0:
                 raise NfvoException("Network {} is not present in the system. ".format(name),
-                                    HTTP_Bad_Request)
+                                    httperrors.Bad_Request)
 
             #Update the networks with the attached ports
             for net in content:
@@ -5220,7 +5293,7 @@ def vim_action_get(mydb, tenant_id, datacenter, item, name):
                         #port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan'}, filter={'name': 'external_port', 'net_id': sdn_network_id})
                         port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan','name'}, filter={'net_id': sdn_network_id})
                     except ovimException as e:
-                        raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e), HTTP_Internal_Server_Error)
+                        raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e), httperrors.Internal_Server_Error)
                     #Remove field name and if port name is external_port save it as 'type'
                     for port in port_list:
                         if port['name'] == 'external_port':
@@ -5235,7 +5308,7 @@ def vim_action_get(mydb, tenant_id, datacenter, item, name):
 
             content = myvim.get_image_list(filter_dict=filter_dict)
         else:
-            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
+            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
         logger.debug("vim_action response %s", content) #update nets Change from VIM format to NFVO format
         if name and len(content)==1:
             return {item[:-1]: content[0]}
@@ -5260,9 +5333,9 @@ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
     logger.debug("vim_action_delete vim response: " + str(content))
     items = content.values()[0]
     if type(items)==list and len(items)==0:
-        raise NfvoException("Not found " + item, HTTP_Not_Found)
+        raise NfvoException("Not found " + item, httperrors.Not_Found)
     elif type(items)==list and len(items)>1:
-        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), HTTP_Not_Found)
+        raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), httperrors.Not_Found)
     else: # it is a dict
         item_id = items["id"]
         item_name = str(items.get("name"))
@@ -5278,7 +5351,7 @@ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
                 except ovimException as e:
                     raise NfvoException(
                         "ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
-                        HTTP_Internal_Server_Error)
+                        httperrors.Internal_Server_Error)
 
                 # By calling one by one all ports to be detached we ensure that not only the external_ports get detached
                 for port in port_list:
@@ -5297,7 +5370,7 @@ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
                 except ovimException as e:
                     logger.error("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e), exc_info=True)
                     raise NfvoException("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
-                                        HTTP_Internal_Server_Error)
+                                        httperrors.Internal_Server_Error)
 
             content = myvim.delete_network(item_id)
         elif item=="tenants":
@@ -5305,7 +5378,7 @@ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
         elif item == "images":
             content = myvim.delete_image(item_id)
         else:
-            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
+            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
     except vimconn.vimconnException as e:
         #logger.error( "vim_action Not possible to delete_{} {}from VIM: {} ".format(item, name, str(e)))
         raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)
@@ -5346,7 +5419,7 @@ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
                     logger.error("ovimException creating SDN network={} ".format(
                         sdn_network) + str(e), exc_info=True)
                     raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e),
-                                        HTTP_Internal_Server_Error)
+                                        httperrors.Internal_Server_Error)
 
                 # Save an entry in database mano_db, table instance_nets, to establish a dictionary vim_net_id <-> sdn_net_id
                 # use instance_scenario_id=None to distinguish from real instances of nets
@@ -5364,7 +5437,7 @@ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
             tenant = descriptor["tenant"]
             content = myvim.new_tenant(tenant["name"], tenant.get("description"))
         else:
-            raise NfvoException(item + "?", HTTP_Method_Not_Allowed)
+            raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
     except vimconn.vimconnException as e:
         raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)
 
@@ -5398,7 +5471,7 @@ def sdn_controller_delete(mydb, tenant_id, controller_id):
         if datacenter['config']:
             config = yaml.load(datacenter['config'])
             if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
-                raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, datacenter['uuid']), HTTP_Conflict)
+                raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, datacenter['uuid']), httperrors.Conflict)
 
     data = ovim.delete_of_controller(controller_id)
     msg = 'SDN controller {} deleted'.format(data)
@@ -5408,12 +5481,12 @@ def sdn_controller_delete(mydb, tenant_id, controller_id):
 def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
     controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid":datacenter_id})
     if len(controller) < 1:
-        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), HTTP_Not_Found)
+        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), httperrors.Not_Found)
 
     try:
         sdn_controller_id = yaml.load(controller[0]["config"])["sdn-controller"]
     except:
-        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id), HTTP_Bad_Request)
+        raise NfvoException("The datacenter {} has not an SDN controller associated".format(datacenter_id), httperrors.Bad_Request)
 
     sdn_controller = ovim.show_of_controller(sdn_controller_id)
     switch_dpid = sdn_controller["dpid"]
@@ -5429,7 +5502,7 @@ def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_map
             element["switch_mac"] = port.get("switch_mac")
             if not pci or not (element["switch_port"] or element["switch_mac"]):
                 raise NfvoException ("The mapping must contain the 'pci' and at least one of the elements 'switch_port'"
-                                     " or 'switch_mac'", HTTP_Bad_Request)
+                                     " or 'switch_mac'", httperrors.Bad_Request)
             for pci_expanded in utils.expand_brackets(pci):
                 element["pci"] = pci_expanded
                 maps.append(dict(element))
@@ -5456,10 +5529,10 @@ def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
             result["dpid"] = sdn_controller["dpid"]
 
     if result["sdn-controller"] == None:
-        raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id), HTTP_Bad_Request)
+        raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id), httperrors.Bad_Request)
     if result["dpid"] == None:
         raise NfvoException("It was not possible to determine DPID for SDN controller {}".format(result["sdn-controller"]),
-                        HTTP_Internal_Server_Error)
+                        httperrors.Internal_Server_Error)
 
     if len(maps) == 0:
         return result
@@ -5467,9 +5540,9 @@ def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
     ports_correspondence_dict = dict()
     for link in maps:
         if result["sdn-controller"] != link["ofc_id"]:
-            raise NfvoException("The sdn-controller specified for different port mappings differ", HTTP_Internal_Server_Error)
+            raise NfvoException("The sdn-controller specified for different port mappings differ", httperrors.Internal_Server_Error)
         if result["dpid"] != link["switch_dpid"]:
-            raise NfvoException("The dpid specified for different port mappings differ", HTTP_Internal_Server_Error)
+            raise NfvoException("The dpid specified for different port mappings differ", httperrors.Internal_Server_Error)
         element = dict()
         element["pci"] = link["pci"]
         if link["switch_port"]:
@@ -5508,10 +5581,10 @@ def create_RO_keypair(tenant_id):
     try:
         public_key = key.publickey().exportKey('OpenSSH')
         if isinstance(public_key, ValueError):
-            raise NfvoException("Unable to create public key: {}".format(public_key), HTTP_Internal_Server_Error)
+            raise NfvoException("Unable to create public key: {}".format(public_key), httperrors.Internal_Server_Error)
         private_key = key.exportKey(passphrase=tenant_id, pkcs=8)
     except (ValueError, NameError) as e:
-        raise NfvoException("Unable to create private key: {}".format(e), HTTP_Internal_Server_Error)
+        raise NfvoException("Unable to create private key: {}".format(e), httperrors.Internal_Server_Error)
     return public_key, private_key
 
 def decrypt_key (key, tenant_id):
@@ -5527,7 +5600,7 @@ def decrypt_key (key, tenant_id):
         key = RSA.importKey(key,tenant_id)
         unencrypted_key = key.exportKey('PEM')
         if isinstance(unencrypted_key, ValueError):
-            raise NfvoException("Unable to decrypt the private key: {}".format(unencrypted_key), HTTP_Internal_Server_Error)
+            raise NfvoException("Unable to decrypt the private key: {}".format(unencrypted_key), httperrors.Internal_Server_Error)
     except ValueError as e:
-        raise NfvoException("Unable to decrypt the private key: {}".format(e), HTTP_Internal_Server_Error)
+        raise NfvoException("Unable to decrypt the private key: {}".format(e), httperrors.Internal_Server_Error)
     return unencrypted_key
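
The two hunks above only swap the error constants, but the surrounding create_RO_keypair()/decrypt_key() calls are easy to reproduce in isolation. A standalone sketch of the same round trip, assuming the Crypto package (pycrypto or pycryptodome) is installed and that the key is generated with RSA.generate():

from Crypto.PublicKey import RSA

tenant_id = "11111111-2222-3333-4444-555555555555"        # used as the passphrase, as in the RO code
key = RSA.generate(2048)
public_key = key.publickey().exportKey('OpenSSH')          # what create_RO_keypair returns
private_key = key.exportKey(passphrase=tenant_id, pkcs=8)  # private key encrypted with the tenant uuid

# decrypt_key() later re-imports the private key using the tenant_id as passphrase
unencrypted_key = RSA.importKey(private_key, tenant_id).exportKey('PEM')
assert b'PRIVATE KEY' in unencrypted_key
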
index a40a804..9d52803 100644 (file)
@@ -34,12 +34,16 @@ import yaml
 import time
 #import sys, os
 
+from .http_tools import errors as httperrors
+
 tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios","instance_vms","instance_vnfs",
                            "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                            "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets",
-                           "instance_actions", "vim_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
+                           "instance_actions", "sce_vnffgs", "sce_rsps", "sce_rsp_hops",
                            "sce_classifiers", "sce_classifier_matches", "instance_sfis", "instance_sfs",
-                           "instance_classifications", "instance_sfps"]
+                           "instance_classifications", "instance_sfps", "wims", "wim_accounts", "wim_nfvo_tenants",
+                           "wim_port_mappings", "vim_wim_actions",
+                           "instance_wim_nets"]
 
 
 class nfvo_db(db_base.db_base):
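
The list above now registers the new WIM tables and drops vim_actions in favour of the merged vim_wim_actions table that nfvo.py reads and writes. A short sketch of how a consumer might tell the two kinds of rows apart, assuming (as the new schema suggests, not confirmed here) that VIM rows carry a datacenter_vim_id and WIM rows a wim_account_id:

def split_vim_wim_actions(vim_wim_actions):
    """Separate the merged rows back into VIM and WIM tasks (hypothetical helper)."""
    vim_rows = [row for row in vim_wim_actions if row.get("datacenter_vim_id")]
    wim_rows = [row for row in vim_wim_actions if row.get("wim_account_id")]
    return vim_rows, wim_rows


rows = [{"task_index": 0, "action": "CREATE", "item": "instance_nets",
         "datacenter_vim_id": "dc-tenant-1"},
        {"task_index": 1, "action": "CREATE", "item": "instance_wim_nets",
         "wim_account_id": "wim-account-1"}]
print(split_vim_wim_actions(rows))
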
@@ -55,7 +59,7 @@ class nfvo_db(db_base.db_base):
             created_time = time.time()
             try:
                 with self.con:
-            
+
                     myVNFDict = {}
                     myVNFDict["name"] = vnf_name
                     myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
@@ -63,22 +67,22 @@ class nfvo_db(db_base.db_base):
                     myVNFDict["description"] = vnf_descriptor['vnf']['description']
                     myVNFDict["class"] = vnf_descriptor['vnf'].get('class',"MISC")
                     myVNFDict["tenant_id"] = vnf_descriptor['vnf'].get("tenant_id")
-                    
+
                     vnf_id = self._new_row_internal('vnfs', myVNFDict, add_uuid=True, root_uuid=None, created_time=created_time)
                     #print "Adding new vms to the NFVO database"
                     #For each vm, we must create the appropriate vm in the NFVO database.
                     vmDict = {}
                     for _,vm in VNFCDict.iteritems():
                         #This code could make the name of the vms grow and grow.
-                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name  
+                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
                         #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
                         #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
                         vm["vnf_id"] = vnf_id
                         created_time += 0.00001
-                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time) 
+                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
                         #print "Internal vm id in NFVO DB: %s" % vm_id
                         vmDict[vm['name']] = vm_id
-                
+
                     #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
                     bridgeInterfacesDict = {}
                     for vm in vnf_descriptor['vnf']['VNFC']:
@@ -124,19 +128,19 @@ class nfvo_db(db_base.db_base):
                     if 'internal-connections' in vnf_descriptor['vnf']:
                         for net in vnf_descriptor['vnf']['internal-connections']:
                             #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-                            
+
                             myNetDict = {}
                             myNetDict["name"] = net['name']
                             myNetDict["description"] = net['description']
                             myNetDict["type"] = net['type']
                             myNetDict["vnf_id"] = vnf_id
-                            
+
                             created_time += 0.00001
                             net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-                                
+
                             for element in net['elements']:
                                 ifaceItem = {}
-                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])  
+                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                                 ifaceItem["internal_name"] = element['local_iface_name']
                                 #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                                 ifaceItem["vm_id"] = vmDict[element['VNFC']]
@@ -159,17 +163,17 @@ class nfvo_db(db_base.db_base):
                                     created_time_iface = bridgeiface['created_time']
                                 internalconnList.append(ifaceItem)
                             #print "Internal net id in NFVO DB: %s" % net_id
-                    
+
                     #print "Adding internal interfaces to the NFVO database (if any)"
                     for iface in internalconnList:
                         #print "Iface name: %s" % iface['internal_name']
                         iface_id = self._new_row_internal('interfaces', iface, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
                         #print "Iface id in NFVO DB: %s" % iface_id
-                    
+
                     #print "Adding external interfaces to the NFVO database"
                     for iface in vnf_descriptor['vnf']['external-connections']:
                         myIfaceDict = {}
-                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])  
+                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
                         myIfaceDict["internal_name"] = iface['local_iface_name']
                         #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
                         myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
@@ -193,13 +197,13 @@ class nfvo_db(db_base.db_base):
                         #print "Iface name: %s" % iface['name']
                         iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time = created_time_iface)
                         #print "Iface id in NFVO DB: %s" % iface_id
-                    
+
                     return vnf_id
-                
+
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
             tries -= 1
-        
+
     def new_vnf_as_a_whole2(self,nfvo_tenant,vnf_name,vnf_descriptor,VNFCDict):
         self.logger.debug("Adding new vnf to the NFVO database")
         tries = 2
@@ -207,7 +211,7 @@ class nfvo_db(db_base.db_base):
             created_time = time.time()
             try:
                 with self.con:
-                     
+
                     myVNFDict = {}
                     myVNFDict["name"] = vnf_name
                     myVNFDict["descriptor"] = vnf_descriptor['vnf'].get('descriptor')
@@ -222,15 +226,15 @@ class nfvo_db(db_base.db_base):
                     vmDict = {}
                     for _,vm in VNFCDict.iteritems():
                         #This code could make the name of the vms grow and grow.
-                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name  
+                        #If we agree to follow this convention, we should check with a regex that the vnfc name is not including yet the vnf name
                         #vm['name'] = "%s-%s" % (vnf_name,vm['name'])
                         #print "VM name: %s. Description: %s" % (vm['name'], vm['description'])
                         vm["vnf_id"] = vnf_id
                         created_time += 0.00001
-                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time) 
+                        vm_id = self._new_row_internal('vms', vm, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
                         #print "Internal vm id in NFVO DB: %s" % vm_id
                         vmDict[vm['name']] = vm_id
-                     
+
                     #Collect the bridge interfaces of each VM/VNFC under the 'bridge-ifaces' field
                     bridgeInterfacesDict = {}
                     for vm in vnf_descriptor['vnf']['VNFC']:
@@ -274,13 +278,13 @@ class nfvo_db(db_base.db_base):
                     if 'internal-connections' in vnf_descriptor['vnf']:
                         for net in vnf_descriptor['vnf']['internal-connections']:
                             #print "Net name: %s. Description: %s" % (net['name'], net['description'])
-                            
+
                             myNetDict = {}
                             myNetDict["name"] = net['name']
                             myNetDict["description"] = net['description']
                             if (net["implementation"] == "overlay"):
                                 net["type"] = "bridge"
-                                #It should give an error if the type is e-line. For the moment, we consider it as a bridge 
+                                #It should give an error if the type is e-line. For the moment, we consider it as a bridge
                             elif (net["implementation"] == "underlay"):
                                 if (net["type"] == "e-line"):
                                     net["type"] = "ptp"
@@ -289,10 +293,10 @@ class nfvo_db(db_base.db_base):
                             net.pop("implementation")
                             myNetDict["type"] = net['type']
                             myNetDict["vnf_id"] = vnf_id
-                            
+
                             created_time += 0.00001
                             net_id = self._new_row_internal('nets', myNetDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time)
-                            
+
                             if "ip-profile" in net:
                                 ip_profile = net["ip-profile"]
                                 myIPProfileDict = {}
@@ -305,13 +309,13 @@ class nfvo_db(db_base.db_base):
                                     myIPProfileDict["dhcp_enabled"] = ip_profile["dhcp"].get('enabled',"true")
                                     myIPProfileDict["dhcp_start_address"] = ip_profile["dhcp"].get('start-address',None)
                                     myIPProfileDict["dhcp_count"] = ip_profile["dhcp"].get('count',None)
-                                
+
                                 created_time += 0.00001
                                 ip_profile_id = self._new_row_internal('ip_profiles', myIPProfileDict)
-                                
+
                             for element in net['elements']:
                                 ifaceItem = {}
-                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])  
+                                #ifaceItem["internal_name"] = "%s-%s-%s" % (net['name'],element['VNFC'], element['local_iface_name'])
                                 ifaceItem["internal_name"] = element['local_iface_name']
                                 #ifaceItem["vm_id"] = vmDict["%s-%s" % (vnf_name,element['VNFC'])]
                                 ifaceItem["vm_id"] = vmDict[element['VNFC']]
@@ -335,11 +339,11 @@ class nfvo_db(db_base.db_base):
                                 #print "Iface name: %s" % iface['internal_name']
                                 iface_id = self._new_row_internal('interfaces', ifaceItem, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
                                 #print "Iface id in NFVO DB: %s" % iface_id
-                    
+
                     #print "Adding external interfaces to the NFVO database"
                     for iface in vnf_descriptor['vnf']['external-connections']:
                         myIfaceDict = {}
-                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])  
+                        #myIfaceDict["internal_name"] = "%s-%s-%s" % (vnf_name,iface['VNFC'], iface['local_iface_name'])
                         myIfaceDict["internal_name"] = iface['local_iface_name']
                         #myIfaceDict["vm_id"] = vmDict["%s-%s" % (vnf_name,iface['VNFC'])]
                         myIfaceDict["vm_id"] = vmDict[iface['VNFC']]
@@ -363,9 +367,9 @@ class nfvo_db(db_base.db_base):
                         #print "Iface name: %s" % iface['name']
                         iface_id = self._new_row_internal('interfaces', myIfaceDict, add_uuid=True, root_uuid=vnf_id, created_time=created_time_iface)
                         #print "Iface id in NFVO DB: %s" % iface_id
-                    
+
                     return vnf_id
-                
+
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
 #             except KeyError as e2:
@@ -388,7 +392,7 @@ class nfvo_db(db_base.db_base):
                              'name': scenario_dict['name'],
                              'description': scenario_dict['description'],
                              'public': scenario_dict.get('public', "false")}
-                    
+
                     scenario_uuid =  self._new_row_internal('scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
                     #sce_nets
                     for net in scenario_dict['nets'].values():
@@ -447,9 +451,9 @@ class nfvo_db(db_base.db_base):
                             created_time += 0.00001
                             iface_uuid = self._new_row_internal('sce_interfaces', INSERT_, add_uuid=True,
                                                                  root_uuid=scenario_uuid, created_time=created_time)
-                            
+
                     return scenario_uuid
-                    
+
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
             tries -= 1
@@ -465,7 +469,7 @@ class nfvo_db(db_base.db_base):
                     #check that scenario exist
                     tenant_id = scenario_dict.get('tenant_id')
                     scenario_uuid = scenario_dict['uuid']
-                    
+
                     where_text = "uuid='{}'".format(scenario_uuid)
                     if not tenant_id and tenant_id != "any":
                         where_text += " AND (tenant_id='{}' OR public='True')".format(tenant_id)
@@ -474,9 +478,9 @@ class nfvo_db(db_base.db_base):
                     self.cur.execute(cmd)
                     self.cur.fetchall()
                     if self.cur.rowcount==0:
-                        raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
                     elif self.cur.rowcount>1:
-                        raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
 
                     #scenario
                     nodes = {}
@@ -500,7 +504,7 @@ class nfvo_db(db_base.db_base):
                         item_changed += self._update_rows('sce_nets', node, WHERE_)
                         item_changed += self._update_rows('sce_vnfs', node, WHERE_, modified_time=modified_time)
                     return item_changed
-                    
+
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
             tries -= 1
@@ -509,7 +513,7 @@ class nfvo_db(db_base.db_base):
 #         '''Obtain the scenario instance information, filtering by one or several of the tenant, uuid or name
 #         instance_scenario_id is the uuid or the name if it is not a valid uuid format
 #         Only one scenario instance must match the filtering or an error is returned
-#         ''' 
+#         '''
 #         print "1******************************************************************"
 #         try:
 #             with self.con:
@@ -525,11 +529,11 @@ class nfvo_db(db_base.db_base):
 #                 self.cur.execute("SELECT * FROM instance_scenarios WHERE "+ where_text)
 #                 rows = self.cur.fetchall()
 #                 if self.cur.rowcount==0:
-#                     return -HTTP_Bad_Request, "No scenario instance found with this criteria " + where_text
+#                     return -httperrors.Bad_Request, "No scenario instance found with this criteria " + where_text
 #                 elif self.cur.rowcount>1:
-#                     return -HTTP_Bad_Request, "More than one scenario instance found with this criteria " + where_text
+#                     return -httperrors.Bad_Request, "More than one scenario instance found with this criteria " + where_text
 #                 instance_scenario_dict = rows[0]
-#                 
+#
 #                 #instance_vnfs
 #                 self.cur.execute("SELECT uuid,vnf_id FROM instance_vnfs WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
 #                 instance_scenario_dict['instance_vnfs'] = self.cur.fetchall()
@@ -537,17 +541,17 @@ class nfvo_db(db_base.db_base):
 #                     #instance_vms
 #                     self.cur.execute("SELECT uuid, vim_vm_id "+
 #                                 "FROM instance_vms  "+
-#                                 "WHERE instance_vnf_id='" + vnf['uuid'] +"'"  
+#                                 "WHERE instance_vnf_id='" + vnf['uuid'] +"'"
 #                                 )
 #                     vnf['instance_vms'] = self.cur.fetchall()
 #                 #instance_nets
 #                 self.cur.execute("SELECT uuid, vim_net_id FROM instance_nets WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
 #                 instance_scenario_dict['instance_nets'] = self.cur.fetchall()
-#                 
+#
 #                 #instance_interfaces
 #                 self.cur.execute("SELECT uuid, vim_interface_id, instance_vm_id, instance_net_id FROM instance_interfaces WHERE instance_scenario_id='"+ instance_scenario_dict['uuid'] + "'")
 #                 instance_scenario_dict['instance_interfaces'] = self.cur.fetchall()
-#                 
+#
 #                 db_base._convert_datetime2str(instance_scenario_dict)
 #                 db_base._convert_str2boolean(instance_scenario_dict, ('public','shared','external') )
 #                 print "2******************************************************************"
@@ -561,7 +565,7 @@ class nfvo_db(db_base.db_base):
         scenario_id is the uuid or the name if it is not a valid uuid format
         if datacenter_vim_id or datacenter_id is provided, it supplies additional vim_id fields with the matching vim uuid
         Only one scenario must match the filtering or an error is returned
-        ''' 
+        '''
         tries = 2
         while tries:
             try:
@@ -575,9 +579,9 @@ class nfvo_db(db_base.db_base):
                     self.cur.execute(cmd)
                     rows = self.cur.fetchall()
                     if self.cur.rowcount==0:
-                        raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("No scenario found with this criteria " + where_text, httperrors.Bad_Request)
                     elif self.cur.rowcount>1:
-                        raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("More than one scenario found with this criteria " + where_text, httperrors.Bad_Request)
                     scenario_dict = rows[0]
                     if scenario_dict["cloud_config"]:
                         scenario_dict["cloud-config"] = yaml.load(scenario_dict["cloud_config"])
@@ -627,17 +631,17 @@ class nfvo_db(db_base.db_base):
                             if datacenter_vim_id!=None:
                                 cmd = "SELECT vim_id FROM datacenters_images WHERE image_id='{}' AND datacenter_vim_id='{}'".format(vm['image_id'],datacenter_vim_id)
                                 self.logger.debug(cmd)
-                                self.cur.execute(cmd) 
+                                self.cur.execute(cmd)
                                 if self.cur.rowcount==1:
                                     vim_image_dict = self.cur.fetchone()
                                     vm['vim_image_id']=vim_image_dict['vim_id']
                                 cmd = "SELECT vim_id FROM datacenters_flavors WHERE flavor_id='{}' AND datacenter_vim_id='{}'".format(vm['flavor_id'],datacenter_vim_id)
                                 self.logger.debug(cmd)
-                                self.cur.execute(cmd) 
+                                self.cur.execute(cmd)
                                 if self.cur.rowcount==1:
                                     vim_flavor_dict = self.cur.fetchone()
                                     vm['vim_flavor_id']=vim_flavor_dict['vim_id']
-                                
+
                             #interfaces
                             cmd = "SELECT uuid,internal_name,external_name,net_id,type,vpci,mac,bw,model,ip_address," \
                                   "floating_ip, port_security" \
@@ -662,15 +666,15 @@ class nfvo_db(db_base.db_base):
                         vnf['nets'] = self.cur.fetchall()
                         for vnf_net in vnf['nets']:
                             SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
-                            cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])  
+                            cmd = "SELECT {} FROM ip_profiles WHERE net_id='{}'".format(SELECT_,vnf_net['uuid'])
                             self.logger.debug(cmd)
                             self.cur.execute(cmd)
                             ipprofiles = self.cur.fetchall()
                             if self.cur.rowcount==1:
                                 vnf_net["ip_profile"] = ipprofiles[0]
                             elif self.cur.rowcount>1:
-                                raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), db_base.HTTP_Bad_Request)
-                            
+                                raise db_base.db_base_Exception("More than one ip-profile found with this criteria: net_id='{}'".format(vnf_net['uuid']), httperrors.Bad_Request)
+
                     #sce_nets
                     cmd = "SELECT uuid,name,type,external,description,vim_network_name, osm_id" \
                           " FROM sce_nets  WHERE scenario_id='{}'" \
@@ -682,28 +686,28 @@ class nfvo_db(db_base.db_base):
                     for net in scenario_dict['nets']:
                         if str(net['external']) == 'false':
                             SELECT_ = "ip_version,subnet_address,gateway_address,dns_address,dhcp_enabled,dhcp_start_address,dhcp_count"
-                            cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])  
+                            cmd = "SELECT {} FROM ip_profiles WHERE sce_net_id='{}'".format(SELECT_,net['uuid'])
                             self.logger.debug(cmd)
                             self.cur.execute(cmd)
                             ipprofiles = self.cur.fetchall()
                             if self.cur.rowcount==1:
                                 net["ip_profile"] = ipprofiles[0]
                             elif self.cur.rowcount>1:
-                                raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), db_base.HTTP_Bad_Request)
+                                raise db_base.db_base_Exception("More than one ip-profile found with this criteria: sce_net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
                             continue
                         WHERE_=" WHERE name='{}'".format(net['name'])
                         if datacenter_id!=None:
                             WHERE_ += " AND datacenter_id='{}'".format(datacenter_id)
                         cmd = "SELECT vim_net_id FROM datacenter_nets" + WHERE_
                         self.logger.debug(cmd)
-                        self.cur.execute(cmd) 
+                        self.cur.execute(cmd)
                         d_net = self.cur.fetchone()
                         if d_net==None or datacenter_vim_id==None:
                             #print "nfvo_db.get_scenario() WARNING external net %s not found"  % net['name']
                             net['vim_id']=None
                         else:
                             net['vim_id']=d_net['vim_net_id']
-                    
+
                     db_base._convert_datetime2str(scenario_dict)
                     db_base._convert_str2boolean(scenario_dict, ('public','shared','external','port-security','floating-ip') )
 
@@ -745,13 +749,13 @@ class nfvo_db(db_base.db_base):
         '''Deletes a scenario, filtering by one or several of the tenant, uuid or name
         scenario_id is the uuid or the name if it is not a valid uuid format
        Only one scenario must match the filtering or an error is returned
-        ''' 
+        '''
         tries = 2
         while tries:
             try:
                 with self.con:
                     self.cur = self.con.cursor(mdb.cursors.DictCursor)
-    
+
                     #scenario table
                     where_text = "uuid='{}'".format(scenario_id)
                     if not tenant_id and tenant_id != "any":
@@ -761,12 +765,12 @@ class nfvo_db(db_base.db_base):
                     self.cur.execute(cmd)
                     rows = self.cur.fetchall()
                     if self.cur.rowcount==0:
-                        raise db_base.db_base_Exception("No scenario found where " + where_text, db_base.HTTP_Not_Found)
+                        raise db_base.db_base_Exception("No scenario found where " + where_text, httperrors.Not_Found)
                     elif self.cur.rowcount>1:
-                        raise db_base.db_base_Exception("More than one scenario found where " + where_text, db_base.HTTP_Conflict)
+                        raise db_base.db_base_Exception("More than one scenario found where " + where_text, httperrors.Conflict)
                     scenario_uuid = rows[0]["uuid"]
                     scenario_name = rows[0]["name"]
-                    
+
                     #sce_vnfs
                     cmd = "DELETE FROM scenarios WHERE uuid='{}'".format(scenario_uuid)
                     self.logger.debug(cmd)
@@ -777,7 +781,7 @@ class nfvo_db(db_base.db_base):
                 self._format_error(e, tries, "delete", "instances running")
             tries -= 1
 
-    def new_rows(self, tables, uuid_list=None):
+    def new_rows(self, tables, uuid_list=None, confidential_data=False):
         """
         Make a transactional insertion of rows at several tables. Can be also a deletion
         :param tables: list with dictionary where the keys are the table names and the values are a row or row list
@@ -795,6 +799,7 @@ class nfvo_db(db_base.db_base):
         :return: None if success,  raise exception otherwise
         """
         tries = 2
+        table_name = None
         while tries:
             created_time = time.time()
             try:
@@ -819,10 +824,11 @@ class nfvo_db(db_base.db_base):
                                 else:
                                     created_time_param = 0
                                 self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
-                                                               created_time=created_time_param)
+                                                       confidential_data=confidential_data,
+                                                       created_time=created_time_param)
                     return
             except (mdb.Error, AttributeError) as e:
-                self._format_error(e, tries)
+                self._format_error(e, tries, table=table_name)
             tries -= 1
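# Illustrative sketch, not part of the patch: how a caller might use new_rows()
# to insert related rows in a single transaction.  The table and column names
# are assumptions about the mano_db schema, used only to show the expected
# {"table_name": row_or_row_list} structure and the new confidential_data flag.
db = nfvo_db('localhost', 'mano', 'manopw', 'mano_db')
db.connect()
db.new_rows([
    {'nfvo_tenants': {'uuid': 'tenant-uuid-1', 'name': 'example-tenant'}},
    {'datacenters': [{'uuid': 'dc-uuid-1', 'name': 'example-dc',
                      'type': 'openstack', 'vim_url': 'http://10.0.0.1:5000/v3'}]},
], confidential_data=True)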
 
     def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
@@ -845,7 +851,7 @@ class nfvo_db(db_base.db_base):
                         INSERT_["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"], default_flow_style=True, width=256)
 
                     instance_uuid = self._new_row_internal('instance_scenarios', INSERT_, add_uuid=True, root_uuid=None, created_time=created_time)
-                    
+
                     net_scene2instance={}
                     #instance_nets   #nets interVNF
                     for net in scenarioDict['nets']:
@@ -855,10 +861,10 @@ class nfvo_db(db_base.db_base):
                             net["vim_id_sites"] ={datacenter_site_id: net['vim_id']}
                             net["vim_id_sites"]["datacenter_site_id"] = {datacenter_site_id: net['vim_id']}
                         sce_net_id = net.get("uuid")
-                        
+
                         for datacenter_site_id,vim_id in net["vim_id_sites"].iteritems():
                             INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #,  'type': net['type']
-                            INSERT_['datacenter_id'] = datacenter_site_id 
+                            INSERT_['datacenter_id'] = datacenter_site_id
                             INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
                             if not net.get('created', False):
                                 INSERT_['status'] = "ACTIVE"
@@ -868,31 +874,31 @@ class nfvo_db(db_base.db_base):
                             instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
                             net_scene2instance[ sce_net_id ][datacenter_site_id] = instance_net_uuid
                             net['uuid'] = instance_net_uuid  #overwrite scenario uuid by instance uuid
-                        
+
                         if 'ip_profile' in net:
                             net['ip_profile']['net_id'] = None
                             net['ip_profile']['sce_net_id'] = None
                             net['ip_profile']['instance_net_id'] = instance_net_uuid
                             created_time += 0.00001
                             ip_profile_id = self._new_row_internal('ip_profiles', net['ip_profile'])
-                    
+
                     #instance_vnfs
                     for vnf in scenarioDict['vnfs']:
                         datacenter_site_id = vnf.get('datacenter_id', datacenter_id)
                         INSERT_={'instance_scenario_id': instance_uuid,  'vnf_id': vnf['vnf_id']  }
-                        INSERT_['datacenter_id'] = datacenter_site_id 
+                        INSERT_['datacenter_id'] = datacenter_site_id
                         INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
                         if vnf.get("uuid"):
                             INSERT_['sce_vnf_id'] = vnf['uuid']
                         created_time += 0.00001
                         instance_vnf_uuid =  self._new_row_internal('instance_vnfs', INSERT_, True, instance_uuid, created_time)
                         vnf['uuid'] = instance_vnf_uuid  #overwrite scenario uuid by instance uuid
-                        
+
                         #instance_nets   #nets intraVNF
                         for net in vnf['nets']:
                             net_scene2instance[ net['uuid'] ] = {}
                             INSERT_={'vim_net_id': net['vim_id'], 'created': net.get('created', False), 'instance_scenario_id':instance_uuid  } #,  'type': net['type']
-                            INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id) 
+                            INSERT_['datacenter_id'] = net.get('datacenter_id', datacenter_site_id)
                             INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_id]
                             if net.get("uuid"):
                                 INSERT_['net_id'] = net['uuid']
@@ -900,7 +906,7 @@ class nfvo_db(db_base.db_base):
                             instance_net_uuid =  self._new_row_internal('instance_nets', INSERT_, True, instance_uuid, created_time)
                             net_scene2instance[ net['uuid'] ][datacenter_site_id] = instance_net_uuid
                             net['uuid'] = instance_net_uuid  #overwrite scenario uuid by instance uuid
-                            
+
                             if 'ip_profile' in net:
                                 net['ip_profile']['net_id'] = None
                                 net['ip_profile']['sce_net_id'] = None
@@ -914,7 +920,7 @@ class nfvo_db(db_base.db_base):
                             created_time += 0.00001
                             instance_vm_uuid =  self._new_row_internal('instance_vms', INSERT_, True, instance_uuid, created_time)
                             vm['uuid'] = instance_vm_uuid  #overwrite scenario uuid by instance uuid
-                            
+
                             #instance_interfaces
                             for interface in vm['interfaces']:
                                 net_id = interface.get('net_id', None)
@@ -945,7 +951,7 @@ class nfvo_db(db_base.db_base):
         '''Obtain the instance information, filtering by one or several of the tenant, uuid or name
         instance_id is the uuid or the name if it is not a valid uuid format
        Only one instance must match the filtering or an error is returned
-        ''' 
+        '''
         tries = 2
         while tries:
             try:
@@ -969,17 +975,17 @@ class nfvo_db(db_base.db_base):
                     self.logger.debug(cmd)
                     self.cur.execute(cmd)
                     rows = self.cur.fetchall()
-                    
+
                     if self.cur.rowcount == 0:
-                        raise db_base.db_base_Exception("No instance found where " + where_text, db_base.HTTP_Not_Found)
+                        raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Not_Found)
                     elif self.cur.rowcount > 1:
                         raise db_base.db_base_Exception("More than one instance found where " + where_text,
-                                                        db_base.HTTP_Bad_Request)
+                                                        httperrors.Bad_Request)
                     instance_dict = rows[0]
                     if instance_dict["cloud_config"]:
                         instance_dict["cloud-config"] = yaml.load(instance_dict["cloud_config"])
                     del instance_dict["cloud_config"]
-                    
+
                     # instance_vnfs
                     cmd = "SELECT iv.uuid as uuid, iv.vnf_id as vnf_id, sv.name as vnf_name, sce_vnf_id, datacenter_id"\
                           ", datacenter_tenant_id, v.mgmt_access, sv.member_vnf_index, v.osm_id as vnfd_osm_id "\
@@ -1034,7 +1040,7 @@ class nfvo_db(db_base.db_base):
                             del vm["vm_uuid"]
 
                     #instance_nets
-                    #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external" 
+                    #select_text = "instance_nets.uuid as uuid,sce_nets.name as net_name,instance_nets.vim_net_id as net_id,instance_nets.status as status,instance_nets.external as external"
                     #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
                     #            "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
                     #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
@@ -1094,18 +1100,18 @@ class nfvo_db(db_base.db_base):
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries)
             tries -= 1
-        
+
     def delete_instance_scenario(self, instance_id, tenant_id=None):
         '''Deletes an instance_scenario, filtering by one or several of the tenant, uuid or name
         instance_id is the uuid or the name if it is not a valid uuid format
         Only one instance_scenario must match the filtering or an error is returned
-        ''' 
+        '''
         tries = 2
         while tries:
             try:
                 with self.con:
                     self.cur = self.con.cursor(mdb.cursors.DictCursor)
-    
+
                     #instance table
                     where_list=[]
                     if tenant_id is not None: where_list.append( "tenant_id='" + tenant_id +"'" )
@@ -1118,24 +1124,24 @@ class nfvo_db(db_base.db_base):
                     self.logger.debug(cmd)
                     self.cur.execute(cmd)
                     rows = self.cur.fetchall()
-                    
+
                     if self.cur.rowcount==0:
-                        raise db_base.db_base_Exception("No instance found where " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("No instance found where " + where_text, httperrors.Bad_Request)
                     elif self.cur.rowcount>1:
-                        raise db_base.db_base_Exception("More than one instance found where " + where_text, db_base.HTTP_Bad_Request)
+                        raise db_base.db_base_Exception("More than one instance found where " + where_text, httperrors.Bad_Request)
                     instance_uuid = rows[0]["uuid"]
                     instance_name = rows[0]["name"]
-                    
+
                     #sce_vnfs
                     cmd = "DELETE FROM instance_scenarios WHERE uuid='{}'".format(instance_uuid)
                     self.logger.debug(cmd)
                     self.cur.execute(cmd)
-    
+
                     return instance_uuid + " " + instance_name
             except (mdb.Error, AttributeError) as e:
                 self._format_error(e, tries, "delete", "No dependences can avoid deleting!!!!")
             tries -= 1
-    
+
     def new_instance_scenario(self, instance_scenario_dict, tenant_id):
         #return self.new_row('vnfs', vnf_dict, None, tenant_id, True, True)
         return self._new_row_internal('instance_scenarios', instance_scenario_dict, tenant_id, add_uuid=True, root_uuid=None, log=True)
@@ -1151,7 +1157,7 @@ class nfvo_db(db_base.db_base):
     def update_instance_vnf(self, instance_vnf_dict):
         #TODO:
         return
-    
+
     def delete_instance_vnf(self, instance_vnf_id):
         #TODO:
         return
@@ -1163,14 +1169,14 @@ class nfvo_db(db_base.db_base):
     def update_instance_vm(self, instance_vm_dict):
         #TODO:
         return
-    
+
     def delete_instance_vm(self, instance_vm_id):
         #TODO:
         return
 
     def new_instance_net(self, instance_net_dict, tenant_id, instance_scenario_id = None):
         return self._new_row_internal('instance_nets', instance_net_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
-    
+
     def update_instance_net(self, instance_net_dict):
         #TODO:
         return
@@ -1178,7 +1184,7 @@ class nfvo_db(db_base.db_base):
     def delete_instance_net(self, instance_net_id):
         #TODO:
         return
-    
+
     def new_instance_interface(self, instance_interface_dict, tenant_id, instance_scenario_id = None):
         return self._new_row_internal('instance_interfaces', instance_interface_dict, tenant_id, add_uuid=True, root_uuid=instance_scenario_id, log=True)
 
@@ -1192,7 +1198,7 @@ class nfvo_db(db_base.db_base):
 
     def update_datacenter_nets(self, datacenter_id, new_net_list=[]):
         ''' Removes the old and adds the new net list at datacenter list for one datacenter.
-        Attribute 
+        Attributes:
             datacenter_id: uuid of the datacenter to act upon
             table: table where to insert
             new_net_list: the new values to be inserted. If empty it only deletes the existing nets
@@ -1218,4 +1224,4 @@ class nfvo_db(db_base.db_base):
                 self._format_error(e, tries)
             tries -= 1
 
-        
+
index 4a7d5f5..d10f862 100644 (file)
@@ -22,7 +22,7 @@
 ##
 
 '''
-JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API 
+JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API
 '''
 __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$09-oct-2014 09:09:48$"
@@ -48,7 +48,7 @@ integer1_schema={"type":"integer","minimum":1}
 path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
 vlan_schema={"type":"integer","minimum":1,"maximum":4095}
 vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
-mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0 
+mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast LSB bit of MSB byte ==0
 #mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
 ip_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
 ip_prefix_schema={"type":"string","pattern":"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
@@ -99,13 +99,13 @@ config_schema = {
         "vim_name": nameshort_schema,
         "vim_tenant_name": nameshort_schema,
         "mano_tenant_name": nameshort_schema,
-        "mano_tenant_id": id_schema, 
+        "mano_tenant_id": id_schema,
         "http_console_proxy": {"type":"boolean"},
         "http_console_host": nameshort_schema,
         "http_console_ports": {
-            "type": "array", 
+            "type": "array",
             "items": {"OneOf": [
-                port_schema, 
+                port_schema,
                 {"type": "object", "properties": {"from": port_schema, "to": port_schema}, "required": ["from", "to"]}
             ]}
         },
@@ -113,12 +113,14 @@ config_schema = {
         "log_socket_level": log_level_schema,
         "log_level_db": log_level_schema,
         "log_level_vim": log_level_schema,
+        "log_level_wim": log_level_schema,
         "log_level_nfvo": log_level_schema,
         "log_level_http": log_level_schema,
         "log_level_console": log_level_schema,
         "log_level_ovim": log_level_schema,
         "log_file_db": path_schema,
         "log_file_vim": path_schema,
+        "log_file_wim": path_schema,
         "log_file_nfvo": path_schema,
         "log_file_http": path_schema,
         "log_file_console": path_schema,
@@ -430,7 +432,7 @@ external_connection_schema_v02 = {
     "properties":{
         "name": name_schema,
         "mgmt": {"type":"boolean"},
-        "type": {"type": "string", "enum":["e-line", "e-lan"]}, 
+        "type": {"type": "string", "enum":["e-line", "e-lan"]},
         "implementation": {"type": "string", "enum":["overlay", "underlay"]},
         "VNFC": name_schema,
         "local_iface_name": name_schema ,
@@ -580,7 +582,7 @@ vnfc_schema = {
         "bridge-ifaces": bridge_interfaces_schema,
         "devices": devices_schema,
         "boot-data" : boot_data_vdu_schema
-        
+
     },
     "required": ["name"],
     "oneOf": [
@@ -767,7 +769,7 @@ nsd_schema_v02 = {
                         },
                     }
                 },
-            
+
             },
             "required": ["vnfs", "name"],
             "additionalProperties": False
@@ -861,7 +863,7 @@ nsd_schema_v03 = {
                         },
                     }
                 },
-            
+
             },
             "required": ["vnfs", "networks","name"],
             "additionalProperties": False
@@ -1056,7 +1058,7 @@ instance_scenario_create_schema_v01 = {
                                                 "vim-network-name": name_schema,
                                                 "ip-profile": ip_profile_schema,
                                                 "name": name_schema,
-                                            } 
+                                            }
                                         }
                                     }
                                 },
@@ -1086,7 +1088,7 @@ instance_scenario_create_schema_v01 = {
                                     }
                                 },
                                 "ip-profile": ip_profile_schema,
-                                #if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connect to 
+                                #if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connect to
                                 "sites": {
                                     "type":"array",
                                     "minLength":1,
@@ -1095,16 +1097,16 @@ instance_scenario_create_schema_v01 = {
                                         "properties":{
                                             # By default for a scenario 'external' network openmano looks for an existing VIM network to map this external scenario network,
                                             # for other networks openmano creates them at the VIM
-                                            # Use netmap-create to force to create an external scenario network  
+                                            # Use netmap-create to force to create an external scenario network
                                             "netmap-create": {"oneOf":[name_schema,{"type": "null"}]}, #datacenter network to use. Null if must be created as an internal net
-                                            #netmap-use:   Indicates an existing VIM network that must be used for this scenario network. 
+                                            #netmap-use:   Indicates an existing VIM network that must be used for this scenario network.
                                             #Can use both the VIM network name (if it is not ambiguous) or the VIM net UUID
                                             #If both 'netmap-create' and 'netmap-use' are supplied, netmap-use takes precedence, but if it fails openmano falls back to netmap-create
                                             #In other words, it is the same as 'try to map to the VIM network (netmap-use) if it exists, and if not create the network (netmap-create)'
-                                            "netmap-use": name_schema, # 
+                                            "netmap-use": name_schema, #
                                             "vim-network-name": name_schema, #override network name
                                             #"ip-profile": ip_profile_schema,
-                                            "datacenter": name_schema,                                        
+                                            "datacenter": name_schema,
                                         }
                                     }
                                 },
index 897eb9a..e15824a 100644 (file)
@@ -565,7 +565,168 @@ class openmanoclient():
         if mano_response.status_code==200:
             return content
         else:
-            raise OpenmanoResponseException(str(content))        
+            raise OpenmanoResponseException(str(content))
+
+    # WIMS
+
+    def list_wims(self, all_tenants=False, **kwargs):
+        '''Obtain a list of wims, i.e. the WIM information stored at openmano
+        Params: can be filtered by 'uuid','name','wim_url','type'
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'wims':[{wim1_info},{wim2_info},...]}
+        '''
+        return self._list_item("wims", all_tenants, filter_dict=kwargs)
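# Illustrative usage sketch, not part of the patch.  It assumes a reachable
# openmano/RO instance with the WIM extensions; the endpoint URL, tenant name
# and constructor keyword names are placeholders/assumptions.
from osm_ro.openmanoclient import openmanoclient

client = openmanoclient(endpoint_url='http://localhost:9090/openmano',
                        tenant_name='osm')
for wim in client.list_wims(type='odl')['wims']:
    print("{} {}".format(wim['uuid'], wim['name']))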
+
+    def get_wim(self, uuid=None, name=None, all_tenants=False):
+        '''Obtain the information of a wim
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several
+                Obtain a dictionary with format {'wim':{wim_info}}
+        '''
+        return self._get_item("wims", uuid, name, all_tenants)
+
+    def delete_wim(self, uuid=None, name=None):
+        '''Delete a wim
+        Params: uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+        Return: Raises an exception on error, not found, found several, not free
+                Obtain a dictionary with format {'result': text indicating deleted}
+        '''
+        if not uuid:
+            # check that exist
+            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
+        return self._del_item("wims", uuid, name, all_tenants=None)
+
+    def create_wim(self, descriptor=None, descriptor_format=None, name=None, wim_url=None, **kwargs):
+        # , type="openvim", public=False, description=None):
+        '''Creates a wim
+        Params: must supply a descriptor or/and just a name and a wim_url
+            descriptor: with format {'wim':{new_wim_info}}
+                new_wim_info must contain 'name', 'wim_url', and optionally 'description'
+                must be a dictionary or a json/yaml text.
+            name: the wim name. Overwrite descriptor name if any
+            wim_url: the wim URL. Overwrite descriptor wim_url if any
+            wim_type: the WIM type, can be tapi, odl, onos. Overwrite descriptor type if any
+            public: boolean, by default not public
+            description: wim description. Overwrite descriptor description if any
+            config: dictionary with extra configuration for the concrete wim
+        Return: Raises an exception on error
+                Obtain a dictionary with format {'wim':{new_wim_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif name and wim_url:
+            descriptor = {"wim": {"name": name, "wim_url": wim_url}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor, or name and wim_url")
+
+        if 'wim' not in descriptor or len(descriptor) != 1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+        if name:
+            descriptor['wim']['name'] = name
+        if wim_url:
+            descriptor['wim']['wim_url'] = wim_url
+        for param in kwargs:
+            descriptor['wim'][param] = kwargs[param]
+
+        return self._create_item("wims", descriptor, all_tenants=None)
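# Illustrative sketch, not part of the patch (it reuses `client` from the
# sketch above); all field values are placeholders.
new_wim = client.create_wim(name='wim-odl', wim_url='http://10.0.0.10:8181',
                            wim_type='odl', description='inter-DC transport')
print(new_wim['wim']['uuid'])

# The same call can be driven by a YAML/JSON descriptor text:
client.create_wim(descriptor="wim: {name: wim-odl2, wim_url: 'http://10.0.0.11:8181'}")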
+
+    def edit_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, all_tenants=False,
+                        **kwargs):
+        '''Edit the parameters of a wim
+        Params: must supply a descriptor or/and a parameter to change
+            uuid or/and name. If only name is supplied, there must be only one or an exception is raised
+            descriptor: with format {'wim':{params to change info}}
+                must be a dictionary or a json/yaml text.
+            parameters to change can be supplied by the descriptor or as parameters:
+                new_name: the wim name
+                wim_url: the wim URL
+                wim_type: the wim type, can be tapi, onos, odl
+                public: boolean, available to other tenants
+                description: wim description
+        Return: Raises an exception on error, not found or found several
+                Obtain a dictionary with format {'wim':{new_wim_info}}
+        '''
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif kwargs:
+            descriptor = {"wim": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor")
+
+        if 'wim' not in descriptor or len(descriptor) != 1:
+            raise OpenmanoBadParamsException("Descriptor must contain only one 'wim' field")
+        for param in kwargs:
+            if param == 'new_name':
+                descriptor['wim']['name'] = kwargs[param]
+            else:
+                descriptor['wim'][param] = kwargs[param]
+        return self._edit_item("wims", descriptor, uuid, name, all_tenants=None)
+
+    def attach_wim(self, uuid=None, name=None, descriptor=None, descriptor_format=None, wim_user=None,
+                          wim_password=None, wim_tenant_name=None, wim_tenant_id=None):
+        # check that exist
+        uuid = self._get_item_uuid("wims", uuid, name, all_tenants=True)
+        tenant_text = "/" + self._get_tenant()
+
+        if isinstance(descriptor, str):
+            descriptor = self.parse(descriptor, descriptor_format)
+        elif descriptor:
+            pass
+        elif wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+            descriptor = {"wim": {}}
+        else:
+            raise OpenmanoBadParamsException("Missing descriptor or params")
+
+        if wim_user or wim_password or wim_tenant_name or wim_tenant_id:
+            # print args.name
+            try:
+                if wim_user:
+                    descriptor['wim']['wim_user'] = wim_user
+                if wim_password:
+                    descriptor['wim']['wim_password'] = wim_password
+                if wim_tenant_name:
+                    descriptor['wim']['wim_tenant_name'] = wim_tenant_name
+                if wim_tenant_id:
+                    descriptor['wim']['wim_tenant'] = wim_tenant_id
+            except (KeyError, TypeError) as e:
+                if str(e) == 'wim':
+                    error_pos = "missing field 'wim'"
+                else:
+                    error_pos = "wrong format"
+                raise OpenmanoBadParamsException("Wrong wim descriptor: " + error_pos)
+
+        payload_req = yaml.safe_dump(descriptor)
+        # print payload_req
+        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano POST %s %s", URLrequest, payload_req)
+        mano_response = requests.post(URLrequest, headers=self.headers_req, data=payload_req)
+        self.logger.debug("openmano response: %s", mano_response.text)
+
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code == 200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))
+
+    def detach_wim(self, uuid=None, name=None):
+        if not uuid:
+            # check that exist
+            uuid = self._get_item_uuid("wims", uuid, name, all_tenants=False)
+        tenant_text = "/" + self._get_tenant()
+        URLrequest = "{}{}/wims/{}".format(self.endpoint_url, tenant_text, uuid)
+        self.logger.debug("openmano DELETE %s", URLrequest)
+        mano_response = requests.delete(URLrequest, headers=self.headers_req)
+        self.logger.debug("openmano response: %s", mano_response.text)
+
+        content = self._parse_yaml(mano_response.text, response=True)
+        if mano_response.status_code == 200:
+            return content
+        else:
+            raise OpenmanoResponseException(str(content))
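# Illustrative sketch, not part of the patch: associating the client's current
# tenant with an existing WIM and detaching it again.  Credentials are
# placeholders and `client` is assumed to be an openmanoclient instance.
client.attach_wim(name='wim-odl', wim_user='admin', wim_password='admin',
                  wim_tenant_name='osm')
client.detach_wim(name='wim-odl')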
 
     #VNFS
     def list_vnfs(self, all_tenants=False, **kwargs):
index eeefcb8..3565bbf 100644 (file)
@@ -71,6 +71,8 @@ log_level_db:      ERROR  #database log levels
 #log_file_db:       /opt/openmano/logs/openmano_db.log
 #log_level_vim:     DEBUG  #VIM connection log levels
 #log_file_vim:      /opt/openmano/logs/openmano_vimconn.log
+#log_level_wim:     DEBUG  #WIM connection log levels
+#log_file_wim:      /opt/openmano/logs/openmano_wimconn.log
 #log_level_nfvo:    DEBUG  #Main engine log levels
 #log_file_nfvo:     /opt/openmano/logs/openmano_nfvo.log
 #log_level_http:    DEBUG  #Main engine log levels
diff --git a/osm_ro/tests/db_helpers.py b/osm_ro/tests/db_helpers.py
new file mode 100644 (file)
index 0000000..bedf9a5
--- /dev/null
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+import hashlib
+import shlex
+import unittest
+from contextlib import contextmanager
+from functools import wraps
+from hashlib import md5
+from os import environ, pathsep
+from subprocess import STDOUT, check_output
+from uuid import UUID
+
+from MySQLdb import connect
+
+from ..nfvo_db import nfvo_db
+
+HOST = environ.get('TEST_DB_HOST', 'localhost')
+USER = environ.get('TEST_DB_USER', 'mano')
+PASSWORD = environ.get('TEST_DB_PASSWORD', 'manopw')
+DATABASE = environ.get('TEST_DB_DATABASE', 'mano_db')
+
+
+def uuid(seed):
+    """Generates strings with a UUID format in a repeatable way"""
+    return str(UUID(md5(str(seed)).hexdigest()))
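# Quick illustration, not part of the patch: the same seed always maps to the
# same UUID-formatted string (Python 2, where md5() accepts str directly).
assert uuid('wim1') == uuid('wim1')
assert uuid('wim1') != uuid('wim2')
uuid(1)  # 'c4ca4238-a0b9-2382-0dcc-509a6f75849b' (md5 of "1" reshaped as a UUID)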
+
+
+def sha1(text):
+    """Generates SHA1 hash code from a text string"""
+    return hashlib.sha1(text).hexdigest()
+
+
+def run(*args, **kwargs):
+    """Run a command inside a subprocess, raising an exception when it fails
+
+    Arguments:
+        *args: you can pass any number of arguments as separate words in the
+            shell, or just a single string with the entire command
+        **kwargs: proxied to subprocess.check_output (by default
+            ``stderr=STDOUT`` and ``universal_newlines=True``)
+    """
+    if len(args) == 1 and isinstance(args[0], str):
+        args = shlex.split(args[0])
+
+    opts = dict(stderr=STDOUT, universal_newlines=True)
+    opts.update(kwargs)
+    return check_output(args, **opts)
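# Illustrative usage, not part of the patch: both call styles are equivalent,
# and the captured output (stderr included) is returned as text.
run('mysql', '--version')
run('mysql --version')       # a single string is split with shlex
run('ls -l', cwd='/tmp')     # extra kwargs are forwarded to check_output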
+
+
+# In order to not mess around, force the user to explicitly set the
+# test database in an env variable
+@unittest.skipUnless(
+    environ.get('TEST_DB_HOST'),
+    'Test database not available. Please set TEST_DB_HOST env var')
+class TestCaseWithDatabase(unittest.TestCase):
+    """Connect to the database and provide methods that help to isolate the
+    data stored inside it between tests.
+
+    In order to avoid connecting, reconnecting, creating tables and destroying
+    tables all the time, this class manages the database using class-level
+    fixtures. This reduces the cost of performing these actions but does not
+    guarantee isolation of the DB state between the tests.
+    To enforce isolation, please call ``setup_tables`` and
+    ``empty_database`` directly, or write a single test per class.
+    """
+
+    host = HOST
+    user = USER
+    password = PASSWORD
+    database = DATABASE
+
+    @classmethod
+    def setup_tables(cls):
+        """Make sure the database is set up and in the right version, with all the
+        required tables.
+        """
+        dbutils = environ.get('DBUTILS')
+
+        if dbutils:
+            environ["PATH"] += pathsep + dbutils
+
+        return run('init_mano_db.sh',
+                   '-u', cls.user,
+                   '-p', cls.password,
+                   '-h', cls.host,
+                   '-d', cls.database)
+
+    @classmethod
+    def empty_database(cls):
+        """Clear the database, so one test does not interfere with the other"""
+        # Create a custom connection not attached to the database, so we can
+        # destroy and recreate the database itself
+        connection = connect(cls.host, cls.user, cls.password)
+        cursor = connection.cursor()
+        cursor.execute(
+            "DROP DATABASE {};".format(
+                connection.escape_string(cls.database)))
+        cursor.execute(
+            "CREATE DATABASE {};".format(
+                connection.escape_string(cls.database)))
+        cursor.close()
+        connection.close()
+
+
+class TestCaseWithDatabasePerTest(TestCaseWithDatabase):
+    """Ensure a connection to the database before each test runs and
+    drop the tables afterwards
+    """
+
+    def setUp(self):
+        self.setup_tables()
+        self.addCleanup(self.empty_database)
+
+        self.maxDiff = None
+
+        self.db = nfvo_db(self.host, self.user, self.password, self.database)
+        self.db.connect()
+
+    def populate(self, seeds=None, **kwargs):
+        """Seed the database with initial values"""
+        if not seeds:
+            seeds = []
+        if not isinstance(seeds, (list, tuple)):
+            seeds = [seeds]
+        if kwargs:
+            seeds.append(kwargs)
+        self.db.new_rows(seeds)
+
+    def count(self, table):
+        """Count number of rows in a table"""
+        return self.db.get_rows(
+            SELECT='COUNT(*) as count', FROM=table)[0]['count']
+
+    @contextmanager
+    def disable_foreign_keys(self):
+        """Do the test without checking foreign keys"""
+        try:
+            cursor = self.db.con.cursor()
+            cursor.execute('SET FOREIGN_KEY_CHECKS=0;')
+            yield
+        finally:
+            cursor.execute('SET FOREIGN_KEY_CHECKS=1;')
+
+
+def disable_foreign_keys(test):
+    """Do the test without checking foreign keys.
+    To be used together in subclasses of TestCaseWithDatabasePerTest
+    """
+    @wraps(test)
+    def _no_check(self, *args, **kwargs):
+        with self.disable_foreign_keys():
+            result = test(self, *args, **kwargs)
+
+        return result
+
+    return _no_check
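# Hypothetical test, not part of the patch, showing how these helpers are meant
# to be combined.  The 'nfvo_tenants' table and its columns are an assumption
# about the standard mano_db schema.
class TestTenantSeeding(TestCaseWithDatabasePerTest):
    @disable_foreign_keys
    def test_populate_and_count(self):
        self.populate([{'nfvo_tenants': {'uuid': uuid('tenant0'),
                                         'name': 'tenant0'}}])
        self.assertEqual(self.count('nfvo_tenants'), 1)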
diff --git a/osm_ro/tests/helpers.py b/osm_ro/tests/helpers.py
new file mode 100644 (file)
index 0000000..787fbce
--- /dev/null
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+import logging
+import unittest
+from collections import defaultdict
+
+from six import StringIO
+
+from mock import MagicMock, patch
+
+logger = logging.getLogger()
+
+
+class TestCaseWithLogging(unittest.TestCase):
+    """Attach a special handler to the root logger, capturing the logs in an
+    internal buffer (caplog property).
+
+    To retrieve the logs, do::
+
+        self.caplog.getvalue()
+    """
+    def setUp(self):
+        super(TestCaseWithLogging, self).setUp()
+        self.logger = logging.getLogger()
+        self.caplog = StringIO()
+        self.log_handler = logging.StreamHandler(self.caplog)
+        self.logger.addHandler(self.log_handler)
+        self.logger.setLevel(logging.NOTSET)
+
+    def tearDown(self):
+        super(TestCaseWithLogging, self).tearDown()
+        self.log_handler.close()
+        self.logger.removeHandler(self.log_handler)
+
+
+def mock_imports(modules, preserve=()):
+    """Given a list of modules, mock everything, unless listed in the preserve
+    argument.
+    """
+    # Ensure iterable
+    if isinstance(modules, str):
+        modules = (modules,)
+    if isinstance(preserve, str):
+        preserve = (preserve,)
+
+    # First expand the list, since child modules usually need their parents
+    # to be mocked as well.
+    # Example: ['Crypto.PublicKey'] => ['Crypto', 'Crypto.PublicKey']
+    all_modules = []
+    for name in modules:
+        parts = name.split('.')
+        compound_name = []
+        for part in parts:
+            compound_name.append(part)
+            all_modules.append('.'.join(compound_name))
+
+    all_modules = set(m for m in all_modules if m not in preserve)
+    for module in all_modules:
+        logger.info('Mocking module `%s`', module)
+
+    mocks = {module: MagicMock() for module in all_modules}
+
+    return patch.dict('sys.modules', **mocks)
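# Illustrative usage, not part of the patch: the patcher returned by patch.dict
# works as a context manager, so third-party SDKs can be imported without being
# installed.  The module names are examples only.
with mock_imports(['novaclient.client']):
    from novaclient import client                 # resolved to a MagicMock
    nova = client.Client('2', 'user', 'password')  # no real API call is made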
+
+
+def mock_dict(**kwargs):
+    """Create a dict that always responds with something.
+
+    Arguments:
+        **kwargs: certain items that should be set in the created object
+    """
+    response = defaultdict(MagicMock)
+    for k, v in kwargs.items():
+        response[k] = v
+
+    return response
+
+
+def mock_object(**kwargs):
+    """Create an object that always responds with something.
+
+    Arguments:
+        **kwargs: certain attributes that should be set in the created object
+    """
+    response = MagicMock()
+    for k, v in kwargs.items():
+        setattr(response, k, v)
+
+    return response
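# Quick illustration, not part of the patch, of the two factories above:
config = mock_dict(log_level='DEBUG')
config['log_level']       # 'DEBUG'
config['whatever_else']   # a fresh MagicMock instead of a KeyError

db = mock_object(get_rows=MagicMock(return_value=[]))
db.get_rows(FROM='wims')  # []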
index 0ee8efc..2afbc85 100644 (file)
@@ -30,8 +30,16 @@ __author__="Alfonso Tierno, Gerardo Garcia"
 __date__ ="$08-sep-2014 12:21:22$"
 
 import datetime
+import time
 import warnings
-from jsonschema import validate as js_v, exceptions as js_e
+from functools import reduce
+from itertools import tee
+
+from six.moves import filter, filterfalse
+
+from jsonschema import exceptions as js_e
+from jsonschema import validate as js_v
+
 #from bs4 import BeautifulSoup
 
 def read_file(file_to_read):
@@ -42,7 +50,7 @@ def read_file(file_to_read):
         f.close()
     except Exception as e:
         return (False, str(e))
-      
+
     return (True, read_data)
 
 def write_file(file_to_write, text):
@@ -53,7 +61,7 @@ def write_file(file_to_write, text):
         f.close()
     except Exception as e:
         return (False, str(e))
-      
+
     return (True, None)
 
 def format_in(http_response, schema):
@@ -93,8 +101,22 @@ def remove_extra_items(data, schema):
 #    return text
 
 
+def delete_nulls(var):
+    if type(var) is dict:
+        for k in var.keys():
+            if var[k] is None: del var[k]
+            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple:
+                if delete_nulls(var[k]): del var[k]
+        if len(var) == 0: return True
+    elif type(var) is list or type(var) is tuple:
+        for k in var:
+            if type(k) is dict: delete_nulls(k)
+        if len(var) == 0: return True
+    return False
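# Quick illustration, not part of the patch: None values (and containers that
# become empty once they are removed) are pruned in place.
record = {'a': 1, 'b': None, 'c': {'d': None}, 'e': [{'f': None}]}
delete_nulls(record)
# record is now {'a': 1, 'e': [{}]}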
+
+
 def convert_bandwidth(data, reverse=False):
-    '''Check the field bandwidth recursivelly and when found, it removes units and convert to number 
+    '''Check the field bandwidth recursively and when found, it removes units and converts it to a number
     It assumes that bandwidth is well formed
     Attributes:
         'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
@@ -127,7 +149,21 @@ def convert_bandwidth(data, reverse=False):
             if type(k) is dict or type(k) is tuple or type(k) is list:
                 convert_bandwidth(k, reverse)
 
-
+def convert_float_timestamp2str(var):
+    '''Converts timestamps (created_at, modified_at fields) represented as float
+    to a string with the format '%Y-%m-%dT%H:%M:%S'
+    It enters recursively in the dict var finding this kind of variables
+    '''
+    if type(var) is dict:
+        for k,v in var.items():
+            if type(v) is float and k in ("created_at", "modified_at"):
+                var[k] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(v) )
+            elif type(v) is dict or type(v) is list or type(v) is tuple:
+                convert_float_timestamp2str(v)
+        if len(var) == 0: return True
+    elif type(var) is list or type(var) is tuple:
+        for v in var:
+            convert_float_timestamp2str(v)
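# Quick illustration, not part of the patch: float epoch timestamps stored in
# created_at/modified_at fields are rewritten in place, at any nesting level.
row = {'uuid': 'x', 'created_at': 1540000000.0,
       'nets': [{'modified_at': 1540000000.0}]}
convert_float_timestamp2str(row)
# row['created_at'] is now something like '2018-10-20T02:26:40'
# (the exact string depends on the local timezone)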
 
 def convert_datetime2str(var):
     '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
@@ -137,7 +173,7 @@ def convert_datetime2str(var):
         for k,v in var.items():
             if type(v) is datetime.datetime:
                 var[k]= v.strftime('%Y-%m-%dT%H:%M:%S')
-            elif type(v) is dict or type(v) is list or type(v) is tuple: 
+            elif type(v) is dict or type(v) is list or type(v) is tuple:
                 convert_datetime2str(v)
         if len(var) == 0: return True
     elif type(var) is list or type(var) is tuple:
@@ -145,7 +181,7 @@ def convert_datetime2str(var):
             convert_datetime2str(v)
 
 def convert_str2boolean(data, items):
-    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean 
+    '''Check recursively the content of data, and if there is a key contained in items, convert the value from string to boolean
     Done recursively
     Attributes:
         'data': dictionary variable to be checked. None or empty is considered valid
@@ -207,4 +243,105 @@ def deprecated(message):
           warnings.simplefilter('default', DeprecationWarning)
           return func(*args, **kwargs)
       return deprecated_func
-  return deprecated_decorator
\ No newline at end of file
+  return deprecated_decorator
+
+
+def truncate(text, max_length=1024):
+    """Limit huge texts in number of characters"""
+    text = str(text)
+    if text and len(text) >= max_length:
+        return text[:max_length//2-3] + " ... " + text[-max_length//2+3:]
+    return text
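# Quick illustration, not part of the patch: long payloads keep their head and
# tail so that logs stay readable.
truncate('x' * 5000, max_length=20)   # 'xxxxxxx ... xxxxxxx'
truncate('short text')                # returned unchanged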
+
+
+def merge_dicts(*dicts, **kwargs):
+    """Creates a new dict merging N others and keyword arguments.
+    Right-most dicts take precedence.
+    Keyword args take precedence.
+    """
+    return reduce(
+        lambda acc, x: acc.update(x) or acc,
+        list(dicts) + [kwargs], {})
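# Quick illustration, not part of the patch: right-most dicts and keyword
# arguments win on key collisions, and the input dicts are left untouched.
merge_dicts({'a': 1, 'b': 2}, {'b': 3}, a=10)   # {'a': 10, 'b': 3}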
+
+
+def remove_none_items(adict):
+    """Return a similar dict without keys associated to None values"""
+    return {k: v for k, v in adict.items() if v is not None}
+
+
+def filter_dict_keys(adict, allow):
+    """Return a similar dict, but just containing the explicitly allowed keys
+
+    Arguments:
+        adict (dict): Simple python dict data struct
+        allow (list): Explicits allowed keys
+    """
+    return {k: v for k, v in adict.items() if k in allow}
+
+
+def filter_out_dict_keys(adict, deny):
+    """Return a similar dict, but not containing the explicitly denied keys
+
+    Arguments:
+        adict (dict): Simple python dict data struct
+        deny (list): Explicitly denied keys
+    """
+    return {k: v for k, v in adict.items() if k not in deny}
+
+
+def expand_joined_fields(record):
+    """Given a db query result, explode the fields that contain `.` (join
+    operations).
+
+    Example
+        >> expand_joined_fields({'wim.id': 2})
+        # {'wim': {'id': 2}}
+    """
+    result = {}
+    for field, value in record.items():
+        keys = field.split('.')
+        target = result
+        target = reduce(lambda target, key: target.setdefault(key, {}),
+                        keys[:-1], result)
+        target[keys[-1]] = value
+
+    return result
+
+
+def ensure(condition, exception):
+    """Raise an exception if condition is not met"""
+    if not condition:
+        raise exception
+
+
+def partition(predicate, iterable):
+    """Create two derived iterators from a single one.
+    The first iterator created will loop through the values where the function
+    predicate is True, the second one will iterate over the values where it is
+    false.
+    """
+    iterable1, iterable2 = tee(iterable)
+    return filter(predicate, iterable2), filterfalse(predicate, iterable1)
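# Quick illustration, not part of the patch: both results are lazy iterators,
# so they are materialized here with list().
evens, odds = partition(lambda x: x % 2 == 0, range(6))
list(evens)   # [0, 2, 4]
list(odds)    # [1, 3, 5]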
+
+
+def pipe(*functions):
+    """Compose functions of one argument in the opposite order,
+    So pipe(f, g)(x) = g(f(x))
+    """
+    return lambda x: reduce(lambda acc, f: f(acc), functions, x)
+
+
+def compose(*functions):
+    """Compose functions of one argument,
+    So compose(f, g)(x) = f(g(x))
+    """
+    return lambda x: reduce(lambda acc, f: f(acc), functions[::-1], x)
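# Quick illustration, not part of the patch, of the application order:
add_one = lambda x: x + 1
double = lambda x: x * 2
pipe(add_one, double)(3)      # double(add_one(3)) == 8
compose(add_one, double)(3)   # add_one(double(3)) == 7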
+
+
+def safe_get(target, key_path, default=None):
+    """Given a path of keys (eg.: "key1.key2.key3"), return a nested value in
+    a nested dict if present, or the default value
+    """
+    keys = key_path.split('.')
+    target = reduce(lambda acc, key: acc.get(key) or {}, keys[:-1], target)
+    return target.get(keys[-1], default)
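# Quick illustration, not part of the patch:
record = {'wim_account': {'wim': {'wim_url': 'http://10.0.0.10:8181'}}}
safe_get(record, 'wim_account.wim.wim_url')          # 'http://10.0.0.10:8181'
safe_get(record, 'wim_account.missing.key', 'N/A')   # 'N/A', no KeyError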
index b70d94a..48c8e32 100644 (file)
@@ -23,7 +23,7 @@
 
 """"
 This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
-The tasks are stored at database in table vim_actions
+The tasks are stored in the database table vim_wim_actions
 The task content is (M: stored at memory, D: stored at database):
     MD  instance_action_id:  reference a global action over an instance-scenario: database instance_actions
     MD  task_index:     index number of the task. This together with the previous forms a unique key identifier
@@ -49,8 +49,8 @@ The task content is (M: stored at memory, D: stored at database):
             vim_status: VIM status of the element. Stored also at database in the instance_XXX
     M   depends:    dict with task_index(from depends_on) to task class
     M   params:     same as extra[params] but with the resolved dependencies
-    M   vim_interfaces: similar to extra[interfaces] but with VIM information. Stored at database in the instance_XXX but not at vim_actions
-    M   vim_info:   Detailed information of a vm,net from the VIM. Stored at database in the instance_XXX but not at vim_actions
+    M   vim_interfaces: similar to extra[interfaces] but with VIM information. Stored at database in the instance_XXX but not at vim_wim_actions
+    M   vim_info:   Detailed information of a vm,net from the VIM. Stored at database in the instance_XXX but not at vim_wim_actions
     MD  error_msg:  descriptive text upon an error.Stored also at database instance_XXX
     MD  created_at: task creation time
     MD  modified_at: last task update time. On refresh it contains when this task need to be refreshed
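# Illustrative sketch, not part of the patch: rough in-memory shape of one task
# built from a vim_wim_actions row.  All values are placeholders and only
# fields mentioned in the description above (or used later in this module) are
# shown.
task = {
    'instance_action_id': 'instance-action-uuid',
    'task_index': 0,
    'datacenter_vim_id': 'datacenter-tenant-uuid',
    'item': 'instance_nets',            # table holding the affected element
    'item_id': 'instance-net-uuid',
    'status': 'SCHEDULED',
    'vim_id': None,                     # filled once the VIM object exists
    'extra': {'params': [], 'depends_on': []},
    'depends': {},                      # resolved in memory, not persisted
    'vim_info': None,                   # memory only, copied to instance_XXX
    'error_msg': None,
    'created_at': 1540000000.0,
    'modified_at': 1540000000.0,
}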
@@ -189,7 +189,7 @@ class vim_thread(threading.Thread):
             while True:
                 # get 200 (database_limit) entries each time
                 with self.db_lock:
-                    vim_actions = self.db.get_rows(FROM="vim_actions",
+                    vim_actions = self.db.get_rows(FROM="vim_wim_actions",
                                                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
                                                           "item_id>=": old_item_id},
                                                    ORDER_BY=("item_id", "item", "created_at",),
@@ -393,7 +393,7 @@ class vim_thread(threading.Thread):
                     if task_need_update:
                         with self.db_lock:
                             self.db.update_rows(
-                                'vim_actions',
+                                'vim_wim_actions',
                                 UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
                                         "error_msg": task.get("error_msg"), "modified_at": now},
                                 WHERE={'instance_action_id': task['instance_action_id'],
@@ -463,7 +463,7 @@ class vim_thread(threading.Thread):
                         with self.db_lock:
                             self.db.update_rows('instance_nets', UPDATE=temp_dict, WHERE={"uuid": task["item_id"]})
                             self.db.update_rows(
-                                'vim_actions',
+                                'vim_wim_actions',
                                 UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
                                         "error_msg": task.get("error_msg"), "modified_at": now},
                                 WHERE={'instance_action_id': task['instance_action_id'],
@@ -644,7 +644,7 @@ class vim_thread(threading.Thread):
                 now = time.time()
                 with self.db_lock:
                     self.db.update_rows(
-                        table="vim_actions",
+                        table="vim_wim_actions",
                         UPDATE={"status": task["status"], "vim_id": task.get("vim_id"), "modified_at": now,
                                 "error_msg": task["error_msg"],
                                 "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
@@ -811,7 +811,7 @@ class vim_thread(threading.Thread):
                 instance_action_id = ins_action_id
 
         with self.db_lock:
-            tasks = self.db.get_rows(FROM="vim_actions", WHERE={"instance_action_id": instance_action_id,
+            tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
                                                                 "task_index": task_index})
         if not tasks:
             return None
index ffe2da0..876fa2f 100644 (file)
@@ -44,6 +44,8 @@ import yaml
 import random
 import re
 import copy
+from pprint import pformat
+from types import StringTypes
 
 from novaclient import client as nClient, exceptions as nvExceptions
 from keystoneauth1.identity import v2, v3
@@ -77,6 +79,18 @@ supportedClassificationTypes = ['legacy_flow_classifier']
 volume_timeout = 600
 server_timeout = 600
 
+
+class SafeDumper(yaml.SafeDumper):
+    def represent_data(self, data):
+        # OpenStack APIs use custom subclasses of dict, and the YAML safe
+        # dumper is not designed to handle them (see pyyaml issue 142)
+        if isinstance(data, dict) and data.__class__ != dict:
+            # A simple solution is to convert those items back to dicts
+            data = dict(data.items())
+
+        return super(SafeDumper, self).represent_data(data)
+
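+# Illustrative sketch: `FakeVimDict` below is hypothetical and only stands in
+# for the dict subclasses returned by the OpenStack clients; it shows why the
+# plain yaml.safe_dump refuses such objects while SafeDumper accepts them.
+#
+#     class FakeVimDict(dict):
+#         pass
+#
+#     yaml.safe_dump(FakeVimDict(id='abc'))     # raises RepresenterError
+#     yaml.dump(FakeVimDict(id='abc'), Dumper=SafeDumper,
+#               default_flow_style=True)        # -> "{id: abc}\n"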
+
 class vimconnector(vimconn.vimconnector):
     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
                  log_level=None, config={}, persistent_info={}):
@@ -158,11 +172,30 @@ class vimconnector(vimconn.vimconnector):
             vimconn.vimconnector.__setitem__(self, index, value)
         self.session['reload_client'] = True
 
+    def serialize(self, value):
+        """Serialization of python basic types.
+
+        If the value is not serializable, a message is logged and a simple
+        string representation of the data (one that cannot be converted back
+        to Python) is returned.
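+
+        Example (illustrative; ``conn`` stands for a vimconnector instance):
+
+            conn.serialize('already-a-string')  # -> 'already-a-string'
+            conn.serialize({'id': 1})           # -> '{id: 1}' plus a newline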
+        """
+        if isinstance(value, StringTypes):
+            return value
+
+        try:
+            return yaml.dump(value, Dumper=SafeDumper,
+                             default_flow_style=True, width=256)
+        except yaml.representer.RepresenterError:
+            self.logger.debug(
+                'The following entity cannot be serialized in YAML:'
+                '\n\n%s\n\n', pformat(value), exc_info=True)
+            return str(value)
+
     def _reload_connection(self):
         '''Called before any operation, it check if credentials has changed
         Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
         '''
-        #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) 
+        #TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
         if self.session['reload_client']:
             if self.config.get('APIversion'):
                 self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
@@ -563,13 +596,13 @@ class vimconnector(vimconn.vimconnector):
                 net_id:         #VIM id of this network
                     status:     #Mandatory. Text with one of:
                                 #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                 #  OTHER (Vim reported other status not understood)
                                 #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  ACTIVE, INACTIVE, DOWN (admin down),
                                 #  BUILD (on building process)
                                 #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
 
         '''
@@ -586,10 +619,9 @@ class vimconnector(vimconn.vimconnector):
 
                 if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
                     net['status'] = 'DOWN'
-                try:
-                    net['vim_info'] = yaml.safe_dump(net_vim, default_flow_style=True, width=256)
-                except yaml.representer.RepresenterError:
-                    net['vim_info'] = str(net_vim)
+
+                net['vim_info'] = self.serialize(net_vim)
+
                 if net_vim.get('fault'):  #TODO
                     net['error_msg'] = str(net_vim['fault'])
             except vimconn.vimconnNotFoundException as e:
@@ -1271,13 +1303,13 @@ class vimconnector(vimconn.vimconnector):
         Params:
             vm_id: uuid of the VM
             console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types, 
+                "novnc" (by default), "xvpvnc" for VNC types,
                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
         Returns dict with the console parameters:
                 protocol: ssh, ftp, http, https, ...
-                server:   usually ip address 
-                port:     the http, ssh, ... port 
-                suffix:   extra text, e.g. the http path and query string   
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
         '''
         self.logger.debug("Getting VM CONSOLE from VIM")
         try:
@@ -1377,14 +1409,14 @@ class vimconnector(vimconn.vimconnector):
                 vm_id:          #VIM id of this Virtual Machine
                     status:     #Mandatory. Text with one of:
                                 #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                 #  OTHER (Vim reported other status not understood)
                                 #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                 #  CREATING (on building process), ERROR
                                 #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
                                 #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                     interfaces:
                      -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
@@ -1407,10 +1439,9 @@ class vimconnector(vimconn.vimconnector):
                 else:
                     vm['status']    = "OTHER"
                     vm['error_msg'] = "VIM status reported " + vm_vim['status']
-                try:
-                    vm['vim_info']  = yaml.safe_dump(vm_vim, default_flow_style=True, width=256)
-                except yaml.representer.RepresenterError:
-                    vm['vim_info'] = str(vm_vim)
+
+                vm['vim_info'] = self.serialize(vm_vim)
+
                 vm["interfaces"] = []
                 if vm_vim.get('fault'):
                     vm['error_msg'] = str(vm_vim['fault'])
@@ -1420,20 +1451,17 @@ class vimconnector(vimconn.vimconnector):
                     port_dict = self.neutron.list_ports(device_id=vm_id)
                     for port in port_dict["ports"]:
                         interface={}
-                        try:
-                            interface['vim_info'] = yaml.safe_dump(port, default_flow_style=True, width=256)
-                        except yaml.representer.RepresenterError:
-                            interface['vim_info'] = str(port)
+                        interface['vim_info'] = self.serialize(port)
                         interface["mac_address"] = port.get("mac_address")
                         interface["vim_net_id"] = port["network_id"]
                         interface["vim_interface_id"] = port["id"]
-                        # check if OS-EXT-SRV-ATTR:host is there, 
+                        # check if OS-EXT-SRV-ATTR:host is there,
                         # in case of non-admin credentials, it will be missing
                         if vm_vim.get('OS-EXT-SRV-ATTR:host'):
                             interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
                         interface["pci"] = None
 
-                        # check if binding:profile is there, 
+                        # check if binding:profile is there,
                         # in case of non-admin credentials, it will be missing
                         if port.get('binding:profile'):
                             if port['binding:profile'].get('pci_slot'):
diff --git a/osm_ro/wim/__init__.py b/osm_ro/wim/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/osm_ro/wim/actions.py b/osm_ro/wim/actions.py
new file mode 100644 (file)
index 0000000..f224460
--- /dev/null
@@ -0,0 +1,423 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+
+"""Common logic for task management"""
+import logging
+from time import time
+from types import StringTypes
+
+from six.moves import range
+
+import yaml
+
+from ..utils import (
+    filter_dict_keys,
+    filter_out_dict_keys,
+    merge_dicts,
+    remove_none_items,
+    truncate
+)
+
+PENDING, REFRESH, IGNORE = range(3)
+
+TIMEOUT = 1 * 60 * 60  # 1 hour
+MIN_ATTEMPTS = 10
+
+
+class Action(object):
+    """Create a basic object representing the action record.
+
+    Arguments:
+        record (dict): record as returned by the database
+        **kwargs: extra keyword arguments to overwrite the fields in record
+    """
+
+    PROPERTIES = [
+        'task_index',          # MD - Index number of the task.
+                               #      This together with the instance_action_id
+                               #      forms a unique key identifier
+        'action',              # MD - CREATE, DELETE, FIND
+        'item',                # MD - table name, eg. instance_wim_nets
+        'item_id',             # MD - uuid of the referenced entry in the
+                               #      previous table
+        'instance_action_id',  # MD - reference to a cohesive group of actions
+                               #      related to the same instance-scenario
+        'wim_account_id',      # MD - reference to the WIM account used
+                               #      by the thread/connector
+        'wim_internal_id',     # MD - internal ID used by the WIM to refer to
+                               #      the item
+        'datacenter_vim_id',   # MD - reference to the VIM account used
+                               #      by the thread/connector
+        'vim_id',              # MD - internal ID used by the VIM to refer to
+                               #      the item
+        'status',              # MD - SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
+        'extra',               # MD - text with yaml format at database,
+        #                             dict in memory with the keys below
+        #                             (see the illustrative sketch after
+        #                             this list):
+        # `- params:     list with the params to be sent to the VIM for CREATE
+        #                or FIND. For DELETE the vim_id is taken from other
+        #                related tasks
+        # `- find:       (only for CREATE tasks) if present it should FIND
+        #                before creating and use if existing.
+        #                Contains the FIND params
+        # `- depends_on: list with the 'task_index'es of tasks that must be
+        #                completed before. e.g. a vm creation depends on a net
+        #                creation
+        # `- sdn_net_id: used for net.
+        # `- tries
+        # `- created_items:
+        #                dictionary with extra elements created that need
+        #                to be deleted, e.g. ports, volumes, ...
+        # `- created:    False if the VIM element is not created by
+        #                other actions, and it should not be deleted
+        # `- wim_status: WIM status of the element. Stored also at database
+        #                in the item table
+        'params',              # M  - similar to extra[params]
+        'depends_on',          # M  - similar to extra[depends_on]
+        'depends',             # M  - dict with task_index(from depends_on) to
+                               #      task class
+        'error_msg',           # MD - descriptive text upon an error
+        'created_at',          # MD - task DB creation time
+        'modified_at',         # MD - last DB update time
+        'process_at',          # M  - unix epoch when to process the task
+    ]
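+
+    # Illustrative sketch of the in-memory `extra` dict described above:
+    # the field names come from the comments in PROPERTIES, while the
+    # concrete values are made up.
+    #
+    #     extra = {
+    #         'params': [{'name': 'my-net'}],
+    #         'depends_on': [0, 2],
+    #         'created': True,
+    #         'created_items': {'ports': ['port-uuid-1']},
+    #         'sdn_net_id': None,
+    #     }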
+
+    __slots__ = PROPERTIES + [
+        'logger',
+    ]
+
+    def __init__(self, record, logger=None, **kwargs):
+        self.logger = logger or logging.getLogger('openmano.wim.action')
+        attrs = merge_dicts(dict.fromkeys(self.PROPERTIES), record, kwargs)
+        self.update(_expand_extra(attrs))
+
+    def __repr__(self):
+        return super(Action, self).__repr__() + repr(self.as_dict())
+
+    def as_dict(self, *fields):
+        """Representation of the object as a dict"""
+        attrs = (set(self.PROPERTIES) & set(fields)
+                 if fields else self.PROPERTIES)
+        return {k: getattr(self, k) for k in attrs}
+
+    def as_record(self):
+        """Returns a dict that can be sent to the persistence layer"""
+        special = ['params', 'depends_on', 'depends']
+        record = self.as_dict()
+        record['extra'].update(self.as_dict(*special))
+        non_fields = special + ['process_at']
+
+        return remove_none_items(filter_out_dict_keys(record, non_fields))
+
+    def update(self, values=None, **kwargs):
+        """Update the in-memory representation of the task (works similarly to
+        dict.update). The update is NOT automatically persisted.
+        """
+        # "white-listed mass assignment"
+        updates = merge_dicts(values, kwargs)
+        for attr in set(self.PROPERTIES) & set(updates.keys()):
+            setattr(self, attr, updates[attr])
+
+    def save(self, persistence, **kwargs):
+        """Persist current state of the object to the database.
+
+        Arguments:
+            persistence: object encapsulating the database
+            **kwargs: extra properties to be updated before saving
+
+        Note:
+            If any keyword argument is passed, the object itself will be
+            changed as an extra side-effect.
+        """
+        action_id = self.instance_action_id
+        index = self.task_index
+        if kwargs:
+            self.update(kwargs)
+        properties = self.as_record()
+
+        return persistence.update_action(action_id, index, properties)
+
+    def fail(self, persistence, reason, status='FAILED'):
+        """Mark action as FAILED, updating tables accordingly"""
+        persistence.update_instance_action_counters(
+            self.instance_action_id,
+            failed=1,
+            done=(-1 if self.status == 'DONE' else 0))
+
+        self.status = status
+        self.error_msg = truncate(reason)
+        self.logger.error('%s %s: %s', self.id, status, reason)
+        return self.save(persistence)
+
+    def succeed(self, persistence, status='DONE'):
+        """Mark action as DONE, updating tables accordingly"""
+        persistence.update_instance_action_counters(
+            self.instance_action_id, done=1)
+        self.status = status
+        self.logger.debug('%s %s', self.id, status)
+        return self.save(persistence)
+
+    def defer(self, persistence, reason,
+              timeout=TIMEOUT, min_attempts=MIN_ATTEMPTS):
+        """Postpone the task processing, taking care not to time out.
+
+        Arguments:
+            persistence: object encapsulating the database
+            reason (str): explanation for the delay
+            timeout (int): maximum delay tolerated since the first attempt.
+                Note that this number is a time delta, in seconds
+            min_attempts (int): Number of attempts to try before giving up.
+        """
+        now = time()
+        last_attempt = self.extra.get('last_attempted_at') or now
+        attempts = self.extra.get('attempts') or 0
+
+        # Give up when both the timeout is exceeded (measured from the last
+        # recorded attempt) and the minimum number of attempts was made
+        if now - last_attempt > timeout and attempts > min_attempts:
+            return self.fail(
+                persistence,
+                'Timeout reached. {} attempts in the last {:d} min'
+                .format(attempts, int(timeout / 60)))
+
+        self.extra['last_attempted_at'] = time()
+        self.extra['attempts'] = attempts + 1
+        self.logger.info('%s DEFERRED: %s', self.id, reason)
+        return self.save(persistence)
+
+    @property
+    def group_key(self):
+        """Key defining the group to which this task belongs"""
+        return (self.item, self.item_id)
+
+    @property
+    def processing(self):
+        """Processing status for the task (PENDING, REFRESH, IGNORE)"""
+        if self.status == 'SCHEDULED':
+            return PENDING
+
+        return IGNORE
+
+    @property
+    def id(self):
+        """Unique identifier of this particular action"""
+        return '{}[{}]'.format(self.instance_action_id, self.task_index)
+
+    @property
+    def is_scheduled(self):
+        return self.status == 'SCHEDULED'
+
+    @property
+    def is_build(self):
+        return self.status == 'BUILD'
+
+    @property
+    def is_done(self):
+        return self.status == 'DONE'
+
+    @property
+    def is_failed(self):
+        return self.status == 'FAILED'
+
+    @property
+    def is_superseded(self):
+        return self.status == 'SUPERSEDED'
+
+    def refresh(self, connector, persistence):
+        """Use the connector/persistence to refresh the status of the item.
+
+        After the item status is refreshed any change in the task should be
+        persisted to the database.
+
+        Arguments:
+            connector: object containing the classes to access the WIM or VIM
+            persistence: object containing the methods necessary to query the
+                database and to persist the updates
+        """
+        self.logger.debug(
+            'Action `%s` has no refresh to be done',
+            self.__class__.__name__)
+
+    def expand_dependency_links(self, task_group):
+        """Expand task indexes into actual IDs"""
+        if not self.depends_on or (
+                isinstance(self.depends, dict) and self.depends):
+            return
+
+        num_tasks = len(task_group)
+        references = {
+            "TASK-{}".format(i): task_group[i]
+            for i in self.depends_on
+            if i < num_tasks and task_group[i].task_index == i and
+            task_group[i].instance_action_id == self.instance_action_id
+        }
+        self.depends = references
+
+    def become_superseded(self, superseding):
+        """When another action tries to supersede this one,
+        we need to change both of them, so the surviving actions will be
+        logically consistent.
+
+        This method should do the required internal changes, and also
+        suggest changes for the other, superseding, action.
+
+        Arguments:
+            superseding: other task superseding this one
+
+        Returns:
+            dict: changes suggested to the action superseding this one.
+                  A special key ``superseding_needed`` is used to
+                  suggest if the superseding is actually required or not.
+                  If not present, ``superseding_needed`` is assumed to
+                  be False.
+        """
+        self.status = 'SUPERSEDED'
+        self.logger.debug(
+            'Action `%s` was superseded by `%s`',
+            self.__class__.__name__, superseding.__class__.__name__)
+        return {}
+
+    def supersede(self, others):
+        """Supersede other tasks, if necessary
+
+        Arguments:
+            others (list): action objects being superseded
+
+        When the task decides to supersede others, this method should call
+        ``become_superseded`` on the other actions, collect the suggested
+        updates and perform the necessary changes
+        """
+        # By default actions don't supersede others
+        self.logger.debug(
+            'Action `%s` does not supersede other actions',
+            self.__class__.__name__)
+
+    def process(self, connector, persistence, ovim):
+        """Abstract method, that needs to be implemented.
+        Process the current task.
+
+        Arguments:
+            connector: object with API for accessing the WAN
+                Infrastructure Manager system
+            persistence: abstraction layer for the database
+            ovim: instance of openvim, abstraction layer that enable
+                SDN-related operations
+        """
+        raise NotImplementedError
+
+
+class FindAction(Action):
+    """Abstract class that should be inherited for FIND actions, depending on
+    the item type.
+    """
+    @property
+    def processing(self):
+        if self.status in ('DONE', 'BUILD'):
+            return REFRESH
+
+        return super(FindAction, self).processing
+
+    def become_superseded(self, superseding):
+        super(FindAction, self).become_superseded(superseding)
+        info = ('vim_id', 'wim_internal_id')
+        return remove_none_items({f: getattr(self, f) for f in info})
+
+
+class CreateAction(Action):
+    """Abstract class that should be inherited for CREATE actions, depending on
+    the item type.
+    """
+    @property
+    def processing(self):
+        if self.status in ('DONE', 'BUILD'):
+            return REFRESH
+
+        return super(CreateAction, self).processing
+
+    def become_superseded(self, superseding):
+        super(CreateAction, self).become_superseded(superseding)
+
+        created = self.extra.get('created', True)
+        sdn_net_id = self.extra.get('sdn_net_id')
+        pending_info = self.wim_internal_id or self.vim_id or sdn_net_id
+        if not(created and pending_info):
+            return {}
+
+        extra_fields = ('sdn_net_id', 'interfaces', 'created_items')
+        extra_info = filter_dict_keys(self.extra or {}, extra_fields)
+
+        return {'superseding_needed': True,
+                'wim_internal_id': self.wim_internal_id,
+                'vim_id': self.vim_id,
+                'extra': remove_none_items(extra_info)}
+
+
+class DeleteAction(Action):
+    """Abstract class that should be inherited for DELETE actions, depending on
+    the item type.
+    """
+    def supersede(self, others):
+        self.logger.debug('%s %s %s %s might supersede other actions',
+                          self.id, self.action, self.item, self.item_id)
+        # First collect all the changes from the superseded tasks
+        changes = [other.become_superseded(self) for other in others]
+        needed = any(change.pop('superseding_needed', False)
+                     for change in changes)
+
+        # Deal with the nested ones first
+        extras = [change.pop('extra', None) or {} for change in changes]
+        items = [extra.pop('created_items', None) or {} for extra in extras]
+        items = merge_dicts(self.extra.get('created_items', {}), *items)
+        self.extra = merge_dicts(self.extra, {'created_items': items}, *extras)
+
+        # Accept the other ones
+        change = ((key, value) for key, value in merge_dicts(*changes).items()
+                  if key in self.PROPERTIES)
+        for attr, value in change:
+            setattr(self, attr, value)
+
+        # Reevaluate if the action itself is needed
+        if not needed:
+            self.status = 'SUPERSEDED'
+
+
+def _expand_extra(record):
+    extra = record.pop('extra', None) or {}
+    if isinstance(extra, StringTypes):
+        extra = yaml.safe_load(extra)
+
+    record['params'] = extra.get('params')
+    record['depends_on'] = extra.get('depends_on', [])
+    record['depends'] = extra.get('depends', None)
+    record['extra'] = extra
+
+    return record
diff --git a/osm_ro/wim/engine.py b/osm_ro/wim/engine.py
new file mode 100644 (file)
index 0000000..dde5dee
--- /dev/null
@@ -0,0 +1,455 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains the domain logic, and the implementation of the
+required steps to perform VNF management and orchestration in a WAN
+environment.
+
+It works as an extension/complement to the main functions contained in the
+``nfvo.py`` file and avoids interacting directly with the database, by relying
+on the `persistence` module.
+
+No http request handling/direct interaction with the database should be present
+in this file.
+"""
+import json
+import logging
+from contextlib import contextmanager
+from itertools import groupby
+from operator import itemgetter
+from uuid import uuid4
+
+from ..utils import remove_none_items
+from .actions import Action
+from .errors import (
+    NoWimConnectedToDatacenters,
+    WimAccountNotActive
+)
+from .wim_thread import WimThread
+
+
+class WimEngine(object):
+    """Logic supporting the establishment of WAN links when NS spans across
+    different datacenters.
+    """
+    def __init__(self, persistence, logger=None, ovim=None):
+        self.persist = persistence
+        self.logger = logger or logging.getLogger('openmano.wim.engine')
+        self.threads = {}
+        self.connectors = {}
+        self.ovim = ovim
+
+    def create_wim(self, properties):
+        """Create a new wim record according to the properties
+
+        Please check the wim schema to have more information about
+        ``properties``.
+
+        Returns:
+            str: uuid of the newly created WIM record
+        """
+        return self.persist.create_wim(properties)
+
+    def get_wim(self, uuid_or_name, tenant_id=None):
+        """Retrieve existing WIM record by name or id.
+
+        If ``tenant_id`` is specified, the query will be
+        limited to the WIM associated to the given tenant.
+        """
+        # Since it is a pure DB operation, we can delegate it directly
+        return self.persist.get_wim(uuid_or_name, tenant_id)
+
+    def update_wim(self, uuid_or_name, properties):
+        """Edit an existing WIM record.
+
+        ``properties`` is a dictionary with the properties being changed,
+        if a property is not present, the old value will be preserved
+        """
+        return self.persist.update_wim(uuid_or_name, properties)
+
+    def delete_wim(self, uuid_or_name):
+        """Kill the corresponding wim threads and erase the WIM record"""
+        # Theoretically, we can rely on the database to drop the wim_accounts
+        # automatically, since 'ON DELETE CASCADE' is configured.
+        # However, we use `delete_wim_accounts` here to also kill all the
+        # running threads.
+        self.delete_wim_accounts(uuid_or_name)
+        return self.persist.delete_wim(uuid_or_name)
+
+    def create_wim_account(self, wim, tenant, properties):
+        """Create an account that associates a tenant to a WIM.
+
+        As a side effect this function will spawn a new thread
+
+        Arguments:
+            wim (str): name or uuid of the WIM related to the account being
+                created
+            tenant (str): name or uuid of the nfvo tenant for which the
+                account will be created
+            properties (dict): properties of the account
+                (eg. username, password, ...)
+
+        Returns:
+            dict: Created record
+        """
+        uuid = self.persist.create_wim_account(wim, tenant, properties)
+        account = self.persist.get_wim_account_by(uuid=uuid)
+        # ^  We need to use get_wim_account_by here, since this method returns
+        #    all the associations, and we need the wim to create the thread
+        self._spawn_thread(account)
+        return account
+
+    def _update_single_wim_account(self, account, properties):
+        """Update WIM Account, taking care to reload the corresponding thread
+
+        Arguments:
+            account (dict): Current account record
+            properties (dict): Properties to be updated
+
+        Returns:
+            dict: updated record
+        """
+        account = self.persist.update_wim_account(account['uuid'], properties)
+        self.threads[account['uuid']].reload()
+        return account
+
+    def update_wim_accounts(self, wim, tenant, properties):
+        """Update all the accounts related to a WIM and a tenant,
+        taking care of reloading the related threads.
+
+        Arguments:
+            wim (str): uuid or name of a WIM record
+            tenant (str): uuid or name of a NFVO tenant record
+            properties (dict): attributes with values to be updated
+
+        Returns
+            list: Records that were updated
+        """
+        accounts = self.persist.get_wim_accounts_by(wim, tenant)
+        return [self._update_single_wim_account(account, properties)
+                for account in accounts]
+
+    def _delete_single_wim_account(self, account):
+        """Delete WIM Account, taking care to remove the corresponding thread
+        and delete the internal WIM account, if it was automatically generated.
+
+        Arguments:
+            account (dict): Current account record
+
+        Returns:
+            dict: current record (same as input)
+        """
+        self.persist.delete_wim_account(account['uuid'])
+
+        if account['uuid'] not in self.threads:
+            raise WimAccountNotActive(
+                'Requests sent to the WIM Account {} are not currently '
+                'being processed.'.format(account['uuid']))
+        else:
+            self.threads[account['uuid']].exit()
+            del self.threads[account['uuid']]
+
+        return account
+
+    def delete_wim_accounts(self, wim, tenant=None, **kwargs):
+        """Delete all the accounts related to a WIM (and a tenant),
+        taking care of the threads and internal WIM accounts.
+
+        Arguments:
+            wim (str): uuid or name of a WIM record
+            tenant (str): uuid or name of a NFVO tenant record
+
+        Returns
+            list: Records that were deleted
+        """
+        kwargs.setdefault('error_if_none', False)
+        accounts = self.persist.get_wim_accounts_by(wim, tenant, **kwargs)
+        return [self._delete_single_wim_account(a) for a in accounts]
+
+    def _reload_wim_threads(self, wim_id):
+        for thread in self.threads.values():
+            if thread.wim_account['wim_id'] == wim_id:
+                thread.reload()
+
+    def create_wim_port_mappings(self, wim, properties, tenant=None):
+        """Store information about port mappings in the database"""
+        # TODO: Review tenants... WIMs can exist across different tenants,
+        #       and the port_mappings are a WIM property, not a wim_account
+        #       property, so the concepts are not related
+        wim = self.persist.get_by_name_or_uuid('wims', wim)
+        result = self.persist.create_wim_port_mappings(wim, properties, tenant)
+        self._reload_wim_threads(wim['uuid'])
+        return result
+
+    def get_wim_port_mappings(self, wim):
+        """Retrieve information about port mappings from the database"""
+        return self.persist.get_wim_port_mappings(wim)
+
+    def delete_wim_port_mappings(self, wim):
+        """Erase the port mapping records associated with the WIM"""
+        wim = self.persist.get_by_name_or_uuid('wims', wim)
+        message = self.persist.delete_wim_port_mappings(wim['uuid'])
+        self._reload_wim_threads(wim['uuid'])
+        return message
+
+    def find_common_wims(self, datacenter_ids, tenant):
+        """Find WIMs that are common to all datacenters listed"""
+        mappings = self.persist.get_wim_port_mappings(
+            datacenter=datacenter_ids, tenant=tenant, error_if_none=False)
+
+        wim_id_of = itemgetter('wim_id')
+        sorted_mappings = sorted(mappings, key=wim_id_of)  # needed by groupby
+        grouped_mappings = groupby(sorted_mappings, key=wim_id_of)
+        mapped_datacenters = {
+            wim_id: [m['datacenter_id'] for m in mappings]
+            for wim_id, mappings in grouped_mappings
+        }
+
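+        # Illustrative example (made-up IDs): if the mappings above result in
+        # mapped_datacenters == {'wim-a': ['dc1', 'dc2'], 'wim-b': ['dc1']},
+        # then for datacenter_ids == ['dc1', 'dc2'] only 'wim-a' is returned.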
+        return [
+            wim_id
+            for wim_id, connected_datacenters in mapped_datacenters.items()
+            if set(connected_datacenters) >= set(datacenter_ids)
+        ]
+
+    def find_common_wim(self, datacenter_ids, tenant):
+        """Find a single WIM that is able to connect all the datacenters
+        listed
+
+        Raises
+            NoWimConnectedToDatacenters: if no WIM connected to all datacenters
+                at once is found
+        """
+        suitable_wim_ids = self.find_common_wims(datacenter_ids, tenant)
+
+        if not suitable_wim_ids:
+            raise NoWimConnectedToDatacenters(datacenter_ids)
+
+        # TODO: use a criteria to determine which WIM is going to be used,
+        #       instead of always using the first one (strategy pattern can be
+        #       used here)
+        return suitable_wim_ids[0]
+
+    def derive_wan_link(self,
+                        instance_scenario_id, sce_net_id,
+                        networks, tenant):
+        """Create an instance_wim_nets record for the given information"""
+        datacenters = [n['datacenter_id'] for n in networks]
+        wim_id = self.find_common_wim(datacenters, tenant)
+
+        account = self.persist.get_wim_account_by(wim_id, tenant)
+
+        return {
+            'uuid': str(uuid4()),
+            'instance_scenario_id': instance_scenario_id,
+            'sce_net_id': sce_net_id,
+            'wim_id': wim_id,
+            'wim_account_id': account['uuid']
+        }
+
+    def derive_wan_links(self, networks, tenant=None):
+        """Discover and return the wan_links that have to be created,
+        considering the set of networks (VLDs) required for a scenario
+        instance (NSR).
+
+        Arguments:
+            networks(list): Dicts containing the information about the networks
+                that will be instantiated to materialize a Network Service
+                (scenario) instance.
+
+        Returns:
+            list: list of WAN links to be written to the database
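+
+        Example:
+            Illustrative values only; it assumes a WIM connected to both
+            datacenters (and a corresponding wim_account) exists. Two
+            entries of the same VLD deployed in different datacenters:
+
+                networks = [
+                    {'instance_scenario_id': 'nsr-1', 'sce_net_id': 'vld-1',
+                     'datacenter_id': 'dc-1'},
+                    {'instance_scenario_id': 'nsr-1', 'sce_net_id': 'vld-1',
+                     'datacenter_id': 'dc-2'}]
+
+            would result in a single WAN link record connecting ``dc-1``
+            and ``dc-2``.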
+        """
+        # Group networks by key=(instance_scenario_id, sce_net_id)
+        filtered = _filter_multi_vim(networks)
+        grouped_networks = _group_networks(filtered)
+        datacenters_per_group = _count_datacenters(grouped_networks)
+        # For each group count the number of distinct datacenters. If greater
+        # than 1, we have to create a wan link connecting them.
+        wan_groups = [key
+                      for key, counter in datacenters_per_group
+                      if counter > 1]
+
+        return [
+            self.derive_wan_link(key[0], key[1], grouped_networks[key], tenant)
+            for key in wan_groups
+        ]
+
+    def create_action(self, wan_link):
+        """For a single wan_link create the corresponding create action"""
+        return {
+            'action': 'CREATE',
+            'status': 'SCHEDULED',
+            'item': 'instance_wim_nets',
+            'item_id': wan_link['uuid'],
+            'wim_account_id': wan_link['wim_account_id']
+        }
+
+    def create_actions(self, wan_links):
+        """For an array of wan_links, create all the corresponding actions"""
+        return [self.create_action(l) for l in wan_links]
+
+    def delete_action(self, wan_link):
+        """For a single wan_link create the corresponding delete action"""
+        return {
+            'action': 'DELETE',
+            'status': 'SCHEDULED',
+            'item': 'instance_wim_nets',
+            'item_id': wan_link['uuid'],
+            'wim_account_id': wan_link['wim_account_id'],
+            'extra': json.dumps({'wan_link': wan_link})
+            # We serialize and cache the wan_link here, because it can be
+            # deleted during the delete process
+        }
+
+    def delete_actions(self, wan_links=(), instance_scenario_id=None):
+        """Given an Instance Scenario, return the delete actions for all the
+        WAN Links created for it in the past"""
+        if instance_scenario_id:
+            wan_links = self.persist.get_wan_links(
+                instance_scenario_id=instance_scenario_id)
+        return [self.delete_action(l) for l in wan_links]
+
+    def incorporate_actions(self, wim_actions, instance_action):
+        """Make the instance action consider new WIM actions and make the WIM
+        actions aware of the instance action
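+
+        Example (illustrative values):
+
+            >>> engine = WimEngine(persistence=None)
+            >>> actions = [{'action': 'CREATE'}, {'action': 'DELETE'}]
+            >>> instance_action = {'uuid': 'ia-1', 'number_tasks': 3}
+            >>> actions, instance_action = engine.incorporate_actions(
+            ...     actions, instance_action)
+            >>> [a['task_index'] for a in actions]
+            [3, 4]
+            >>> instance_action['number_tasks']
+            5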
+        """
+        current = instance_action.setdefault('number_tasks', 0)
+        for i, action in enumerate(wim_actions):
+            action['task_index'] = current + i
+            action['instance_action_id'] = instance_action['uuid']
+        instance_action['number_tasks'] += len(wim_actions)
+
+        return wim_actions, instance_action
+
+    def dispatch(self, tasks):
+        """Enqueue a list of tasks for further processing.
+
+        This function is supposed to be called from outside the WIM Thread.
+        """
+        for task in tasks:
+            if task['wim_account_id'] not in self.threads:
+                error_msg = str(WimAccountNotActive(
+                    'Requests sent to the WIM Account {} are not currently '
+                    'being processed.'.format(task['wim_account_id'])))
+                Action(task, self.logger).fail(self.persist, error_msg)
+                self.persist.update_wan_link(task['item_id'],
+                                             {'status': 'ERROR',
+                                              'error_msg': error_msg})
+                self.logger.error('Task %s %s %s not dispatched.\n%s',
+                                  task['action'], task['item'],
+                                  task['instance_action_id'], error_msg)
+            else:
+                self.threads[task['wim_account_id']].insert_task(task)
+                self.logger.debug('Task %s %s %s dispatched',
+                                  task['action'], task['item'],
+                                  task['instance_action_id'])
+
+    def _spawn_thread(self, wim_account):
+        """Spawn a WIM thread
+
+        Arguments:
+            wim_account (dict): WIM information (usually persisted)
+                The `wim` field is required to be set with a valid WIM record
+                inside the `wim_account` dict
+
+        Return:
+            threading.Thread: Thread object
+        """
+        thread = None
+        try:
+            thread = WimThread(self.persist, wim_account, ovim=self.ovim)
+            self.threads[wim_account['uuid']] = thread
+            thread.start()
+        except:  # noqa
+            self.logger.error('Error when spawning WIM thread for %s',
+                              wim_account['uuid'], exc_info=True)
+
+        return thread
+
+    def start_threads(self):
+        """Start the threads responsible for processing WIM Actions"""
+        accounts = self.persist.get_wim_accounts(error_if_none=False)
+        self.threads = remove_none_items(
+            {a['uuid']: self._spawn_thread(a) for a in accounts})
+
+    def stop_threads(self):
+        """Stop the threads responsible for processing WIM Actions"""
+        for uuid, thread in self.threads.items():
+            thread.exit()
+            del self.threads[uuid]
+
+    @contextmanager
+    def threads_running(self):
+        """Ensure no thread will be left running"""
+        # This method is particularly important for testing :)
+        try:
+            self.start_threads()
+            yield
+        finally:
+            self.stop_threads()
+
+
+def _filter_multi_vim(networks):
+    """Ignore networks without sce_net_id (all VNFs go to the same VIM)"""
+    return [n for n in networks if 'sce_net_id' in n and n['sce_net_id']]
+
+
+def _group_networks(networks):
+    """Group networks that correspond to the same instance_scenario_id and
+    sce_net_id (NSR and VLD).
+
+    Arguments:
+        networks(list): Dicts containing the information about the networks
+            that will be instantiated to materialize a Network Service
+            (scenario) instance.
+    Returns:
+        dict: Keys are tuples (instance_scenario_id, sce_net_id) and values
+            are lists of networks.
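+
+    Example (illustrative values):
+
+        >>> nets = [
+        ...     {'instance_scenario_id': 1, 'sce_net_id': 2,
+        ...      'datacenter_id': 'dc1'},
+        ...     {'instance_scenario_id': 1, 'sce_net_id': 2,
+        ...      'datacenter_id': 'dc2'}]
+        >>> groups = _group_networks(nets)
+        >>> sorted(groups.keys())
+        [(1, 2)]
+        >>> len(groups[(1, 2)])
+        2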
+    """
+    criteria = itemgetter('instance_scenario_id', 'sce_net_id')
+
+    networks = sorted(networks, key=criteria)
+    return {k: list(v) for k, v in groupby(networks, key=criteria)}
+
+
+def _count_datacenters(grouped_networks):
+    """Count the number of datacenters in each group of networks
+
+    Returns:
+        list of tuples: the first element is the group key, while the second
+            element is the number of datacenters in each group.
+    """
+    return ((key, len(set(n['datacenter_id'] for n in group)))
+            for key, group in grouped_networks.items())
diff --git a/osm_ro/wim/errors.py b/osm_ro/wim/errors.py
new file mode 100644 (file)
index 0000000..16c53b5
--- /dev/null
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+from six.moves import queue
+
+from ..db_base import db_base_Exception as DbBaseException
+from ..http_tools.errors import (
+    Bad_Request,
+    Conflict,
+    HttpMappedError,
+    Internal_Server_Error,
+    Not_Found
+)
+
+
+class NoRecordFound(DbBaseException):
+    """No record was found in the database"""
+
+    def __init__(self, criteria, table=None):
+        table_info = '{} - '.format(table) if table else ''
+        super(NoRecordFound, self).__init__(
+            '{}: {}`{}`'.format(self.__class__.__doc__, table_info, criteria),
+            http_code=Not_Found)
+
+
+class MultipleRecordsFound(DbBaseException):
+    """More than one record was found in the database"""
+
+    def __init__(self, criteria, table=None):
+        table_info = '{} - '.format(table) if table else ''
+        super(MultipleRecordsFound, self).__init__(
+            '{}: {}`{}`'.format(self.__class__.__doc__, table_info, criteria),
+            http_code=Conflict)
+
+
+class WimAndTenantNotAttached(DbBaseException):
+    """Wim and Tenant are not attached"""
+
+    def __init__(self, wim, tenant):
+        super(WimAndTenantNotAttached, self).__init__(
+            '{}: `{}` <> `{}`'.format(self.__class__.__doc__, wim, tenant),
+            http_code=Conflict)
+
+
+class WimAndTenantAlreadyAttached(DbBaseException):
+    """There is already a wim account attaching the given wim and tenant"""
+
+    def __init__(self, wim, tenant):
+        super(WimAndTenantAlreadyAttached, self).__init__(
+            '{}: `{}` <> `{}`'.format(self.__class__.__doc__, wim, tenant),
+            http_code=Conflict)
+
+
+class NoWimConnectedToDatacenters(NoRecordFound):
+    """No WIM that is able to connect the given datacenters was found"""
+
+
+class InvalidParameters(DbBaseException):
+    """The given parameters are invalid"""
+
+    def __init__(self, message, http_code=Bad_Request):
+        super(InvalidParameters, self).__init__(message, http_code)
+
+
+class UndefinedAction(HttpMappedError):
+    """No action found"""
+
+    def __init__(self, item_type, action, http_code=Internal_Server_Error):
+        message = ('The action {} {} is not defined'.format(action, item_type))
+        super(UndefinedAction, self).__init__(message, http_code)
+
+
+class UndefinedWimConnector(DbBaseException):
+    """The connector class for the specified wim type is not implemented"""
+
+    def __init__(self, wim_type, module_name, location_reference):
+        super(UndefinedWimConnector, self).__init__(
+            ('{}: `{}`. Could not find module `{}` '
+             '(check if it is necessary to install a plugin)'
+             .format(self.__class__.__doc__, wim_type, module_name)),
+            http_code=Bad_Request)
+
+
+class WimAccountOverwrite(DbBaseException):
+    """An attempt to overwrite an existing WIM account was identified"""
+
+    def __init__(self, wim_account, diff=None, tip=None):
+        message = self.__class__.__doc__
+        account_info = (
+            'Account -- name: {name}, uuid: {uuid}'.format(**wim_account)
+            if wim_account else '')
+        diff_info = (
+            'Differing fields: ' + ', '.join(diff.keys()) if diff else '')
+
+        super(WimAccountOverwrite, self).__init__(
+            '\n'.join(m for m in (message, account_info, diff_info, tip) if m),
+            http_code=Conflict)
+
+
+class UnexpectedDatabaseError(DbBaseException):
+    """The database didn't raise an exception, but the query was not
+    executed either (maybe the connection had some problem?)
+    """
+
+
+class UndefinedUuidOrName(DbBaseException):
+    """Trying to query for a record using an empty uuid or name"""
+
+    def __init__(self, table=None):
+        table_info = '{} - '.format(table.split()[0]) if table else ''
+        super(UndefinedUuidOrName, self).__init__(
+            table_info + self.__class__.__doc__, http_code=Bad_Request)
+
+
+class UndefinedWanMappingType(InvalidParameters):
+    """The dict wan_service_mapping_info MUST contain a `type` field"""
+
+    def __init__(self, given):
+        super(UndefinedWanMappingType, self).__init__(
+            '{}. Given: `{}`'.format(self.__class__.__doc__, given))
+
+
+class QueueFull(HttpMappedError, queue.Full):
+    """Thread queue is full"""
+
+    def __init__(self, thread_name, http_code=Internal_Server_Error):
+        message = ('Thread {} queue is full'.format(thread_name))
+        super(QueueFull, self).__init__(message, http_code)
+
+
+class InconsistentState(HttpMappedError):
+    """An unexpected inconsistency was found in the state of the program"""
+
+    def __init__(self, arg, http_code=Internal_Server_Error):
+        if isinstance(arg, HttpMappedError):
+            http_code = arg.http_code
+            message = str(arg)
+        else:
+            message = arg
+
+        super(InconsistentState, self).__init__(message, http_code)
+
+
+class WimAccountNotActive(HttpMappedError, KeyError):
+    """WIM Account is not active yet (no thread is running)"""
+
+    def __init__(self, message, http_code=Internal_Server_Error):
+        message += ('\nThe thread responsible for processing the actions has '
+                    'suddenly stopped, or was never spawned')
+        super(WimAccountNotActive, self).__init__(message, http_code)
diff --git a/osm_ro/wim/failing_connector.py b/osm_ro/wim/failing_connector.py
new file mode 100644 (file)
index 0000000..b66551c
--- /dev/null
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""In the case any error happens when trying to initiate the WIM Connector,
+we need a replacement for it, that will throw an error every time we try to
+execute any action
+"""
+import json
+from .wimconn import WimConnectorError
+
+
+class FailingConnector(object):
+    """Placeholder for a connector whose instantiation failed.
+    This placeholder will just raise an error every time an action is
+    requested from the connector.
+
+    This way we can make sure that all the other parts of the program will work
+    but the user will have all the information available to fix the problem.
+    """
+    def __init__(self, error_msg):
+        self.error_msg = error_msg
+
+    def check_credentials(self):
+        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
+
+    def get_connectivity_service_status(self, service_uuid, _conn_info=None):
+        raise WimConnectorError('Impossible to retrieve status for {}\n\n{}'
+                                .format(service_uuid, self.error_msg))
+
+    def create_connectivity_service(self, service_uuid, *args, **kwargs):
+        raise WimConnectorError('Impossible to connect {}.\n{}\n{}\n{}'
+                                .format(service_uuid, self.error_msg,
+                                        json.dumps(args, indent=4),
+                                        json.dumps(kwargs, indent=4)))
+
+    def delete_connectivity_service(self, service_uuid, _conn_info=None):
+        raise WimConnectorError('Impossible to disconnect {}\n\n{}'
+                                .format(service_uuid, self.error_msg))
+
+    def edit_connectivity_service(self, service_uuid, *args, **kwargs):
+        raise WimConnectorError('Impossible to change connection {}.\n{}\n'
+                                '{}\n{}'
+                                .format(service_uuid, self.error_msg,
+                                        json.dumps(args, indent=4),
+                                        json.dumps(kwargs, indent=4)))
+
+    def clear_all_connectivity_services(self):
+        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
+
+    def get_all_active_connectivity_services(self):
+        raise WimConnectorError('Impossible to use WIM:\n' + self.error_msg)
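+
+
+# Illustrative sketch of the intended usage; the `load_connector` helper below
+# is hypothetical, the actual wiring lives in the WIM thread/engine code:
+#
+#     try:
+#         connector = load_connector(wim_account)
+#     except Exception as exc:
+#         connector = FailingConnector(str(exc))
+#     # later calls such as connector.check_credentials() now raise a
+#     # WimConnectorError carrying the original error message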
diff --git a/osm_ro/wim/http_handler.py b/osm_ro/wim/http_handler.py
new file mode 100644 (file)
index 0000000..f5eeed9
--- /dev/null
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module works as an extension to the toplevel ``httpserver`` module,
+implementing callbacks for the HTTP routes related to the WIM features of OSM.
+
+Acting as a front-end, it is responsible for converting the HTTP request
+payload into native python objects, calling the correct engine methods
+and converting the response objects back into strings to be sent in the HTTP
+response payload.
+
+Direct domain/persistence logic should be avoided in this file; instead,
+calls should be delegated to the other layers.
+"""
+import logging
+
+from bottle import request
+
+from .. import utils
+from ..http_tools.errors import ErrorHandler
+from ..http_tools.handler import BaseHandler, route
+from ..http_tools.request_processing import (
+    filter_query_string,
+    format_in,
+    format_out
+)
+from .engine import WimEngine
+from .persistence import WimPersistence
+from .schemas import (
+    wim_account_schema,
+    wim_edit_schema,
+    wim_port_mapping_schema,
+    wim_schema
+)
+
+
+class WimHandler(BaseHandler):
+    """HTTP route implementations for WIM related URLs
+
+    Arguments:
+        db: instance of mydb [optional]. This argument must be provided
+            if ``persistence`` is not passed
+        persistence (WimPersistence): High-level data storage abstraction
+            [optional]. If this argument is not present, ``db`` must be.
+        engine (WimEngine): Implementation of the business logic
+            for managing WAN networks
+        logger (logging.Logger): logger object [optional]
+        url_base (str): Path fragment to be prepended to the routes [optional]
+        plugins (list): List of bottle plugins to be applied to routes
+            [optional]
+    """
+    def __init__(self, db=None, persistence=None, engine=None,
+                 url_base='', logger=None, plugins=()):
+        self.persist = persistence or WimPersistence(db)
+        self.engine = engine or WimEngine(self.persist)
+        self.url_base = url_base
+        self.logger = logger or logging.getLogger('openmano.wim.http')
+        error_handler = ErrorHandler(self.logger)
+        self.plugins = [error_handler] + list(plugins)
+
+    @route('GET', '/<tenant_id>/wims')
+    def http_list_wims(self, tenant_id):
+        allowed_fields = ('uuid', 'name', 'wim_url', 'type', 'created_at')
+        select_, where_, limit_ = filter_query_string(
+            request.query, None, allowed_fields)
+        # ^  Since we allow the user to customize the db query using the HTTP
+        #    query string and it is quite difficult to re-use this query,
+        #    let's just do an ad-hoc call to the db
+
+        from_ = 'wims'
+        if tenant_id != 'any':
+            where_['nfvo_tenant_id'] = tenant_id
+            if 'created_at' in select_:
+                select_[select_.index('created_at')] = (
+                    'w.created_at as created_at')
+            if 'created_at' in where_:
+                where_['w.created_at'] = where_.pop('created_at')
+            from_ = ('wims as w join wim_nfvo_tenants as wt '
+                     'on w.uuid=wt.wim_id')
+
+        wims = self.persist.query(
+            FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_,
+            error_if_none=False)
+
+        utils.convert_float_timestamp2str(wims)
+        return format_out({'wims': wims})
+
+    @route('GET', '/<tenant_id>/wims/<wim_id>')
+    def http_get_wim(self, tenant_id, wim_id):
+        tenant_id = None if tenant_id == 'any' else tenant_id
+        wim = self.engine.get_wim(wim_id, tenant_id)
+        return format_out({'wim': wim})
+
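+    # An illustrative request body accepted by the route below (a sketch
+    # only; it must validate against ``wim_schema``, so "name", "type" and
+    # "wim_url" are required and "type" is one of "tapi", "onos" or "odl".
+    # The concrete values here are placeholders, not taken from the code):
+    #
+    #     POST /wims
+    #     {"wim": {"name": "wim-example",
+    #              "type": "onos",
+    #              "wim_url": "http://localhost:8181",
+    #              "config": {"some_option": "some_value"}}}
+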
+    @route('POST', '/wims')
+    def http_create_wim(self):
+        http_content, _ = format_in(wim_schema, confidential_data=True)
+        r = utils.remove_extra_items(http_content, wim_schema)
+        if r:
+            self.logger.debug("Remove extra items received %r", r)
+        data = self.engine.create_wim(http_content['wim'])
+        return self.http_get_wim('any', data)
+
+    @route('PUT', '/wims/<wim_id>')
+    def http_update_wim(self, wim_id):
+        """Edit wim details; either uuid or name can be used"""
+        # parse input data
+        http_content, _ = format_in(wim_edit_schema)
+        r = utils.remove_extra_items(http_content, wim_edit_schema)
+        if r:
+            self.logger.debug("Remove received extra items %s", r)
+
+        wim_id = self.engine.update_wim(wim_id, http_content['wim'])
+        return self.http_get_wim('any', wim_id)
+
+    @route('DELETE', '/wims/<wim_id>')
+    def http_delete_wim(self, wim_id):
+        """Delete a wim from a database, can use both uuid or name"""
+        data = self.engine.delete_wim(wim_id)
+        # TODO Remove WIM in orchestrator
+        return format_out({"result": "wim '" + data + "' deleted"})
+
+    @route('POST', '/<tenant_id>/wims/<wim_id>')
+    def http_create_wim_account(self, tenant_id, wim_id):
+        """Associate an existing wim to this tenant"""
+        # parse input data
+        http_content, _ = format_in(
+            wim_account_schema, confidential_data=True)
+        removed = utils.remove_extra_items(http_content, wim_account_schema)
+        removed and self.logger.debug("Remove extra items %r", removed)
+        account = self.engine.create_wim_account(
+            wim_id, tenant_id, http_content['wim_account'])
+        # check update succeeded
+        return format_out({"wim_account": account})
+
+    @route('PUT', '/<tenant_id>/wims/<wim_id>')
+    def http_update_wim_accounts(self, tenant_id, wim_id):
+        """Edit the association of an existing wim to this tenant"""
+        tenant_id = None if tenant_id == 'any' else tenant_id
+        # parse input data
+        http_content, _ = format_in(
+            wim_account_schema, confidential_data=True)
+        removed = utils.remove_extra_items(http_content, wim_account_schema)
+        removed and self.logger.debug("Remove extra items %r", removed)
+        accounts = self.engine.update_wim_accounts(
+            wim_id, tenant_id, http_content['wim_account'])
+
+        if tenant_id:
+            return format_out({'wim_account': accounts[0]})
+
+        return format_out({'wim_accounts': accounts})
+
+    @route('DELETE', '/<tenant_id>/wims/<wim_id>')
+    def http_delete_wim_accounts(self, tenant_id, wim_id):
+        """Deassociate an existing wim to this tenant"""
+        tenant_id = None if tenant_id == 'any' else tenant_id
+        accounts = self.engine.delete_wim_accounts(wim_id, tenant_id,
+                                                   error_if_none=True)
+
+        properties = (
+            (account['name'],
+             utils.safe_get(account, 'association.nfvo_tenant_id', tenant_id),
+             wim_id)
+            for account in accounts)
+
+        return format_out({
+            'result': '\n'.join('WIM account `{}` deleted. '
+                                'Tenant `{}` detached from WIM `{}`'
+                                .format(*p) for p in properties)
+        })
+
+    @route('POST', '/<tenant_id>/wims/<wim_id>/port_mapping')
+    def http_create_wim_port_mappings(self, tenant_id, wim_id):
+        """Set the wim port mapping for a wim"""
+        # parse input data
+        http_content, _ = format_in(wim_port_mapping_schema)
+
+        data = self.engine.create_wim_port_mappings(
+            wim_id, http_content['wim_port_mapping'], tenant_id)
+        return format_out({"wim_port_mapping": data})
+
+    @route('GET', '/<tenant_id>/wims/<wim_id>/port_mapping')
+    def http_get_wim_port_mappings(self, tenant_id, wim_id):
+        """Get wim port mapping details"""
+        # TODO: tenant_id is never used, so it should be removed
+        data = self.engine.get_wim_port_mappings(wim_id)
+        return format_out({"wim_port_mapping": data})
+
+    @route('DELETE', '/<tenant_id>/wims/<wim_id>/port_mapping')
+    def http_delete_wim_port_mappings(self, tenant_id, wim_id):
+        """Clean wim port mapping"""
+        # TODO: tenant_id is never used, so it should be removed
+        data = self.engine.delete_wim_port_mappings(wim_id)
+        return format_out({"result": data})
diff --git a/osm_ro/wim/persistence.py b/osm_ro/wim/persistence.py
new file mode 100644 (file)
index 0000000..8a74d49
--- /dev/null
@@ -0,0 +1,1018 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""This module contains only logic related to managing records in a database
+which includes data format normalization, data format validation and etc.
+(It works as an extension to `nfvo_db.py` for the WIM feature)
+
+No domain logic/architectural concern should be present in this file.
+"""
+import json
+import logging
+from contextlib import contextmanager
+from hashlib import sha1
+from itertools import groupby
+from operator import itemgetter
+from sys import exc_info
+from threading import Lock
+from time import time
+from uuid import uuid1 as generate_uuid
+
+from six import reraise
+
+import yaml
+
+from ..utils import (
+    check_valid_uuid,
+    convert_float_timestamp2str,
+    expand_joined_fields,
+    filter_dict_keys,
+    filter_out_dict_keys,
+    merge_dicts,
+    remove_none_items
+)
+from .errors import (
+    DbBaseException,
+    InvalidParameters,
+    MultipleRecordsFound,
+    NoRecordFound,
+    UndefinedUuidOrName,
+    UndefinedWanMappingType,
+    UnexpectedDatabaseError,
+    WimAccountOverwrite,
+    WimAndTenantAlreadyAttached
+)
+
+_WIM = 'wims AS wim '
+
+_WIM_JOIN = (
+    _WIM +
+    ' JOIN wim_nfvo_tenants AS association '
+    '   ON association.wim_id=wim.uuid '
+    ' JOIN nfvo_tenants AS nfvo_tenant '
+    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
+    ' JOIN wim_accounts AS wim_account '
+    '   ON association.wim_account_id=wim_account.uuid '
+)
+
+_WIM_ACCOUNT_JOIN = (
+    'wim_accounts AS wim_account '
+    ' JOIN wim_nfvo_tenants AS association '
+    '   ON association.wim_account_id=wim_account.uuid '
+    ' JOIN wims AS wim '
+    '   ON association.wim_id=wim.uuid '
+    ' JOIN nfvo_tenants AS nfvo_tenant '
+    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
+)
+
+_DATACENTER_JOIN = (
+    'datacenters AS datacenter '
+    ' JOIN tenants_datacenters AS association '
+    '   ON association.datacenter_id=datacenter.uuid '
+    ' JOIN datacenter_tenants as datacenter_account '
+    '   ON association.datacenter_tenant_id=datacenter_account.uuid '
+    ' JOIN nfvo_tenants AS nfvo_tenant '
+    '   ON association.nfvo_tenant_id=nfvo_tenant.uuid '
+)
+
+_PORT_MAPPING = 'wim_port_mappings as wim_port_mapping '
+
+_PORT_MAPPING_JOIN_WIM = (
+    ' JOIN wims as wim '
+    '   ON wim_port_mapping.wim_id=wim.uuid '
+)
+
+_PORT_MAPPING_JOIN_DATACENTER = (
+    ' JOIN datacenters as datacenter '
+    '   ON wim_port_mapping.datacenter_id=datacenter.uuid '
+)
+
+_WIM_SELECT = [
+    'wim.{0} as {0}'.format(_field)
+    for _field in 'uuid name description wim_url type config '
+                  'created_at modified_at'.split()
+]
+
+_WIM_ACCOUNT_SELECT = 'uuid name user password config'.split()
+
+_PORT_MAPPING_SELECT = ('wim_port_mapping.*', )
+
+_CONFIDENTIAL_FIELDS = ('password', 'passwd')
+
+_SERIALIZED_FIELDS = ('config', 'vim_info', 'wim_info', 'conn_info', 'extra',
+                      'wan_service_mapping_info')
+
+UNIQUE_PORT_MAPPING_INFO_FIELDS = {
+    'dpid-port': ('wan_switch_dpid', 'wan_switch_port')
+}
+"""Fields that should be unique for each port mapping that relies on
+wan_service_mapping_info.
+
+For example, for port mappings of type 'dpid-port', each combination of
+wan_switch_dpid and wan_switch_port should be unique (the same switch cannot
+be connected to two different places using the same port)
+"""
+
+
+class WimPersistence(object):
+    """High level interactions with the WIM tables in the database"""
+
+    def __init__(self, db, logger=None, lock=None):
+        self.db = db
+        self.logger = logger or logging.getLogger('openmano.wim.persistence')
+        self.lock = lock or Lock()
+
+    def query(self,
+              FROM=None,
+              SELECT=None,
+              WHERE=None,
+              ORDER_BY=None,
+              LIMIT=None,
+              OFFSET=None,
+              error_if_none=True,
+              error_if_multiple=False,
+              postprocess=None,
+              hide=_CONFIDENTIAL_FIELDS,
+              **kwargs):
+        """Retrieve records from the database.
+
+        Keyword Arguments:
+            SELECT, FROM, WHERE, LIMIT, ORDER_BY: used to compose the SQL
+                query. See ``nfvo_db.get_rows``.
+            OFFSET: only valid when used together with LIMIT.
+                    Skip the first OFFSET results of the query.
+            error_if_none: by default an error is raised if no record is
+                found. With this option it is possible to disable this error.
+            error_if_multiple: by default no error is raised if more than one
+                record is found.
+                With this option it is possible to enable this error.
+            postprocess: function applied to every retrieved record.
+                This function receives a dict as input and must return it
+                after modifications. Moreover this function should accept a
+                second optional parameter ``hide`` indicating
+                the confidential fields to be obfuscated.
+                By default a minimal postprocessing function is applied,
+                obfuscating confidential fields and converting timestamps.
+            hide: option proxied to postprocess
+
+        All the remaining keyword arguments will be assumed to be ``name``s or
+        ``uuid``s to compose the WHERE statement, according to their format.
+        If the value corresponds to an array, the first element will determine
+        whether it is a name or a UUID.
+
+        For example:
+            - ``wim="abcdef"``` will be turned into ``wim.name="abcdef"``,
+            - ``datacenter="5286a274-8a1b-4b8d-a667-9c94261ad855"``
+               will be turned into
+               ``datacenter.uuid="5286a274-8a1b-4b8d-a667-9c94261ad855"``.
+            - ``wim=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
+               will be turned into
+               ``wim.uuid=["5286a274-8a1b-4b8d-a667-9c94261ad855", ...]``
+
+        Raises:
+            NoRecordFound: if the query result set is empty
+            DbBaseException: errors occurring during the execution of the query.
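+
+        Example:
+            An illustrative call (the names below are placeholders)::
+
+                persistence.query(FROM='wims AS wim', wim='my-wim',
+                                  error_if_none=False)
+
+            Since ``'my-wim'`` is not a valid UUID, the extra keyword is
+            translated into ``WHERE={'wim.name': 'my-wim'}`` before the
+            underlying ``db.get_rows`` call.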
+        """
+        # Defaults:
+        postprocess = postprocess or _postprocess_record
+        WHERE = WHERE or {}
+
+        # Find remaining keywords by name or uuid
+        WHERE.update(_compose_where_from_uuids_or_names(**kwargs))
+        WHERE = WHERE or None
+        # ^ If the where statement is empty, it is better to leave it as None,
+        #   so it can be filtered out at a later stage
+        LIMIT = ('{:d},{:d}'.format(OFFSET, LIMIT)
+                 if LIMIT and OFFSET else LIMIT)
+
+        query = remove_none_items({
+            'SELECT': SELECT, 'FROM': FROM, 'WHERE': WHERE,
+            'LIMIT': LIMIT, 'ORDER_BY': ORDER_BY})
+
+        with self.lock:
+            records = self.db.get_rows(**query)
+
+        table = FROM.split()[0]
+        if error_if_none and not records:
+            raise NoRecordFound(WHERE, table)
+
+        if error_if_multiple and len(records) > 1:
+            self.logger.error('Multiple records '
+                              'FROM %s WHERE %s:\n\n%s\n\n',
+                              FROM, WHERE, json.dumps(records, indent=4))
+            raise MultipleRecordsFound(WHERE, table)
+
+        return [
+            expand_joined_fields(postprocess(record, hide))
+            for record in records
+        ]
+
+    def query_one(self, *args, **kwargs):
+        """Similar to ``query``, but ensuring just one result.
+        ``error_if_multiple`` is enabled by default.
+        """
+        kwargs.setdefault('error_if_multiple', True)
+        records = self.query(*args, **kwargs)
+        return records[0] if records else None
+
+    def get_by_uuid(self, table, uuid, **kwargs):
+        """Retrieve one record from the database based on its uuid
+
+        Arguments:
+            table (str): table name (to be used in SQL's FROM statement).
+            uuid (str): unique identifier for record.
+
+        For additional keyword arguments and exceptions see :obj:`~.query`
+        (``error_if_multiple`` is enabled by default).
+        """
+        if uuid is None:
+            raise UndefinedUuidOrName(table)
+        return self.query_one(table, WHERE={'uuid': uuid}, **kwargs)
+
+    def get_by_name_or_uuid(self, table, uuid_or_name, **kwargs):
+        """Retrieve a record from the database based on a value that can be its
+        uuid or name.
+
+        Arguments:
+            table (str): table name (to be used in SQL's FROM statement).
+            uuid_or_name (str): this value can correspond to either uuid or
+                name
+        For additional keyword arguments and exceptions see :obj:`~.query`
+        (``error_if_multiple`` is enabled by default).
+        """
+        if uuid_or_name is None:
+            raise UndefinedUuidOrName(table)
+
+        key = 'uuid' if check_valid_uuid(uuid_or_name) else 'name'
+        return self.query_one(table, WHERE={key: uuid_or_name}, **kwargs)
+
+    def get_wims(self, uuid_or_name=None, tenant=None, **kwargs):
+        """Retrieve information about one or more WIMs stored in the database
+
+        Arguments:
+            uuid_or_name (str): uuid or name for WIM
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        kwargs.update(wim=uuid_or_name, tenant=tenant)
+        from_ = _WIM_JOIN if tenant else _WIM
+        select_ = _WIM_SELECT[:] + (['wim_account.*'] if tenant else [])
+
+        kwargs.setdefault('SELECT', select_)
+        return self.query(from_, **kwargs)
+
+    def get_wim(self, wim, tenant=None, **kwargs):
+        """Similar to ``get_wims`` but ensure only one result is returned"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_wims(wim, tenant, **kwargs)[0]
+
+    def create_wim(self, wim_descriptor):
+        """Create a new wim record inside the database and returns its uuid
+
+        Arguments:
+            wim_descriptor (dict): properties of the record
+                (usually each field corresponds to a database column, but extra
+                information can be offloaded to another table or serialized as
+                JSON/YAML)
+        Returns:
+            str: UUID of the created WIM
+        """
+        if "config" in wim_descriptor:
+            wim_descriptor["config"] = _serialize(wim_descriptor["config"])
+
+        with self.lock:
+            return self.db.new_row(
+                "wims", wim_descriptor, add_uuid=True, confidential_data=True)
+
+    def update_wim(self, uuid_or_name, wim_descriptor):
+        """Change an existing WIM record on the database"""
+        # obtain data, check that only one exist
+        wim = self.get_by_name_or_uuid('wims', uuid_or_name)
+
+        # edit data
+        wim_id = wim['uuid']
+        where = {'uuid': wim['uuid']}
+
+        # unserialize config, edit and serialize it again
+        if wim_descriptor.get('config'):
+            new_config_dict = wim_descriptor["config"]
+            config_dict = remove_none_items(merge_dicts(
+                wim.get('config') or {}, new_config_dict))
+            wim_descriptor['config'] = (
+                _serialize(config_dict) if config_dict else None)
+
+        with self.lock:
+            self.db.update_rows('wims', wim_descriptor, where)
+
+        return wim_id
+
+    def delete_wim(self, wim):
+        # get nfvo_tenant info
+        wim = self.get_by_name_or_uuid('wims', wim)
+
+        with self.lock:
+            self.db.delete_row_by_id('wims', wim['uuid'])
+
+        return wim['uuid'] + ' ' + wim['name']
+
+    def get_wim_accounts_by(self, wim=None, tenant=None, uuid=None, **kwargs):
+        """Retrieve WIM account information from the database together
+        with the related records (wim, nfvo_tenant and wim_nfvo_tenant)
+
+        Arguments:
+            wim (str): uuid or name for WIM
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        kwargs.update(wim=wim, tenant=tenant)
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        if uuid:
+            kwargs.setdefault('WHERE', {'wim_account.uuid': uuid})
+        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
+
+    def get_wim_account_by(self, wim=None, tenant=None, **kwargs):
+        """Similar to ``get_wim_accounts_by``, but ensuring just one result"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_wim_accounts_by(wim, tenant, **kwargs)[0]
+
+    def get_wim_accounts(self, **kwargs):
+        """Retrieve all the accounts from the database"""
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        return self.query(FROM=_WIM_ACCOUNT_JOIN, **kwargs)
+
+    def get_wim_account(self, uuid_or_name, **kwargs):
+        """Retrieve WIM Account record by UUID or name,
+        See :obj:`get_by_name_or_uuid` for keyword arguments.
+        """
+        kwargs.setdefault('postprocess', _postprocess_wim_account)
+        kwargs.setdefault('SELECT', _WIM_ACCOUNT_SELECT)
+        return self.get_by_name_or_uuid('wim_accounts', uuid_or_name, **kwargs)
+
+    @contextmanager
+    def _associate(self, wim_id, nfvo_tenant_id):
+        """Auxiliary method for ``create_wim_account``
+
+        This context manager wraps the creation of a row in the association
+        table ``wim_nfvo_tenants``, translating duplicated-entry errors into
+        ``WimAndTenantAlreadyAttached``
+        """
+        try:
+            with self.lock:
+                yield
+        except DbBaseException as db_exception:
+            error_msg = str(db_exception)
+            if all([msg in error_msg
+                    for msg in ("already in use", "'wim_nfvo_tenant'")]):
+                ex = WimAndTenantAlreadyAttached(wim_id, nfvo_tenant_id)
+                reraise(ex.__class__, ex, exc_info()[2])
+
+            raise
+
+    def create_wim_account(self, wim, tenant, properties):
+        """Associate a wim to a tenant using the ``wim_nfvo_tenants`` table
+        and create a ``wim_account`` to store credentials and configurations.
+
+        For the sake of simplification, we assume that each NFVO tenant can be
+        attached to a WIM using only one WIM account. This is automatically
+        guaranteed via database constraints.
+        For corner cases, the same WIM can be registered twice using another
+        name.
+
+        Arguments:
+            wim (str): name or uuid of the WIM related to the account being
+                created
+            tenant (str): name or uuid of the nfvo tenant to which the account
+                will be created
+            properties (dict): properties of the account
+                (eg. user, password, ...)
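+
+        Example:
+            An illustrative ``properties`` dict (all values here are
+            placeholders)::
+
+                {'name': 'my-account',
+                 'user': 'admin',
+                 'password': 'secret',
+                 'config': {'some_option': 'some_value'}}
+
+            When ``name`` is omitted, it defaults to the tenant name.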
+        """
+        wim_id = self.get_by_name_or_uuid('wims', wim, SELECT=['uuid'])['uuid']
+        tenant = self.get_by_name_or_uuid('nfvo_tenants', tenant,
+                                          SELECT=['uuid', 'name'])
+        account = properties.setdefault('name', tenant['name'])
+
+        wim_account = self.query_one('wim_accounts',
+                                     WHERE={'wim_id': wim_id, 'name': account},
+                                     error_if_none=False)
+
+        transaction = []
+        used_uuids = []
+
+        if wim_account is None:
+            # If a row for the wim account doesn't exist yet, we need to
+            # create one, otherwise we can just re-use it.
+            account_id = str(generate_uuid())
+            used_uuids.append(account_id)
+            row = merge_dicts(properties, wim_id=wim_id, uuid=account_id)
+            transaction.append({'wim_accounts': _preprocess_wim_account(row)})
+        else:
+            account_id = wim_account['uuid']
+            properties.pop('config', None)  # Config is too complex to compare
+            diff = {k: v for k, v in properties.items() if v != wim_account[k]}
+            if diff:
+                tip = 'Edit the account first, and then attach it to a tenant'
+                raise WimAccountOverwrite(wim_account, diff, tip)
+
+        transaction.append({
+            'wim_nfvo_tenants': {'nfvo_tenant_id': tenant['uuid'],
+                                 'wim_id': wim_id,
+                                 'wim_account_id': account_id}})
+
+        with self._associate(wim_id, tenant['uuid']):
+            self.db.new_rows(transaction, used_uuids, confidential_data=True)
+
+        return account_id
+
+    def update_wim_account(self, uuid, properties, hide=_CONFIDENTIAL_FIELDS):
+        """Update WIM account record by overwriting fields with new values
+
+        In particular, for the ``config`` field this means that the new dict
+        will be merged into the existing one.
+
+        Arguments:
+            uuid (str): UUID for the WIM account
+            properties (dict): fields that should be overwritten
+
+        Returns:
+            Updated wim_account
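+
+        Example:
+            Assuming (hypothetically) that the stored ``config`` is
+            ``{'a': 1}``, calling
+            ``update_wim_account(uuid, {'config': {'b': 2}})`` stores the
+            merged ``config`` ``{'a': 1, 'b': 2}``.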
+        """
+        wim_account = self.get_by_uuid('wim_accounts', uuid)
+        safe_fields = 'user password name created'.split()
+        updates = _preprocess_wim_account(
+            merge_dicts(wim_account, filter_dict_keys(properties, safe_fields))
+        )
+
+        if properties.get('config'):
+            old_config = wim_account.get('config') or {}
+            new_config = merge_dicts(old_config, properties['config'])
+            updates['config'] = _serialize(new_config)
+
+        with self.lock:
+            num_changes = self.db.update_rows(
+                'wim_accounts', UPDATE=updates,
+                WHERE={'uuid': wim_account['uuid']})
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError('Impossible to update wim_account '
+                                          '{name}:{uuid}'.format(**wim_account))
+
+        return self.get_wim_account(wim_account['uuid'], hide=hide)
+
+    def delete_wim_account(self, uuid):
+        """Remove WIM account record from the database"""
+        # Since we have foreign keys configured with ON CASCADE, we can rely
+        # on the database engine to guarantee consistency, deleting the
+        # dependent records
+        with self.lock:
+            return self.db.delete_row_by_id('wim_accounts', uuid)
+
+    def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):
+        """Retrieve datacenter information from the database together
+        with the related records (nfvo_tenant)
+
+        Arguments:
+            datacenter (str): uuid or name for datacenter
+            tenant (str): [optional] uuid or name for NFVO tenant
+
+        See :obj:`~.query` for additional keyword arguments.
+        """
+        kwargs.update(datacenter=datacenter, tenant=tenant)
+        return self.query(_DATACENTER_JOIN, **kwargs)
+
+    def get_datacenter_by(self, datacenter=None, tenant=None, **kwargs):
+        """Similar to ``get_datacenters_by``, but ensuring just one result"""
+        kwargs.setdefault('error_if_multiple', True)
+        return self.get_datacenters_by(datacenter, tenant, **kwargs)[0]
+
+    def _create_single_port_mapping(self, properties):
+        info = properties.setdefault('wan_service_mapping_info', {})
+        endpoint_id = properties.get('wan_service_endpoint_id')
+
+        if info.get('mapping_type') and not endpoint_id:
+            properties['wan_service_endpoint_id'] = (
+                self._generate_port_mapping_id(info))
+
+        properties['wan_service_mapping_info'] = _serialize(info)
+
+        try:
+            with self.lock:
+                self.db.new_row('wim_port_mappings', properties,
+                                add_uuid=False, confidential_data=True)
+        except DbBaseException as old_exception:
+            self.logger.exception(old_exception)
+            ex = InvalidParameters(
+                "The mapping must contain the "
+                "'pop_switch_dpid', 'pop_switch_port',  and "
+                "wan_service_mapping_info: "
+                "('wan_switch_dpid' and 'wan_switch_port') or "
+                "'wan_service_endpoint_id}'")
+            reraise(ex.__class__, ex, exc_info()[2])
+
+        return properties
+
+    def create_wim_port_mappings(self, wim, port_mappings, tenant=None):
+        if not isinstance(wim, dict):
+            wim = self.get_by_name_or_uuid('wims', wim)
+
+        for port_mapping in port_mappings:
+            port_mapping['wim_name'] = wim['name']
+            datacenter = self.get_datacenter_by(
+                port_mapping['datacenter_name'], tenant)
+            for pop_wan_port_mapping in port_mapping['pop_wan_mappings']:
+                element = merge_dicts(pop_wan_port_mapping, {
+                    'wim_id': wim['uuid'],
+                    'datacenter_id': datacenter['uuid']})
+                self._create_single_port_mapping(element)
+
+        return port_mappings
+
+    def _filter_port_mappings_by_tenant(self, mappings, tenant):
+        """Make sure all the datacenters and wims listed in the port mapping
+        belong to a specific tenant
+        """
+
+        # NOTE: Theoretically this could be done at SQL level, but given the
+        #       number of tables involved (wim_port_mappings, wim_accounts,
+        #       wims, wim_nfvo_tenants, datacenters, datacenter_tenants,
+        #       tenants_datacenters and nfvo_tenants), it would result in an
+        #       extremely complex query. Moreover, the predicate can vary:
+        #       for `get_wim_port_mappings` we can have any combination of
+        #       (wim, datacenter, tenant), with not all of the 3 values
+        #       necessarily present, so composing the 'FROM' statement
+        #       becomes a combinatorial problem.
+
+        kwargs = {'tenant': tenant, 'error_if_none': False}
+        # Cache results to speed things up
+        datacenters = {}
+        wims = {}
+
+        def _get_datacenter(uuid):
+            return (
+                datacenters.get(uuid) or
+                datacenters.setdefault(
+                    uuid, self.get_datacenters_by(uuid, **kwargs)))
+
+        def _get_wims(uuid):
+            return (wims.get(uuid) or
+                    wims.setdefault(uuid, self.get_wims(uuid, **kwargs)))
+
+        return [
+            mapping
+            for mapping in mappings
+            if (_get_datacenter(mapping['datacenter_id']) and
+                _get_wims(mapping['wim_id']))
+        ]
+
+    def get_wim_port_mappings(self, wim=None, datacenter=None, tenant=None,
+                              **kwargs):
+        """List all the port mappings, optionally filtering by wim, datacenter
+        AND/OR tenant
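+
+        Example:
+            Sketch of the returned structure (uuids and values are
+            placeholders)::
+
+                [{'wim_id': '<wim-uuid>',
+                  'datacenter_id': '<dc-uuid>',
+                  'wan_pop_port_mappings': [
+                      {'pop_switch_dpid': ..., 'pop_switch_port': ...,
+                       'wan_service_endpoint_id': ...,
+                       'wan_service_mapping_info': {...}}]}]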
+        """
+        from_ = [_PORT_MAPPING,
+                 _PORT_MAPPING_JOIN_WIM if wim else '',
+                 _PORT_MAPPING_JOIN_DATACENTER if datacenter else '']
+
+        criteria = ('wim_id', 'datacenter_id')
+        kwargs.setdefault('error_if_none', False)
+        mappings = self.query(
+            ' '.join(from_),
+            SELECT=_PORT_MAPPING_SELECT,
+            ORDER_BY=['wim_port_mapping.{}'.format(c) for c in criteria],
+            wim=wim, datacenter=datacenter,
+            postprocess=_postprocess_wim_port_mapping,
+            **kwargs)
+
+        if tenant:
+            mappings = self._filter_port_mappings_by_tenant(mappings, tenant)
+
+        # We don't have to sort, since we have used 'ORDER_BY'
+        grouped_mappings = groupby(mappings, key=itemgetter(*criteria))
+
+        return [
+            {'wim_id': key[0],
+             'datacenter_id': key[1],
+             'wan_pop_port_mappings': [
+                 filter_out_dict_keys(mapping, (
+                     'id', 'wim_id', 'datacenter_id',
+                     'created_at', 'modified_at'))
+                 for mapping in group]}
+            for key, group in grouped_mappings
+        ]
+
+    def delete_wim_port_mappings(self, wim_id):
+        with self.lock:
+            self.db.delete_row(FROM='wim_port_mappings',
+                               WHERE={"wim_id": wim_id})
+        return "port mapping for wim {} deleted.".format(wim_id)
+
+    def update_wim_port_mapping(self, id, properties):
+        original = self.query_one('wim_port_mappings', WHERE={'id': id})
+
+        mapping_info = remove_none_items(merge_dicts(
+            original.get('wan_service_mapping_info') or {},
+            properties.get('wan_service_mapping_info') or {}))
+
+        updates = preprocess_record(
+            merge_dicts(original, remove_none_items(properties),
+                        wan_service_mapping_info=mapping_info))
+
+        with self.lock:
+            num_changes = self.db.update_rows(
+                'wim_port_mappings', UPDATE=updates, WHERE={'id': id})
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError(
+                'Impossible to update wim_port_mappings {}:\n{}\n'.format(
+                    id, _serialize(properties)))
+
+        return num_changes
+
+    def get_actions_in_groups(self, wim_account_id,
+                              item_types=('instance_wim_nets',),
+                              group_offset=0, group_limit=150):
+        """Retrieve actions from the database in groups.
+        Each group contains all the actions that have the same ``item`` type
+        and ``item_id``.
+
+        Arguments:
+            wim_account_id: restrict the search to actions to be performed
+                using the same account
+            item_types (list): [optional] filter the actions to the given
+                item types
+            group_limit (int): maximum number of groups returned by the
+                function
+            group_offset (int): skip the N first groups. Used together with
+                group_limit for pagination purposes.
+
+        Returns:
+            List of groups, where each group is a tuple ``(key, actions)``.
+            In turn, ``key`` is a tuple containing the values of
+            ``(item, item_id)`` used to create the group and ``actions`` is a
+            list of ``vim_wim_actions`` records (dicts).
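+
+        Example:
+            Sketch of the returned structure (values are placeholders)::
+
+                [(('instance_wim_nets', '<item-uuid>'),
+                  [{'item': 'instance_wim_nets',
+                    'item_id': '<item-uuid>',
+                    'instance_action_id': ..., 'task_index': 0, ...}]),
+                 ...]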
+        """
+
+        type_options = set(
+            '"{}"'.format(self.db.escape_string(t)) for t in item_types)
+
+        items = ('SELECT DISTINCT a.item, a.item_id, a.wim_account_id '
+                 'FROM vim_wim_actions AS a '
+                 'WHERE a.wim_account_id="{}" AND a.item IN ({}) '
+                 'ORDER BY a.item, a.item_id '
+                 'LIMIT {:d},{:d}').format(
+                     self.safe_str(wim_account_id),
+                     ','.join(type_options),
+                     group_offset, group_limit
+                 )
+
+        join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
+        with self.lock:
+            db_results = self.db.get_rows(
+                FROM=join, ORDER_BY=('item', 'item_id', 'created_at'))
+
+        results = (_postprocess_action(r) for r in db_results)
+        criteria = itemgetter('item', 'item_id')
+        return [(k, list(g)) for k, g in groupby(results, key=criteria)]
+
+    def update_action(self, instance_action_id, task_index, properties):
+        condition = {'instance_action_id': instance_action_id,
+                     'task_index': task_index}
+        action = self.query_one('vim_wim_actions', WHERE=condition)
+
+        extra = remove_none_items(merge_dicts(
+            action.get('extra') or {},
+            properties.get('extra') or {}))
+
+        updates = preprocess_record(
+            merge_dicts(action, properties, extra=extra))
+
+        with self.lock:
+            num_changes = self.db.update_rows('vim_wim_actions',
+                                              UPDATE=updates, WHERE=condition)
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError(
+                'Impossible to update vim_wim_actions '
+                '{instance_action_id}[{task_index}]'.format(**action))
+
+        return num_changes
+
+    def get_wan_links(self, uuid=None, **kwargs):
+        """Retrieve WAN link records from the database
+
+        Keyword Arguments:
+            uuid, instance_scenario_id, sce_net_id, wim_id, wim_account_id:
+                attributes that can be used at the WHERE clause
+        """
+        kwargs.setdefault('uuid', uuid)
+        kwargs.setdefault('error_if_none', False)
+
+        criteria_fields = ('uuid', 'instance_scenario_id', 'sce_net_id',
+                           'wim_id', 'wim_account_id')
+        criteria = remove_none_items(filter_dict_keys(kwargs, criteria_fields))
+        kwargs = filter_out_dict_keys(kwargs, criteria_fields)
+
+        return self.query('instance_wim_nets', WHERE=criteria, **kwargs)
+
+    def update_wan_link(self, uuid, properties):
+        wan_link = self.get_by_uuid('instance_wim_nets', uuid)
+
+        wim_info = remove_none_items(merge_dicts(
+            wan_link.get('wim_info') or {},
+            properties.get('wim_info') or {}))
+
+        updates = preprocess_record(
+            merge_dicts(wan_link, properties, wim_info=wim_info))
+
+        self.logger.debug({'UPDATE': updates})
+        with self.lock:
+            num_changes = self.db.update_rows(
+                'instance_wim_nets', UPDATE=updates,
+                WHERE={'uuid': wan_link['uuid']})
+
+        if num_changes is None:
+            raise UnexpectedDatabaseError(
+                'Impossible to update instance_wim_nets ' + wan_link['uuid'])
+
+        return num_changes
+
+    def get_instance_nets(self, instance_scenario_id, sce_net_id, **kwargs):
+        """Retrieve all the instance nets related to the same instance_scenario
+        and scenario network
+        """
+        return self.query(
+            'instance_nets',
+            WHERE={'instance_scenario_id': instance_scenario_id,
+                   'sce_net_id': sce_net_id},
+            ORDER_BY=kwargs.pop(
+                'ORDER_BY', ('instance_scenario_id', 'sce_net_id')),
+            **kwargs)
+
+    def update_instance_action_counters(self, uuid, failed=None, done=None):
+        """Atomically increment/decrement number_done and number_failed fields
+        in the instance action table
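+
+        For example, ``update_instance_action_counters(uuid, done=1)``
+        issues an UPDATE with ``{'number_done': {'INCREMENT': 1}}`` for the
+        row matching ``uuid``.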
+        """
+        changes = remove_none_items({
+            'number_failed': failed and {'INCREMENT': failed},
+            'number_done': done and {'INCREMENT': done}
+        })
+
+        if not changes:
+            return 0
+
+        with self.lock:
+            return self.db.update_rows('instance_actions',
+                                       WHERE={'uuid': uuid}, UPDATE=changes)
+
+    def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
+        """Return an instance VM if that is the only VM connected to an
+        external network identified by instance_net_id
+        """
+        counting = ('SELECT DISTINCT instance_net_id '
+                    'FROM instance_interfaces '
+                    'WHERE instance_net_id="{}" AND type="external" '
+                    'GROUP BY instance_net_id '
+                    'HAVING COUNT(*)=1').format(self.safe_str(instance_net_id))
+
+        vm_item = ('SELECT DISTINCT instance_vm_id '
+                   'FROM instance_interfaces NATURAL JOIN ({}) AS a'
+                   .format(counting))
+
+        return self.query_one(
+            'instance_vms JOIN ({}) as instance_interface '
+            'ON instance_vms.uuid=instance_interface.instance_vm_id'
+            .format(vm_item), **kwargs)
+
+    def safe_str(self, string):
+        """Return a SQL safe string"""
+        return self.db.escape_string(string)
+
+    def _generate_port_mapping_id(self, mapping_info):
+        """Given a port mapping represented by a dict with a 'type' field,
+        generate a unique string, in a injective way.
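+
+        For example, for a ``mapping_info`` of type ``'dpid-port'`` the
+        result has the form ``'dpid-port:<sha1>'``, where the hash is
+        computed over the unique fields ``wan_switch_dpid`` and
+        ``wan_switch_port``.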
+        """
+        mapping_info = mapping_info.copy()  # Avoid mutating original object
+        mapping_type = mapping_info.pop('mapping_type', None)
+        if not mapping_type:
+            raise UndefinedWanMappingType(mapping_info)
+
+        unique_fields = UNIQUE_PORT_MAPPING_INFO_FIELDS.get(mapping_type)
+
+        if unique_fields:
+            mapping_info = filter_dict_keys(mapping_info, unique_fields)
+        else:
+            self.logger.warning('Unique fields for WIM port mapping of type '
+                                '%s not defined. Please add a list of fields '
+                                'whose combination should be unique in '
+                                'UNIQUE_PORT_MAPPING_INFO_FIELDS '
+                                '(`wim/persistence.py`)', mapping_type)
+
+        repeatable_repr = json.dumps(mapping_info, encoding='utf-8',
+                                     sort_keys=True, indent=False)
+
+        return ':'.join([mapping_type, _str2id(repeatable_repr)])
+
+
+def _serialize(value):
+    """Serialize an arbitrary value in a consistent way,
+    so it can be stored in a database inside a text field
+    """
+    return yaml.safe_dump(value, default_flow_style=True, width=256)
+
+
+def _unserialize(text):
+    """Unserialize text representation into an arbitrary value,
+    so it can be loaded from the database
+    """
+    return yaml.safe_load(text)
+
+
+def preprocess_record(record):
+    """Small transformations to be applied to the data that cames from the
+    user before writing it to the database. By default, filter out timestamps,
+    and serialize the ``config`` field.
+    """
+    automatic_fields = ['created_at', 'modified_at']
+    record = serialize_fields(filter_out_dict_keys(record, automatic_fields))
+
+    return record
+
+
+def _preprocess_wim_account(wim_account):
+    """Do the default preprocessing and convert the 'created' field from
+    boolean to string
+    """
+    wim_account = preprocess_record(wim_account)
+
+    created = wim_account.get('created')
+    wim_account['created'] = (
+        'true' if created is True or created == 'true' else 'false')
+
+    return wim_account
+
+
+def _postprocess_record(record, hide=_CONFIDENTIAL_FIELDS):
+    """By default, hide passwords fields, unserialize ``config`` fields, and
+    convert float timestamps to strings
+    """
+    record = hide_confidential_fields(record, hide)
+    record = unserialize_fields(record, hide)
+
+    convert_float_timestamp2str(record)
+
+    return record
+
+
+def _postprocess_action(action):
+    if action.get('extra'):
+        action['extra'] = _unserialize(action['extra'])
+
+    return action
+
+
+def _postprocess_wim_account(wim_account, hide=_CONFIDENTIAL_FIELDS):
+    """Do the default postprocessing and convert the 'created' field from
+    string to boolean
+    """
+    # Fix fields from join
+    for field in ('type', 'description', 'wim_url'):
+        if field in wim_account:
+            wim_account['wim.'+field] = wim_account.pop(field)
+
+    for field in ('id', 'nfvo_tenant_id', 'wim_account_id'):
+        if field in wim_account:
+            wim_account['association.'+field] = wim_account.pop(field)
+
+    wim_account = _postprocess_record(wim_account, hide)
+
+    created = wim_account.get('created')
+    wim_account['created'] = (created is True or created == 'true')
+
+    return wim_account
+
+
+def _postprocess_wim_port_mapping(mapping, hide=_CONFIDENTIAL_FIELDS):
+    mapping = _postprocess_record(mapping, hide=hide)
+    mapping_info = mapping.get('wan_service_mapping_info', None) or {}
+    mapping['wan_service_mapping_info'] = mapping_info
+    return mapping
+
+
+def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):
+    """Obfuscate confidential fields from the input dict.
+
+    Note:
+        This function performs a SHALLOW operation.
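+
+    Example:
+        ``hide_confidential_fields({'user': 'admin', 'password': 'x'})``
+        returns ``{'user': 'admin', 'password': '********'}``
+        (the values here are placeholders).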
+    """
+    if not(isinstance(record, dict) and fields):
+        return record
+
+    keys = record.iterkeys()
+    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
+
+    return merge_dicts(record, {k: '********' for k in keys if record[k]})
+
+
+def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,
+                       fields=_SERIALIZED_FIELDS):
+    """Unserialize fields that where stored in the database as a serialized
+    YAML (or JSON)
+    """
+    keys = record.iterkeys()
+    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
+
+    return merge_dicts(record, {
+        key: hide_confidential_fields(_unserialize(record[key]), hide)
+        for key in keys if record[key]
+    })
+
+
+def serialize_fields(record, fields=_SERIALIZED_FIELDS):
+    """Serialize fields to be stored in the database as YAML"""
+    keys = record.iterkeys()
+    keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))
+
+    return merge_dicts(record, {
+        key: _serialize(record[key])
+        for key in keys if record[key] is not None
+    })
+
+
+def _decide_name_or_uuid(value):
+    reference = value
+
+    if isinstance(value, (list, tuple)):
+        reference = value[0] if value else ''
+
+    return 'uuid' if check_valid_uuid(reference) else 'name'
+
+
+def _compose_where_from_uuids_or_names(**conditions):
+    """Create a dict containing the right conditions to be used in a database
+    query.
+
+    This function chooses between ``names`` and ``uuid`` fields based on the
+    format of the passed string.
+    If a list is passed, the first element of the list will be used to choose
+    the name of the field.
+    If a ``None`` value is passed, ``uuid`` is used.
+
+    Note that this function automatically translates ``tenant`` to
+    ``nfvo_tenant`` for the sake of brevity.
+
+    Example:
+        >>> _compose_where_from_uuids_or_names(
+                wim='abcdef',
+                tenant=['xyz123', 'def456'],
+                datacenter='5286a274-8a1b-4b8d-a667-9c94261ad855')
+        {'wim.name': 'abcdef',
+         'nfvo_tenant.name': ['xyz123', 'def456'],
+         'datacenter.uuid': '5286a274-8a1b-4b8d-a667-9c94261ad855'}
+    """
+    if 'tenant' in conditions:
+        conditions['nfvo_tenant'] = conditions.pop('tenant')
+
+    return {
+        '{}.{}'.format(kind, _decide_name_or_uuid(value)): value
+        for kind, value in conditions.items() if value
+    }
+
+
+def _str2id(text):
+    """Create an ID (following the UUID format) from a piece of arbitrary
+    text.
+
+    Different texts should generate different IDs, and the same text should
+    generate the same ID in a repeatable way.
+    """
+    return sha1(text).hexdigest()
diff --git a/osm_ro/wim/schemas.py b/osm_ro/wim/schemas.py
new file mode 100644 (file)
index 0000000..a040405
--- /dev/null
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from ..openmano_schemas import (
+    description_schema,
+    name_schema,
+    nameshort_schema
+)
+
+# WIM -------------------------------------------------------------------------
+wim_types = ["tapi", "onos", "odl"]
+
+wim_schema_properties = {
+    "name": name_schema,
+    "description": description_schema,
+    "type": {
+        "type": "string",
+        "enum": ["tapi", "onos", "odl"]
+    },
+    "wim_url": description_schema,
+    "config": {"type": "object"}
+}
+
+wim_schema = {
+    "title": "wim information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim": {
+            "type": "object",
+            "properties": wim_schema_properties,
+            "required": ["name", "type", "wim_url"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["wim"],
+    "additionalProperties": False
+}
+
+wim_edit_schema = {
+    "title": "wim edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim": {
+            "type": "object",
+            "properties": wim_schema_properties,
+            "additionalProperties": False
+        }
+    },
+    "required": ["wim"],
+    "additionalProperties": False
+}
+
+wim_account_schema = {
+    "title": "wim account information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type": "object",
+    "properties": {
+        "wim_account": {
+            "type": "object",
+            "properties": {
+                "name": name_schema,
+                "user": nameshort_schema,
+                "password": nameshort_schema,
+                "config": {"type": "object"}
+            },
+            "additionalProperties": True
+        }
+    },
+    "required": ["wim_account"],
+    "additionalProperties": False
+}
+
+dpid_type = {
+    "type": "string",
+    "pattern":
+        "^[0-9a-zA-Z]+(:[0-9a-zA-Z]+)*$"
+}
+
+port_type = {
+    "oneOf": [
+        {"type": "string",
+         "minLength": 1,
+         "maxLength": 5},
+        {"type": "integer",
+         "minimum": 1,
+         "maximum": 65534}
+    ]
+}
+
+wim_port_mapping_schema = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "title": "wim mapping information schema",
+    "type": "object",
+    "properties": {
+        "wim_port_mapping": {
+            "type": "array",
+            "items": {
+                "type": "object",
+                "properties": {
+                    "datacenter_name": nameshort_schema,
+                    "pop_wan_mappings": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "pop_switch_dpid": dpid_type,
+                                "pop_switch_port": port_type,
+                                "wan_service_endpoint_id": name_schema,
+                                "wan_service_mapping_info": {
+                                    "type": "object",
+                                    "properties": {
+                                        "mapping_type": name_schema,
+                                        "wan_switch_dpid": dpid_type,
+                                        "wan_switch_port": port_type
+                                    },
+                                    "additionalProperties": True,
+                                    "required": ["mapping_type"]
+                                }
+                            },
+                            "oneOf": [
+                                {
+                                    "required": [
+                                        "pop_switch_dpid",
+                                        "pop_switch_port",
+                                        "wan_service_endpoint_id"
+                                    ]
+                                },
+                                {
+                                    "required": [
+                                        "pop_switch_dpid",
+                                        "pop_switch_port",
+                                        "wan_service_mapping_info"
+                                    ]
+                                }
+                            ]
+                        }
+                    }
+                },
+                "required": ["datacenter_name", "pop_wan_mappings"]
+            }
+        }
+    },
+    "required": ["wim_port_mapping"]
+}
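+
+# Illustrative payload that validates against ``wim_port_mapping_schema``
+# (a sketch; the dpids, port numbers and names below are placeholders,
+# not values taken from the code):
+#
+#     {"wim_port_mapping": [
+#         {"datacenter_name": "dc-example",
+#          "pop_wan_mappings": [
+#              {"pop_switch_dpid": "aa:bb:cc:dd:ee:ff:00:01",
+#               "pop_switch_port": 1,
+#               "wan_service_mapping_info": {
+#                   "mapping_type": "dpid-port",
+#                   "wan_switch_dpid": "aa:bb:cc:dd:ee:ff:00:02",
+#                   "wan_switch_port": 1}}]}]}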
diff --git a/osm_ro/wim/tests/__init__.py b/osm_ro/wim/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/osm_ro/wim/tests/fixtures.py b/osm_ro/wim/tests/fixtures.py
new file mode 100644 (file)
index 0000000..1b52e49
--- /dev/null
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=W0621
+
+from __future__ import unicode_literals
+
+import json
+from time import time
+
+from six.moves import range
+
+from ...tests.db_helpers import uuid, sha1
+
+NUM_WIMS = 3
+NUM_TENANTS = 2
+NUM_DATACENTERS = 2
+
+
+# In the following functions, the identifiers should be simple integers
+
+
+def wim(identifier=0):
+    return {'name': 'wim%d' % identifier,
+            'uuid': uuid('wim%d' % identifier),
+            'wim_url': 'localhost',
+            'type': 'tapi'}
+
+
+def tenant(identifier=0):
+    return {'name': 'tenant%d' % identifier,
+            'uuid': uuid('tenant%d' % identifier)}
+
+
+def wim_account(wim, tenant):
+    return {'name': 'wim-account%d%d' % (tenant, wim),
+            'uuid': uuid('wim-account%d%d' % (tenant, wim)),
+            'user': 'user%d%d' % (tenant, wim),
+            'password': 'password%d%d' % (tenant, wim),
+            'wim_id': uuid('wim%d' % wim),
+            'created': 'true'}
+
+
+def wim_tenant_association(wim, tenant):
+    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
+            'wim_id': uuid('wim%d' % wim),
+            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim))}
+
+
+def wim_set(identifier=0, tenant=0):
+    """Records necessary to create a WIM and connect it to a tenant"""
+    return [
+        {'wims': [wim(identifier)]},
+        {'wim_accounts': [wim_account(identifier, tenant)]},
+        {'wim_nfvo_tenants': [wim_tenant_association(identifier, tenant)]}
+    ]
+
+
+def datacenter(identifier):
+    return {'uuid': uuid('dc%d' % identifier),
+            'name': 'dc%d' % identifier,
+            'type': 'openvim',
+            'vim_url': 'localhost'}
+
+
+def datacenter_account(datacenter, tenant):
+    return {'name': 'dc-account%d%d' % (tenant, datacenter),
+            'uuid': uuid('dc-account%d%d' % (tenant, datacenter)),
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'created': 'true'}
+
+
+def datacenter_tenant_association(datacenter, tenant):
+    return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
+            'datacenter_id':  uuid('dc%d' % datacenter),
+            'datacenter_tenant_id':
+                uuid('dc-account%d%d' % (tenant, datacenter))}
+
+
+def datacenter_set(identifier, tenant):
+    """Records necessary to create a datacenter and connect it to a tenant"""
+    return [
+        {'datacenters': [datacenter(identifier)]},
+        {'datacenter_tenants': [datacenter_account(identifier, tenant)]},
+        {'tenants_datacenters': [
+            datacenter_tenant_association(identifier, tenant)
+        ]}
+    ]
+
+
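+# Each port mapping pairs one PoP (datacenter) switch port with one WAN switch
+# port; the wan_service_endpoint_id below is derived by hashing the WAN-side
+# mapping info, so identical mappings always produce the same identifier.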
+def wim_port_mapping(wim, datacenter,
+                     pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA', pop_port=0,
+                     wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB', wan_port=0):
+    mapping_info = {'mapping_type': 'dpid-port',
+                    'wan_switch_dpid': wan_dpid,
+                    'wan_switch_port': wan_port + datacenter + 1}
+    id_ = 'dpid-port|' + sha1(json.dumps(mapping_info, sort_keys=True))
+
+    return {'wim_id': uuid('wim%d' % wim),
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'pop_switch_dpid': pop_dpid,
+            'pop_switch_port': pop_port + wim + 1,
+            # ^  The datacenter router has one port managed by each WIM
+            'wan_service_endpoint_id': id_,
+            # ^  The WIM-managed router has one port connected to each DC
+            'wan_service_mapping_info': json.dumps(mapping_info)}
+
+
+def processed_port_mapping(wim, datacenter,
+                           num_pairs=1,
+                           pop_dpid='AA:AA:AA:AA:AA:AA:AA:AA',
+                           wan_dpid='BB:BB:BB:BB:BB:BB:BB:BB'):
+    """Emulate the response of the Persistence class, where the records in the
+    data base are grouped by wim and datacenter
+    """
+    return {
+        'wim_id': uuid('wim%d' % wim),
+        'datacenter_id': uuid('dc%d' % datacenter),
+        'wan_pop_port_mappings': [
+            {'pop_switch_dpid': pop_dpid,
+             'pop_switch_port': wim + 1 + i,
+             'wan_service_endpoint_id':
+                 sha1('dpid-port|%s|%d' % (wan_dpid, datacenter + 1 + i)),
+             'wan_service_mapping_info': {
+                 'mapping_type': 'dpid-port',
+                 'wan_switch_dpid': wan_dpid,
+                 'wan_switch_port': datacenter + 1 + i}}
+            for i in range(num_pairs)
+        ]
+    }
+
+
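+# consistent_set() builds a fully connected fixture: every tenant gets an
+# account on every WIM and on every datacenter, and every WIM has a port
+# mapping towards every datacenter.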
+def consistent_set(num_wims=NUM_WIMS, num_tenants=NUM_TENANTS,
+                   num_datacenters=NUM_DATACENTERS):
+    return [
+        {'nfvo_tenants': [tenant(i) for i in range(num_tenants)]},
+        {'wims': [wim(j) for j in range(num_wims)]},
+        {'wim_accounts': [
+            wim_account(j, i)
+            for i in range(num_tenants)
+            for j in range(num_wims)
+        ]},
+        {'wim_nfvo_tenants': [
+            wim_tenant_association(j, i)
+            for i in range(num_tenants)
+            for j in range(num_wims)
+        ]},
+        {'datacenters': [
+            datacenter(k)
+            for k in range(num_datacenters)
+        ]},
+        {'datacenter_tenants': [
+            datacenter_account(k, i)
+            for i in range(num_tenants)
+            for k in range(num_datacenters)
+        ]},
+        {'tenants_datacenters': [
+            datacenter_tenant_association(k, i)
+            for i in range(num_tenants)
+            for k in range(num_datacenters)
+        ]},
+        {'wim_port_mappings': [
+            wim_port_mapping(j, k)
+            for j in range(num_wims)
+            for k in range(num_datacenters)
+        ]},
+    ]
+
+
+def instance_nets(num_datacenters=2, num_links=2):
+    """Example of multi-site deploy with N datacenters and M WAN links between
+    them (e.g M = 2 -> back and forth)
+    """
+    return [
+        {'uuid': uuid('net%d%d' % (k, l)),
+         'datacenter_id': uuid('dc%d' % k),
+         'datacenter_tenant_id': uuid('dc-account0%d' % k),
+         'instance_scenario_id': uuid('nsr0'),
+         # ^  instance_scenario_id == NS Record id
+         'sce_net_id': uuid('vld%d' % l),
+         # ^  scenario net id == VLD id
+         'status': 'BUILD',
+         'vim_net_id': None,
+         'created': True}
+        for k in range(num_datacenters)
+        for l in range(num_links)
+    ]
+
+
+def wim_actions(action='CREATE', status='SCHEDULED',
+                action_id=None, instance=0,
+                wim=0, tenant=0, num_links=1):
+    """Create a list of actions for the WIM,
+
+    Arguments:
+        action: type of action (CREATE) by default
+        wim: WIM fixture index to create actions for
+        tenant: tenant fixture index to create actions for
+        num_links: number of WAN links to be established by each WIM
+    """
+
+    action_id = action_id or 'ACTION-{}'.format(time())
+
+    return [
+        {
+            'action': action,
+            'wim_internal_id': uuid('-wim-net%d%d%d' % (wim, instance, link)),
+            'wim_account_id': uuid('wim-account%d%d' % (tenant, wim)),
+            'instance_action_id': action_id,
+            'item': 'instance_wim_nets',
+            'item_id': uuid('wim-net%d%d%d' % (wim, instance, link)),
+            'status': status,
+            'task_index': link,
+            'created_at': time(),
+            'modified_at': time(),
+            'extra': None
+        }
+        for link in range(num_links)
+    ]
+
+
+def instance_action(tenant=0, instance=0, action_id=None,
+                    num_tasks=1, num_done=0, num_failed=0):
+    action_id = action_id or 'ACTION-{}'.format(time())
+
+    return {
+        'uuid': action_id,
+        'tenant_id': uuid('tenant%d' % tenant),
+        'instance_id': uuid('nsr%d' % instance),
+        'number_tasks': num_tasks,
+        'number_done': num_done,
+        'number_failed': num_failed,
+    }
+
+
+def instance_wim_nets(instance=0, wim=0, num_links=1,
+                      status='SCHEDULED_CREATION'):
+    """Example of multi-site deploy with N wims and M WAN links between
+    them (e.g M = 2 -> back and forth)
+    VIM nets
+    """
+    return [
+        {'uuid': uuid('wim-net%d%d%d' % (wim, instance, l)),
+         'wim_id': uuid('wim%d' % wim),
+         'wim_account_id': uuid('wim-account%d' % wim),
+         'wim_internal_id': uuid('-net%d%d' % (wim, l)),
+         'instance_scenario_id': uuid('nsr%d' % instance),
+         # ^  instance_scenario_id == NS Record id
+         'sce_net_id': uuid('vld%d' % l),
+         # ^  scenario net id == VLD id
+         'status': status,
+         'created': False}
+        for l in range(num_links)
+    ]
+
+
+def instance_vm(instance=0, vim_info=None):
+    vim_info = vim_info or {
+        'OS-EXT-SRV-ATTR:hypervisor_hostname': 'host%d' % instance}
+    return {
+        'uuid': uuid('vm%d' % instance),
+        'instance_vnf_id': uuid('vnf%d' % instance),
+        'vm_id': uuid('vm%d' % instance),
+        'vim_vm_id': uuid('vm%d' % instance),
+        'status': 'ACTIVE',
+        'vim_info': vim_info,
+    }
+
+
+def instance_interface(instance=0, interface=0, datacenter=0, link=0):
+    return {
+        'uuid': uuid('interface%d%d' % (instance, interface)),
+        'instance_vm_id': uuid('vm%d' % instance),
+        'instance_net_id': uuid('net%d%d' % (datacenter, link)),
+        'interface_id': uuid('iface%d' % interface),
+        'type': 'external',
+        'vlan': 3
+    }
diff --git a/osm_ro/wim/tests/test_actions.py b/osm_ro/wim/tests/test_actions.py
new file mode 100644 (file)
index 0000000..920182b
--- /dev/null
@@ -0,0 +1,366 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101
+
+from __future__ import unicode_literals, print_function
+
+import json
+import unittest
+from time import time
+
+from mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid,
+)
+from ..persistence import WimPersistence
+from ..wan_link_actions import WanLinkCreate, WanLinkDelete
+from ..wimconn import WimConnectorError
+
+
+class TestActionsWithDb(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestActionsWithDb, self).setUp()
+        self.persist = WimPersistence(self.db)
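+        # The connector and ovim below are plain mocks: these tests exercise
+        # only the action logic and the database persistence layer, without
+        # contacting any real WIM or SDN controller.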
+        self.connector = MagicMock()
+        self.ovim = MagicMock()
+
+
+class TestCreate(TestActionsWithDb):
+    @disable_foreign_keys
+    def test_process__instance_nets_on_build(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is still being built
+        wan_link = eg.instance_wim_nets()
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        for net in instance_nets:
+            net['status'] = 'BUILD'
+        self.populate([{'instance_nets': instance_nets,
+                        'instance_wim_nets': wan_link}])
+
+        # When we try to process a CREATE action that refers to the same
+        # instance_scenario_id and sce_net_id
+        now = time()
+        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
+        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
+        action.sce_net_id = instance_nets[0]['sce_net_id']
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should be deferred
+        assert action.is_scheduled
+        self.assertEqual(action.extra['attempts'], 1)
+        self.assertGreater(action.extra['last_attempted_at'], now)
+
+    @disable_foreign_keys
+    def test_process__instance_nets_on_error(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and at least one local network is in a not good state (error, or
+        # being deleted)
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        instance_nets[1]['status'] = 'SCHEDULED_DELETION'
+        wan_link = eg.instance_wim_nets()
+        self.populate([{'instance_nets': instance_nets,
+                        'instance_wim_nets': wan_link}])
+
+        # When we try to process a CREATE action that refers to the same
+        # instance_scenario_id and sce_net_id
+        action = WanLinkCreate(eg.wim_actions('CREATE')[0])
+        action.instance_scenario_id = instance_nets[0]['instance_scenario_id']
+        action.sce_net_id = instance_nets[0]['sce_net_id']
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should fail
+        assert action.is_failed
+        self.assertIn('issue with the local networks', action.error_msg)
+        self.assertIn('SCHEDULED_DELETION', action.error_msg)
+
+    def prepare_create__sdn(self):
+        db_state = [{'nfvo_tenants': eg.tenant()}] + eg.wim_set()
+
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        port_mappings = [
+            eg.wim_port_mapping(0, 0),
+            eg.wim_port_mapping(0, 1)
+        ]
+        instance_action = eg.instance_action(action_id='ACTION-000')
+        for i, net in enumerate(instance_nets):
+            net['status'] = 'ACTIVE'
+            net['sdn_net_id'] = uuid('sdn-net%d' % i)
+
+        db_state += [{'instance_nets': instance_nets},
+                     {'instance_wim_nets': eg.instance_wim_nets()},
+                     {'wim_port_mappings': port_mappings},
+                     {'instance_actions': instance_action}]
+
+        action = WanLinkCreate(
+            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+
+        return db_state, action
+
+    @disable_foreign_keys
+    def test_process__sdn(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action = self.prepare_create__sdn()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
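+        # create_connectivity_service is stubbed to return a (service id,
+        # extra info) tuple, which is presumably the shape the WanLinkCreate
+        # action expects from a WIM connector.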
+        connector_patch = patch.object(
+            self.connector, 'create_connectivity_service',
+            lambda *_, **__: (uuid('random-id'), None))
+
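+        # ovim.get_ports is stubbed to return a single port on the PoP switch
+        # defined by the fixtures (dpid AA:AA:...), matching their 'dpid-port'
+        # mappings.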
+        ovim_patch = patch.object(
+            self.ovim, 'get_ports', MagicMock(return_value=[{
+                'switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
+                'switch_port': 1,
+            }]))
+
+        # If the connector works fine
+        with connector_patch, ovim_patch:
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should succeed
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done + 1)
+        self.assertEqual(instance_action['number_failed'], number_failed)
+
+    @disable_foreign_keys
+    def test_process__sdn_fail(self):
+        # Given we want 1 WAN link between 2 datacenters
+        # and the local network in each datacenter is already created
+        db_state, action = self.prepare_create__sdn()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        connector_patch = patch.object(
+            self.connector, 'create_connectivity_service',
+            MagicMock(side_effect=WimConnectorError('foobar')))
+
+        ovim_patch = patch.object(
+            self.ovim, 'get_ports', MagicMock(return_value=[{
+                'switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
+                'switch_port': 1,
+            }]))
+
+        # If the connector throws an error
+        with connector_patch, ovim_patch:
+            # When we try to process a CREATE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should fail
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'FAILED')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done)
+        self.assertEqual(instance_action['number_failed'], number_failed + 1)
+
+
+class TestDelete(TestActionsWithDb):
+    @disable_foreign_keys
+    def test_process__no_internal_id(self):
+        # Given no WAN link was created yet,
+        # when we try to process a DELETE action, with no wim_internal_id
+        action = WanLinkDelete(eg.wim_actions('DELETE')[0])
+        action.wim_internal_id = None
+        # -- ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record,
+                        'instance_wim_nets': eg.instance_wim_nets()}])
+        # <-- #
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should succeed
+        assert action.is_done
+
+    def prepare_delete(self):
+        db_state = [{'nfvo_tenants': eg.tenant()}] + eg.wim_set()
+
+        instance_nets = eg.instance_nets(num_datacenters=2, num_links=1)
+        port_mappings = [
+            eg.wim_port_mapping(0, 0),
+            eg.wim_port_mapping(0, 1)
+        ]
+        instance_action = eg.instance_action(action_id='ACTION-000')
+        for i, net in enumerate(instance_nets):
+            net['status'] = 'ACTIVE'
+            net['sdn_net_id'] = uuid('sdn-net%d' % i)
+
+        db_state += [{'instance_nets': instance_nets},
+                     {'instance_wim_nets': eg.instance_wim_nets()},
+                     {'wim_port_mappings': port_mappings},
+                     {'instance_actions': instance_action}]
+
+        action = WanLinkDelete(
+            eg.wim_actions('DELETE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+
+        return db_state, action
+
+    @disable_foreign_keys
+    def test_process(self):
+        # Given we want to delete 1 WAN link between 2 datacenters
+        db_state, action = self.prepare_delete()
+        self.populate(db_state)
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        number_done = instance_action['number_done']
+        number_failed = instance_action['number_failed']
+
+        connector_patch = patch.object(
+            self.connector, 'delete_connectivity_service')
+
+        # If the connector works fine
+        with connector_patch:
+            # When we try to process a DELETE action that refers to the same
+            # instance_scenario_id and sce_net_id
+            action.process(self.connector, self.persist, self.ovim)
+
+        # Then the action should succeed
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': action.instance_action_id,
+            'task_index': action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+        instance_action = self.persist.get_by_uuid(
+            'instance_actions', action.instance_action_id)
+        self.assertEqual(instance_action['number_done'], number_done + 1)
+        self.assertEqual(instance_action['number_failed'], number_failed)
+
+    @disable_foreign_keys
+    def test_process__wan_link_error(self):
+        # Given we have a delete action that targets a wan link with an error
+        db_state, action = self.prepare_delete()
+        wan_link = [tables for tables in db_state
+                    if tables.get('instance_wim_nets')][0]['instance_wim_nets']
+        wan_link[0]['status'] = 'ERROR'
+        self.populate(db_state)
+
+        # When we try to process it
+        action.process(self.connector, self.persist, self.ovim)
+
+        # Then it should fail
+        assert action.is_failed
+
+    def create_action(self):
+        action = WanLinkCreate(
+            eg.wim_actions('CREATE', action_id='ACTION-000')[0])
+        # --> ensure it is in the database for updates --> #
+        action_record = action.as_record()
+        action_record['extra'] = json.dumps(action_record['extra'])
+        self.populate([{'vim_wim_actions': action_record}])
+
+        return action
+
+    @disable_foreign_keys
+    def test_create_and_delete(self):
+        # Given a CREATE action has already succeeded
+        db_state, delete_action = self.prepare_delete()
+        delete_action.save(self.persist, task_index=1)
+        self.populate(db_state)
+        create_action = self.create_action()
+
+        connector_patch = patch.multiple(
+            self.connector,
+            delete_connectivity_service=MagicMock(),
+            create_connectivity_service=(
+                lambda *_, **__: (uuid('random-id'), None)))
+
+        ovim_patch = patch.object(
+            self.ovim, 'get_ports', MagicMock(return_value=[{
+                'switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
+                'switch_port': 1,
+            }]))
+
+        with connector_patch, ovim_patch:
+            create_action.process(self.connector, self.persist, self.ovim)
+
+        # When we then process the corresponding DELETE action
+        with connector_patch:
+            delete_action.process(self.connector, self.persist, self.ovim)
+
+        # Then the DELETE action should be successful
+        db_action = self.persist.query_one('vim_wim_actions', WHERE={
+            'instance_action_id': delete_action.instance_action_id,
+            'task_index': delete_action.task_index})
+        self.assertEqual(db_action['status'], 'DONE')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/wim/tests/test_engine.py b/osm_ro/wim/tests/test_engine.py
new file mode 100644 (file)
index 0000000..6fb2d8c
--- /dev/null
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from __future__ import unicode_literals
+
+import unittest
+
+from mock import MagicMock
+
+from . import fixtures as eg
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ..errors import NoWimConnectedToDatacenters
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+
+
+class TestWimEngineDbMethods(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimEngineDbMethods, self).setUp()
+        self.persist = WimPersistence(self.db)
+        self.engine = WimEngine(persistence=self.persist)
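+        # stop_threads is registered as cleanup because the engine may spawn
+        # per-account threads that must not outlive the test.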
+        self.addCleanup(self.engine.stop_threads)
+
+    def populate(self, seeds=None):
+        super(TestWimEngineDbMethods, self).populate(
+            seeds or eg.consistent_set())
+
+    def test_find_common_wims(self):
+        # Given we have 2 WIMs and 3 datacenters, but just 1 of the WIMs has
+        # access to them
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.wim_set(1, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1),
+                          eg.wim_port_mapping(0, 2)]}])
+
+        # When we retrieve the wims interconnecting some datacenters
+        wim_ids = self.engine.find_common_wims(
+            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+        # Then we should have just the first wim
+        self.assertEqual(len(wim_ids), 1)
+        self.assertEqual(wim_ids[0], uuid('wim0'))
+
+    def test_find_common_wims_multiple(self):
+        # Given we have 2 WIMs and 3 datacenters, and all the WIMs have access to
+        # all datacenters
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.wim_set(1, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1),
+                          eg.wim_port_mapping(0, 2),
+                          eg.wim_port_mapping(1, 0),
+                          eg.wim_port_mapping(1, 1),
+                          eg.wim_port_mapping(1, 2)]}])
+
+        # When we retrieve the wims interconnecting the three datacenters
+        wim_ids = self.engine.find_common_wims(
+            [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+        # Then we should have all the wims
+        self.assertEqual(len(wim_ids), 2)
+        self.assertItemsEqual(wim_ids, [uuid('wim0'), uuid('wim1')])
+
+    def test_find_common_wim(self):
+        # Given we have 1 WIM and 3 datacenters, but the WIM can access just 2
+        # of them
+        self.populate([{'nfvo_tenants': [eg.tenant(0)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      eg.datacenter_set(2, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1)]}])
+
+        # When we retrieve the common wim for the 2 datacenters that are
+        # interconnected
+        wim_id = self.engine.find_common_wim(
+            [uuid('dc0'), uuid('dc1')], tenant='tenant0')
+
+        # Then we should find the wim
+        self.assertEqual(wim_id, uuid('wim0'))
+
+        # When we try to retrieve the common wim for all the datacenters
+        # Then a NoWimConnectedToDatacenters exception should be raised
+        with self.assertRaises(NoWimConnectedToDatacenters):
+            self.engine.find_common_wim(
+                [uuid('dc0'), uuid('dc1'), uuid('dc2')], tenant='tenant0')
+
+    def test_find_common_wim__different_tenants(self):
+        # Given we have 1 WIM and 2 datacenters connected, but the WIM doesn't
+        # belong to the tenant we have access to...
+        self.populate([{'nfvo_tenants': [eg.tenant(0), eg.tenant(1)]}] +
+                      eg.wim_set(0, 0) +
+                      eg.datacenter_set(0, 0) +
+                      eg.datacenter_set(1, 0) +
+                      [{'wim_port_mappings': [
+                          eg.wim_port_mapping(0, 0),
+                          eg.wim_port_mapping(0, 1)]}])
+
+        # When we retrieve the common wim for the 2 datacenters that are
+        # interconnected, but using another tenant,
+        # Then we should get an exception
+        with self.assertRaises(NoWimConnectedToDatacenters):
+            self.engine.find_common_wim(
+                [uuid('dc0'), uuid('dc1')], tenant='tenant1')
+
+
+class TestWimEngine(unittest.TestCase):
+    def test_derive_wan_link(self):
+        # Given we have 2 datacenters connected by the same WIM, with port
+        # mappings registered
+        mappings = [eg.processed_port_mapping(0, 0),
+                    eg.processed_port_mapping(0, 1)]
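+        # The persistence layer is replaced by a mock returning pre-processed
+        # port mappings, so only the pure link-derivation logic is exercised.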
+        persist = MagicMock(
+            get_wim_port_mappings=MagicMock(return_value=mappings))
+
+        engine = WimEngine(persistence=persist)
+        self.addCleanup(engine.stop_threads)
+
+        # When we receive a list of 4 instance nets, representing
+        # 2 VLDs connecting 2 datacenters each
+        instance_nets = eg.instance_nets(2, 2)
+        wan_links = engine.derive_wan_links(
+            instance_nets, uuid('tenant0'))
+
+        # Then we should derive 2 wan_links with the same instance_scenario_id
+        # and different scenario_network_id
+        self.assertEqual(len(wan_links), 2)
+        for link in wan_links:
+            self.assertEqual(link['instance_scenario_id'], uuid('nsr0'))
+        # Each VLD needs a network to be created in each datacenter
+        self.assertItemsEqual([l['sce_net_id'] for l in wan_links],
+                              [uuid('vld0'), uuid('vld1')])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/wim/tests/test_http_handler.py b/osm_ro/wim/tests/test_http_handler.py
new file mode 100644 (file)
index 0000000..04577e4
--- /dev/null
@@ -0,0 +1,508 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from __future__ import unicode_literals
+
+import unittest
+
+import bottle
+from mock import MagicMock, patch
+from webtest import TestApp
+
+from . import fixtures as eg  # "examples"
+from ...http_tools.errors import Conflict, Not_Found
+from ...tests.db_helpers import TestCaseWithDatabasePerTest, uuid
+from ...utils import merge_dicts
+from ..http_handler import WimHandler
+
+OK = 200
+
+
+@patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())  # Avoid external calls
+@patch('osm_ro.wim.wim_thread.WimThread.start', MagicMock())  # Avoid running
+class TestHttpHandler(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestHttpHandler, self).setUp()
+        bottle.debug(True)
+        handler = WimHandler(db=self.db)
+        self.engine = handler.engine
+        self.addCleanup(self.engine.stop_threads)
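+        # webtest's TestApp drives the bottle WSGI app in-process, so the
+        # requests below are issued without binding a real HTTP port.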
+        self.app = TestApp(handler.wsgi_app)
+
+    def populate(self, seeds=None):
+        super(TestHttpHandler, self).populate(seeds or eg.consistent_set())
+
+    def test_list_wims(self):
+        # Given some wims are registered in the database
+        self.populate()
+        # when a GET /<tenant_id>/wims request arrives
+        tenant_id = uuid('tenant0')
+        response = self.app.get('/{}/wims'.format(tenant_id))
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and all the registered wims should be present
+        retrieved_wims = {v['name']: v for v in response.json['wims']}
+        for name in retrieved_wims:
+            identifier = int(name.replace('wim', ''))
+            self.assertDictContainsSubset(
+                eg.wim(identifier), retrieved_wims[name])
+
+    def test_show_wim(self):
+        # Given some wims are registered in the database
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant0')
+        wim_id = uuid('wim1')
+        response = self.app.get('/{}/wims/{}'.format(tenant_id, wim_id))
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and the registered wim (wim1) should be present
+        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
+        # Moreover, it also works with tenant_id = 'any'
+        response = self.app.get('/any/wims/{}'.format(wim_id))
+        self.assertEqual(response.status_code, OK)
+        self.assertDictContainsSubset(eg.wim(1), response.json['wim'])
+
+    def test_show_wim__wim_doesnt_exists(self):
+        # Given wim_id does not refer to any already registered wim
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant0')
+        wim_id = uuid('wim999')
+        response = self.app.get(
+            '/{}/wims/{}'.format(tenant_id, wim_id),
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+
+    def test_show_wim__tenant_doesnt_exists(self):
+        # Given tenant_id does not refer to any already registered tenant
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id> request arrives
+        tenant_id = uuid('tenant999')
+        wim_id = uuid('wim0')
+        response = self.app.get(
+            '/{}/wims/{}'.format(tenant_id, wim_id),
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+
+    def test_edit_wim(self):
+        # Given a WIM exists in the database
+        self.populate()
+        # when a PUT /wims/<wim_id> request arrives
+        wim_id = uuid('wim1')
+        response = self.app.put_json('/wims/{}'.format(wim_id), {
+            'wim': {'name': 'My-New-Name'}})
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and the updated wim (wim1) should be returned
+        self.assertDictContainsSubset(
+            merge_dicts(eg.wim(1), name='My-New-Name'),
+            response.json['wim'])
+
+    def test_delete_wim(self):
+        # Given a WIM exists in the database
+        self.populate()
+        num_accounts = self.count('wim_accounts')
+        num_associations = self.count('wim_nfvo_tenants')
+        num_mappings = self.count('wim_port_mappings')
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+            # when a DELETE /wims/<wim_id> request arrives
+            wim_id = uuid('wim1')
+            response = self.app.delete('/wims/{}'.format(wim_id))
+            num_threads_after = len(self.engine.threads)
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        self.assertIn('deleted', response.json['result'])
+        # and the registered wim1 should be deleted
+        response = self.app.get(
+            '/any/wims/{}'.format(wim_id),
+            expect_errors=True)
+        self.assertEqual(response.status_code, Not_Found)
+        # and all the dependent records in other tables should be deleted:
+        # wim_accounts, wim_nfvo_tenants, wim_port_mappings
+        self.assertEqual(self.count('wim_nfvo_tenants'),
+                         num_associations - eg.NUM_TENANTS)
+        self.assertLess(self.count('wim_port_mappings'), num_mappings)
+        self.assertEqual(self.count('wim_accounts'),
+                         num_accounts - eg.NUM_TENANTS)
+        # And the threads associated with the wim accounts should be stopped
+        self.assertEqual(num_threads_after, num_threads - eg.NUM_TENANTS)
+
+    def test_create_wim(self):
+        # Given no WIM exists yet
+        # when a POST /wims request arrives with the right payload
+        response = self.app.post_json('/wims', {'wim': eg.wim(999)})
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim']['name'], 'wim999')
+
+    def test_create_wim_account(self):
+        # Given a WIM and an NFVO tenant exist but are not associated
+        self.populate([{'wims': [eg.wim(0)]},
+                       {'nfvo_tenants': [eg.tenant(0)]}])
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+            # when a POST /<tenant_id>/wims/<wim_id> arrives
+            response = self.app.post_json(
+                '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
+                {'wim_account': eg.wim_account(0, 0)})
+
+            num_threads_after = len(self.engine.threads)
+
+        # then a new thread should be created
+        self.assertEqual(num_threads_after, num_threads + 1)
+
+        # and the request should succeed
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim_account']['name'], 'wim-account00')
+
+        # and a new association record should be created
+        association = self.db.get_rows(FROM='wim_nfvo_tenants')
+        assert association
+        self.assertEqual(len(association), 1)
+        self.assertEqual(association[0]['wim_id'], uuid('wim0'))
+        self.assertEqual(association[0]['nfvo_tenant_id'], uuid('tenant0'))
+        self.assertEqual(association[0]['wim_account_id'],
+                         response.json['wim_account']['uuid'])
+
+    def test_create_wim_account__existing_account(self):
+        # Given a WIM, a WIM account and an NFVO tenant exist
+        # But the NFVO tenant and the WIM are not associated
+        self.populate([
+            {'wims': [eg.wim(0)]},
+            {'nfvo_tenants': [eg.tenant(0)]},
+            {'wim_accounts': [eg.wim_account(0, 0)]}])
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives
+        # and it refers to an existing wim account
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')),
+            {'wim_account': {'name': 'wim-account00'}})
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and the association should be created
+        association = self.db.get_rows(
+            FROM='wim_nfvo_tenants',
+            WHERE={'wim_id': uuid('wim0'),
+                   'nfvo_tenant_id': uuid('tenant0')})
+        assert association
+        self.assertEqual(len(association), 1)
+        # but no new wim_account should be created
+        wim_accounts = self.db.get_rows(FROM='wim_accounts')
+        self.assertEqual(len(wim_accounts), 1)
+        self.assertEqual(wim_accounts[0]['name'], 'wim-account00')
+
+    def test_create_wim_account__existing_account__differing(self):
+        # Given a WIM, a WIM account and an NFVO tenant exist
+        # But the NFVO tenant and the WIM are not associated
+        self.populate([
+            {'wims': [eg.wim(0)]},
+            {'nfvo_tenants': [eg.tenant(0)]},
+            {'wim_accounts': [eg.wim_account(0, 0)]}])
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives
+        # and it refers to an existing wim account,
+        # but with different fields
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                'wim_account': {
+                    'name': 'wim-account00',
+                    'user': 'john',
+                    'password': 'abc123'}},
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Conflict)
+        # some useful message should be displayed
+        response.mustcontain('attempt to overwrite', 'user', 'password')
+        # and the association should not be created
+        association = self.db.get_rows(
+            FROM='wim_nfvo_tenants',
+            WHERE={'wim_id': uuid('wim0'),
+                   'nfvo_tenant_id': uuid('tenant0')})
+        assert not association
+
+    def test_create_wim_account__association_already_exists(self):
+        # Given a WIM, a WIM account and an NFVO tenant exist
+        # and are correctly associated
+        self.populate()
+        num_assoc_before = self.count('wim_nfvo_tenants')
+
+        # when a POST /<tenant_id>/wims/<wim_id> arrives trying to connect a
+        # WIM and a tenant for the second time
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Conflict)
+        # the message should be useful
+        response.mustcontain('There is already', uuid('wim0'), uuid('tenant0'))
+
+        num_assoc_after = self.count('wim_nfvo_tenants')
+
+        # and the number of association records should not increase
+        self.assertEqual(num_assoc_before, num_assoc_after)
+
+    def test_create_wim__tenant_doesnt_exist(self):
+        # Given a tenant that does not exist
+        self.populate()
+
+        # But the user tries to create a wim_account anyway
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant999'), uuid('wim0')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+        # the message should be useful
+        response.mustcontain('No record was found', uuid('tenant999'))
+
+    def test_create_wim__wim_doesnt_exist(self):
+        # Given a tenant not exists
+        self.populate()
+
+        # But the user tries to create a wim_account anyway
+        response = self.app.post_json(
+            '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim999')), {
+                'wim_account': {
+                    'user': 'user999',
+                    'password': 'password999'}},
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+        # the message should be useful
+        response.mustcontain('No record was found', uuid('wim999'))
+
+    def test_update_wim_account(self):
+        # Given a WIM account connecting a tenant and a WIM exists
+        self.populate()
+
+        with self.engine.threads_running():
+            num_threads = len(self.engine.threads)
+
+            thread = self.engine.threads[uuid('wim-account00')]
+            reload = MagicMock(wraps=thread.reload)
+
+            with patch.object(thread, 'reload', reload):
+                # when a PUT /<tenant_id>/wims/<wim_id> arrives
+                response = self.app.put_json(
+                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')), {
+                        'wim_account': {
+                            'name': 'account888',
+                            'user': 'user888'}})
+
+            num_threads_after = len(self.engine.threads)
+
+        # then the wim thread should be restarted
+        reload.assert_called_once()
+        # and no thread should be added or removed
+        self.assertEqual(num_threads_after, num_threads)
+
+        # and the request should succeed
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(response.json['wim_account']['name'], 'account888')
+        self.assertEqual(response.json['wim_account']['user'], 'user888')
+
+    def test_update_wim_account__multiple(self):
+        # Given a WIM connected to several tenants (one account per tenant)
+        self.populate()
+
+        with self.engine.threads_running():
+            # when a PUT /any/wims/<wim_id> arrives
+            response = self.app.put_json(
+                '/any/wims/{}'.format(uuid('wim0')), {
+                    'wim_account': {
+                        'user': 'user888',
+                        'config': {'x': 888}}})
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        self.assertEqual(len(response.json['wim_accounts']), eg.NUM_TENANTS)
+
+        for account in response.json['wim_accounts']:
+            self.assertEqual(account['user'], 'user888')
+            self.assertEqual(account['config']['x'], 888)
+
+    def test_delete_wim_account(self):
+        # Given a WIM account exists and it is connected to a tenant
+        self.populate()
+
+        num_accounts_before = self.count('wim_accounts')
+
+        with self.engine.threads_running():
+            thread = self.engine.threads[uuid('wim-account00')]
+            exit = MagicMock(wraps=thread.exit)
+            num_threads = len(self.engine.threads)
+
+            with patch.object(thread, 'exit', exit):
+                # when a DELETE /<tenant_id>/wims/<wim_id> arrives
+                response = self.app.delete_json(
+                    '/{}/wims/{}'.format(uuid('tenant0'), uuid('wim0')))
+
+            num_threads_after = len(self.engine.threads)
+
+        # then the wim thread should exit
+        self.assertEqual(num_threads_after, num_threads - 1)
+        exit.assert_called_once()
+
+        # and the request should succeed
+        self.assertEqual(response.status_code, OK)
+        response.mustcontain('account `wim-account00` deleted')
+
+        # and the number of wim_accounts should decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after, num_accounts_before - 1)
+
+    def test_delete_wim_account__multiple(self):
+        # Given a WIM exists and it is connected to several tenants
+        self.populate()
+
+        num_accounts_before = self.count('wim_accounts')
+
+        with self.engine.threads_running():
+            # when a DELETE /any/wims/<wim_id> arrives
+            response = self.app.delete_json(
+                '/any/wims/{}'.format(uuid('wim0')))
+
+        # then the request should succeed
+        self.assertEqual(response.status_code, OK)
+        response.mustcontain('account `wim-account00` deleted')
+        response.mustcontain('account `wim-account10` deleted')
+
+        # and the number of wim_accounts should decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after,
+                         num_accounts_before - eg.NUM_TENANTS)
+
+    def test_delete_wim_account__doesnt_exist(self):
+        # Given we have a tenant that is not connected to a WIM
+        self.populate()
+        tenant = {'uuid': uuid('tenant888'), 'name': 'tenant888'}
+        self.populate([{'nfvo_tenants': [tenant]}])
+
+        num_accounts_before = self.count('wim_accounts')
+
+        # when a DELETE /<tenant_id>/wims/<wim_id> arrives
+        response = self.app.delete(
+            '/{}/wims/{}'.format(uuid('tenant888'), uuid('wim0')),
+            expect_errors=True)
+
+        # then the request should not succeed
+        self.assertEqual(response.status_code, Not_Found)
+
+        # and the number of wim_accounts should not decrease
+        num_accounts_after = self.count('wim_accounts')
+        self.assertEqual(num_accounts_after, num_accounts_before)
+
+    def test_create_port_mappings(self):
+        # Given we have a wim and datacenter without any port mappings
+        self.populate([{'nfvo_tenants': eg.tenant(0)}] +
+                      eg.datacenter_set(888, 0) +
+                      eg.wim_set(999, 0))
+
+        # when a POST /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.post_json(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim999')),
+            {'wim_port_mapping': [{
+                'datacenter_name': 'dc888',
+                'pop_wan_mappings': [
+                    {'pop_switch_dpid': 'AA:AA:AA:AA:AA:AA:AA:AA',
+                     'pop_switch_port': 1,
+                     'wan_service_mapping_info': {
+                         'mapping_type': 'dpid-port',
+                         'wan_switch_dpid': 'BB:BB:BB:BB:BB:BB:BB:BB',
+                         'wan_switch_port': 1
+                     }}
+                ]}
+            ]})
+
+        # the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and port mappings should be stored in the database
+        port_mapping = self.db.get_rows(FROM='wim_port_mappings')
+        self.assertEqual(len(port_mapping), 1)
+
+    def test_get_port_mappings(self):
+        # Given WIMS and datacenters exist with port mappings between them
+        self.populate()
+        # when a GET /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.get(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
+        # the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and we should see port mappings for each WIM, datacenter pair
+        mappings = response.json['wim_port_mapping']
+        self.assertEqual(len(mappings), eg.NUM_DATACENTERS)
+        # ^  In the fixture set all the datacenters are connected to all wims
+
+    def test_delete_port_mappings(self):
+        # Given WIMS and datacenters exist with port mappings between them
+        self.populate()
+        num_mappings_before = self.count('wim_port_mappings')
+
+        # when a DELETE /<tenant_id>/wims/<wim_id>/port_mapping arrives
+        response = self.app.delete(
+            '/{}/wims/{}/port_mapping'.format(uuid('tenant0'), uuid('wim0')))
+        # the request should succeed
+        self.assertEqual(response.status_code, OK)
+        # and the number of port mappings should decrease
+        num_mappings_after = self.count('wim_port_mappings')
+        self.assertEqual(num_mappings_after,
+                         num_mappings_before - eg.NUM_DATACENTERS)
+        # ^  In the fixture set all the datacenters are connected to all wims
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/wim/tests/test_persistence.py b/osm_ro/wim/tests/test_persistence.py
new file mode 100644 (file)
index 0000000..d09a116
--- /dev/null
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from __future__ import unicode_literals
+
+import unittest
+from itertools import chain
+from types import StringType
+
+from six.moves import range
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid
+)
+from ..persistence import (
+    WimPersistence,
+    hide_confidential_fields,
+    serialize_fields,
+    unserialize_fields
+)
+
+
+class TestPersistenceUtils(unittest.TestCase):
+    def test_hide_confidential_fields(self):
+        example = {
+            'password': '123456',
+            'nested.password': '123456',
+            'nested.secret': None,
+        }
+        result = hide_confidential_fields(example,
+                                          fields=('password', 'secret'))
+        for field in 'password', 'nested.password':
+            assert result[field].startswith('***')
+        self.assertIs(result['nested.secret'], None)
+
+    def test_serialize_fields(self):
+        example = {
+            'config': dict(x=1),
+            'nested.info': [1, 2, 3],
+            'nested.config': None
+        }
+        result = serialize_fields(example, fields=('config', 'info'))
+        for field in 'config', 'nested.info':
+            self.assertIsInstance(result[field], StringType)
+        self.assertIs(result['nested.config'], None)
+
+    def test_unserialize_fields(self):
+        example = {
+            'config': '{"x": 1}',
+            'nested.info': '[1,2,3]',
+            'nested.config': None,
+            'confidential.info': '{"password": "abcdef"}'
+        }
+        result = unserialize_fields(example, fields=('config', 'info'))
+        self.assertEqual(result['config'], dict(x=1))
+        self.assertEqual(result['nested.info'], [1, 2, 3])
+        self.assertIs(result['nested.config'], None)
+        self.assertNotEqual(result['confidential.info']['password'], 'abcdef')
+        assert result['confidential.info']['password'].startswith('***')
+
+
+class TestWimPersistence(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimPersistence, self).setUp()
+        self.persist = WimPersistence(self.db)
+
+    def populate(self, seeds=None):
+        super(TestWimPersistence, self).populate(seeds or eg.consistent_set())
+
+    def test_query_offset(self):
+        # Given a database contains 4 records
+        self.populate([{'wims': [eg.wim(i) for i in range(4)]}])
+
+        # When we query using a limit of 2 and an offset of 1
+        results = self.persist.query('wims',
+                                     ORDER_BY='name', LIMIT=2, OFFSET=1)
+        # Then we should have 2 results, skipping the first record
+        names = [r['name'] for r in results]
+        self.assertItemsEqual(names, ['wim1', 'wim2'])
+
+    def test_get_wim_account_by_wim_tenant(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the account using wim and tenant
+        wim_account = self.persist.get_wim_account_by(
+            uuid('wim0'), uuid('tenant0'))
+
+        # then the right record should be returned
+        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
+        self.assertEqual(wim_account['name'], 'wim-account00')
+        self.assertEqual(wim_account['user'], 'user00')
+
+    def test_get_wim_account_by_wim_tenant__names(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the account using wim and tenant
+        wim_account = self.persist.get_wim_account_by(
+            'wim0', 'tenant0')
+
+        # then the right record should be returned
+        self.assertEqual(wim_account['uuid'], uuid('wim-account00'))
+        self.assertEqual(wim_account['name'], 'wim-account00')
+        self.assertEqual(wim_account['user'], 'user00')
+
+    def test_get_wim_accounts_by_wim(self):
+        # Given a database contains WIM accounts associated to Tenants
+        self.populate()
+
+        # when we retrieve the accounts using wim
+        wim_accounts = self.persist.get_wim_accounts_by(uuid('wim0'))
+
+        # then the right records should be returned
+        self.assertEqual(len(wim_accounts), eg.NUM_TENANTS)
+        for account in wim_accounts:
+            self.assertEqual(account['wim_id'], uuid('wim0'))
+
+    def test_get_wim_port_mappings(self):
+        # Given a database with WIMs, datacenters and port-mappings
+        self.populate()
+
+        # when we retrieve the port mappings for a list of datacenters
+        # using either names or uuids
+        for criteria in ([uuid('dc0'), uuid('dc1')], ['dc0', 'dc1']):
+            mappings = self.persist.get_wim_port_mappings(datacenter=criteria)
+
+            # then each result should have a datacenter_id
+            datacenters = [m['datacenter_id'] for m in mappings]
+            for datacenter in datacenters:
+                self.assertIn(datacenter, [uuid('dc0'), uuid('dc1')])
+
+            # a wim_id
+            wims = [m['wim_id'] for m in mappings]
+            for wim in wims:
+                self.assertIsNot(wim, None)
+
+            # and an array of 'wan' <> 'pop' connection pairs
+            pairs = chain(*(m['wan_pop_port_mappings'] for m in mappings))
+            self.assertEqual(len(list(pairs)), 2 * eg.NUM_WIMS)
+
+    def test_get_wim_port_mappings_multiple(self):
+        # Given we have more than one connection in a datacenter managed by the
+        # WIM
+        self.populate()
+        self.populate([{
+            'wim_port_mappings': [
+                eg.wim_port_mapping(
+                    0, 0,
+                    pop_dpid='CC:CC:CC:CC:CC:CC:CC:CC',
+                    wan_dpid='DD:DD:DD:DD:DD:DD:DD:DD'),
+                eg.wim_port_mapping(
+                    0, 0,
+                    pop_dpid='EE:EE:EE:EE:EE:EE:EE:EE',
+                    wan_dpid='FF:FF:FF:FF:FF:FF:FF:FF')]}])
+
+        # when we retrieve the port mappings for the wim and datacenter:
+        mappings = (
+            self.persist.get_wim_port_mappings(wim='wim0', datacenter='dc0'))
+
+        # then it should return just a single result, grouped by wim and
+        # datacenter
+        self.assertEqual(len(mappings), 1)
+        self.assertEqual(mappings[0]['wim_id'], uuid('wim0'))
+        self.assertEqual(mappings[0]['datacenter_id'], uuid('dc0'))
+
+        self.assertEqual(len(mappings[0]['wan_pop_port_mappings']), 3)
+
+        # when we retrieve the mappings for more than one wim/datacenter
+        # the grouping should still work properly
+        mappings = self.persist.get_wim_port_mappings(
+            wim=['wim0', 'wim1'], datacenter=['dc0', 'dc1'])
+        self.assertEqual(len(mappings), 4)
+        pairs = chain(*(m['wan_pop_port_mappings'] for m in mappings))
+        self.assertEqual(len(list(pairs)), 6)
+
+    def test_get_actions_in_group(self):
+        # Given a good number of wim actions exist in the database
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs) +
+                   eg.wim_actions('START', num_links=8, **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we retrieve them in groups
+        limit = 5
+        results = self.persist.get_actions_in_groups(
+            uuid('wim-account00'), ['instance_wim_nets'], group_limit=limit)
+
+        # Then we should have N groups where N == limit
+        self.assertEqual(len(results), limit)
+        for _, task_list in results:
+            # And since for each link we have created 3 actions (create, find,
+            # start), we should find them in each group
+            self.assertEqual(len(task_list), 3)
+
+    @disable_foreign_keys
+    def test_update_instance_action_counters(self):
+        # Given we have one instance action in the database with 2 incomplete
+        # tasks
+        action = eg.instance_action(num_tasks=2)
+        self.populate([{'instance_actions': action}])
+        # When we update the done counter by 0, nothing should happen
+        self.persist.update_instance_action_counters(action['uuid'], done=0)
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 0)
+        self.assertEqual(result['number_failed'], 0)
+        # When we update the done counter by 2, number_done should be 2
+        self.persist.update_instance_action_counters(action['uuid'], done=2)
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 2)
+        self.assertEqual(result['number_failed'], 0)
+        # When we update the done counter by -1, and the failed counter by 1
+        self.persist.update_instance_action_counters(
+            action['uuid'], done=-1, failed=1)
+        # Then we should see 1 and 1
+        result = self.persist.get_by_uuid('instance_actions', action['uuid'])
+        self.assertEqual(result['number_done'], 1)
+        self.assertEqual(result['number_failed'], 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/wim/tests/test_wim_thread.py b/osm_ro/wim/tests/test_wim_thread.py
new file mode 100644 (file)
index 0000000..6d61848
--- /dev/null
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+from __future__ import unicode_literals, print_function
+
+import unittest
+from difflib import unified_diff
+from operator import itemgetter
+from time import time
+
+import json
+
+from mock import MagicMock, patch
+
+from . import fixtures as eg
+from ...tests.db_helpers import (
+    TestCaseWithDatabasePerTest,
+    disable_foreign_keys,
+    uuid
+)
+from ..engine import WimEngine
+from ..persistence import WimPersistence
+from ..wim_thread import WimThread
+
+
+ignore_connector = patch('osm_ro.wim.wim_thread.CONNECTORS', MagicMock())
+
+
+def _repr(value):
+    return json.dumps(value, indent=4, sort_keys=True)
+
+
+@ignore_connector
+class TestWimThreadWithDb(TestCaseWithDatabasePerTest):
+    def setUp(self):
+        super(TestWimThreadWithDb, self).setUp()
+        self.persist = WimPersistence(self.db)
+        wim = eg.wim(0)
+        account = eg.wim_account(0, 0)
+        account['wim'] = wim
+        self.thread = WimThread(self.persist, account)
+        self.thread.connector = MagicMock()
+
+    def assertTasksEqual(self, left, right):
+        fields = itemgetter('item', 'item_id', 'action', 'status')
+        left_ = (t.as_dict() for t in left)
+        left_ = [fields(t) for t in left_]
+        right_ = [fields(t) for t in right]
+
+        try:
+            self.assertItemsEqual(left_, right_)
+        except AssertionError:
+            print('left', _repr(left))
+            print('left', len(left_), 'items')
+            print('right', len(right_), 'items')
+            result = list(unified_diff(_repr(sorted(left_)).split('\n'),
+                                       _repr(sorted(right_)).split('\n'),
+                                       'left', 'right'))
+            print('diff:\n', '\n'.join(result))
+            raise
+
+    def test_reload_actions__all_create(self):
+        # Given we have 3 CREATE actions stored in the database
+        actions = eg.wim_actions('CREATE',
+                                 action_id=uuid('action0'), num_links=3)
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions':
+                eg.instance_action(action_id=uuid('action0'))},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # All of them should be inserted as pending
+        self.assertTasksEqual(self.thread.pending_tasks, actions)
+
+    def test_reload_actions__all_refresh(self):
+        # Given just DONE tasks are in the database
+        actions = eg.wim_actions(status='DONE',
+                                 action_id=uuid('action0'), num_links=3)
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions':
+                eg.instance_action(action_id=uuid('action0'))},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # All of them should be inserted as refresh
+        self.assertTasksEqual(self.thread.refresh_tasks, actions)
+
+    def test_reload_actions__grouped(self):
+        # Given we have 2 tasks for the same item in the database
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # Just one group should be created
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
+
+    def test_reload_actions__delete_scheduled(self):
+        # Given we have 3 tasks for the same item in the database, but one of
+        # them is a DELETE task and it is SCHEDULED
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs) +
+                   eg.wim_actions('DELETE', status='SCHEDULED', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # Just one group should be created
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 1)
+
+    def test_reload_actions__delete_done(self):
+        # Given we have 3 tasks for the same item in the database, but one of
+        # them is a DELETE task and it is not SCHEDULED
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', **kwargs) +
+                   eg.wim_actions('FIND', **kwargs) +
+                   eg.wim_actions('DELETE', status='DONE', **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions()
+        # No pending task should be found
+        self.assertEqual(self.thread.pending_tasks, [])
+
+    def test_reload_actions__batch(self):
+        # Given the group_limit is 10, and we have 24 actions
+        group_limit = 10
+        kwargs = {'action_id': uuid('action0')}
+        actions = (eg.wim_actions('CREATE', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs) +
+                   eg.wim_actions('FIND', num_links=8, **kwargs))
+        for i, action in enumerate(actions):
+            action['task_index'] = i
+
+        self.populate([
+            {'nfvo_tenants': eg.tenant()}
+        ] + eg.wim_set() + [
+            {'instance_actions': eg.instance_action(**kwargs)},
+            {'vim_wim_actions': actions}
+        ])
+
+        # When we reload the tasks
+        self.thread.reload_actions(group_limit)
+
+        # Then we should still see the actions in memory properly
+        self.assertTasksEqual(self.thread.pending_tasks, actions)
+        self.assertEqual(len(self.thread.grouped_tasks.values()), 8)
+
+    @disable_foreign_keys
+    def test_process_list__refresh(self):
+        update_wan_link = MagicMock(wrap=self.persist.update_wan_link)
+        update_action = MagicMock(wrap=self.persist.update_wan_link)
+        patches = dict(update_wan_link=update_wan_link,
+                       update_action=update_action)
+
+        with patch.multiple(self.persist, **patches):
+            # Given we have 2 tasks in the refresh queue
+            kwargs = {'action_id': uuid('action0')}
+            actions = (eg.wim_actions('FIND', 'DONE', **kwargs) +
+                       eg.wim_actions('CREATE', 'BUILD', **kwargs))
+            for i, action in enumerate(actions):
+                action['task_index'] = i
+
+            self.populate(
+                [{'instance_wim_nets': eg.instance_wim_nets()}] +
+                [{'instance_actions':
+                    eg.instance_action(num_tasks=2, **kwargs)}] +
+                [{'vim_wim_actions': actions}])
+
+            self.thread.insert_pending_tasks(actions)
+
+            # When we process the refresh list
+            processed = self.thread.process_list('refresh')
+
+            # Then we should have 2 updates
+            self.assertEqual(processed, 2)
+
+            # And the database should be updated accordingly
+            self.assertEqual(update_wan_link.call_count, 2)
+            self.assertEqual(update_action.call_count, 2)
+
+    @disable_foreign_keys
+    def test_delete_superseed_create(self):
+        # Given we insert a scheduled CREATE task
+        instance_action = eg.instance_action(num_tasks=1)
+        self.thread.pending_tasks = []
+        engine = WimEngine(persistence=self.persist)
+        self.addCleanup(engine.stop_threads)
+        wan_links = eg.instance_wim_nets()
+        create_actions = engine.create_actions(wan_links)
+        delete_actions = engine.delete_actions(wan_links)
+        engine.incorporate_actions(create_actions + delete_actions,
+                                   instance_action)
+
+        self.populate(instance_actions=instance_action,
+                      vim_wim_actions=create_actions + delete_actions)
+
+        self.thread.insert_pending_tasks(create_actions)
+
+        assert self.thread.pending_tasks[0].is_scheduled
+
+        # When we insert the equivalent DELETE task
+        self.thread.insert_pending_tasks(delete_actions)
+
+        # Then the CREATE task should be superseded
+        self.assertEqual(self.thread.pending_tasks[0].action, 'CREATE')
+        assert self.thread.pending_tasks[0].is_superseded
+
+        self.thread.process_list('pending')
+        self.thread.process_list('refresh')
+        self.assertFalse(self.thread.pending_tasks)
+
+
+@ignore_connector
+class TestWimThread(unittest.TestCase):
+    def setUp(self):
+        wim = eg.wim(0)
+        account = eg.wim_account(0, 0)
+        account['wim'] = wim
+        self.persist = MagicMock()
+        self.thread = WimThread(self.persist, account)
+        self.thread.connector = MagicMock()
+
+        super(TestWimThread, self).setUp()
+
+    def test_process_refresh(self):
+        # Given we have 30 tasks in the refresh queue
+        kwargs = {'action_id': uuid('action0')}
+        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
+        self.thread.insert_pending_tasks(actions)
+
+        # When we process the refresh list
+        processed = self.thread.process_list('refresh')
+
+        # Then we should have REFRESH_BATCH updates
+        self.assertEqual(processed, self.thread.BATCH)
+
+    def test_process_refresh__with_superseded(self):
+        # Given we have 30 tasks but 15 of them are superseded
+        kwargs = {'action_id': uuid('action0')}
+        actions = eg.wim_actions('FIND', 'DONE', num_links=30, **kwargs)
+        self.thread.insert_pending_tasks(actions)
+        for task in self.thread.refresh_tasks[0:30:2]:
+            task.status = 'SUPERSEDED'
+
+        now = time()
+
+        # When we call the refresh_elements
+        processed = self.thread.process_list('refresh')
+
+        # Then we should have 25 updates (since SUPERSEDED updates are cheap,
+        # they are not counted for the limits)
+        self.assertEqual(processed, 25)
+
+        # The SUPERSEDED tasks should be removed, 5 tasks should be untouched,
+        # and 10 tasks should be rescheduled
+        refresh_tasks = self.thread.refresh_tasks
+        old = [t for t in refresh_tasks if t.process_at <= now]
+        new = [t for t in refresh_tasks if t.process_at > now]
+        self.assertEqual(len(old), 5)
+        self.assertEqual(len(new), 10)
+        self.assertEqual(len(self.thread.refresh_tasks), 15)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/osm_ro/wim/tox.ini b/osm_ro/wim/tox.ini
new file mode 100644 (file)
index 0000000..29f1a8f
--- /dev/null
@@ -0,0 +1,58 @@
+# This tox file allows the devs to run unit tests only for this subpackage.
+# In order to do so, cd into the directory and run `tox`
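+#
+# For example (illustrative invocations; the database-backed tests expect a
+# MySQL instance configured through the *_DB_* environment variables):
+#
+#     tox                            # run the default envlist: py27,flake8,radon
+#     tox -e flake8                  # run only the style checks
+#     tox -e py27 -- tests/test_engine.py    # forward posargs to nosetests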
+
+[tox]
+minversion = 1.8
+envlist = py27,flake8,radon
+skipsdist = True
+
+[testenv]
+passenv = *_DB_*
+setenv =
+    PATH = {env:PATH}:{toxinidir}/../../database_utils
+    DBUTILS = {toxinidir}/../../database_utils
+changedir = {toxinidir}
+commands =
+    nosetests -v -d {posargs:tests}
+deps =
+    WebTest
+    logging
+    bottle
+    coverage
+    jsonschema
+    mock
+    mysqlclient
+    nose
+    six
+    PyYaml
+    paramiko
+    ipdb
+    requests
+
+[testenv:flake8]
+changedir = {toxinidir}
+deps = flake8
+commands = flake8 {posargs:.}
+
+[testenv:radon]
+changedir = {toxinidir}
+deps = radon
+commands =
+    radon cc --show-complexity --total-average {posargs:.}
+    radon mi -s {posargs:.}
+
+[coverage:run]
+branch = True
+source = {toxinidir}
+omit =
+    tests
+    tests/*
+    */test_*
+    .tox/*
+
+[coverage:report]
+show_missing = True
+
+[flake8]
+exclude =
+    .tox
diff --git a/osm_ro/wim/wan_link_actions.py b/osm_ro/wim/wan_link_actions.py
new file mode 100644 (file)
index 0000000..1993ae7
--- /dev/null
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+# pylint: disable=E1101,E0203,W0201
+import json
+from time import time
+
+from ..utils import filter_dict_keys as filter_keys
+from ..utils import merge_dicts, remove_none_items, safe_get, truncate
+from .actions import CreateAction, DeleteAction, FindAction
+from .errors import (
+    InconsistentState,
+    MultipleRecordsFound,
+    NoRecordFound,
+)
+from .wimconn import WimConnectorError
+
+INSTANCE_NET_STATUS_ERROR = ('DOWN', 'ERROR', 'VIM_ERROR',
+                             'DELETED', 'SCHEDULED_DELETION')
+INSTANCE_NET_STATUS_PENDING = ('BUILD', 'INACTIVE', 'SCHEDULED_CREATION')
+INSTANCE_VM_STATUS_ERROR = ('ERROR', 'VIM_ERROR',
+                            'DELETED', 'SCHEDULED_DELETION')
+
+
+class RefreshMixin(object):
+    def refresh(self, connector, persistence):
+        """Ask the external WAN Infrastructure Manager system for updates on
+        the status of the task.
+
+        Arguments:
+            connector: object with API for accessing the WAN
+                Infrastructure Manager system
+            persistence: abstraction layer for the database
+        """
+        fields = ('wim_status', 'wim_info', 'error_msg')
+        result = dict.fromkeys(fields)
+
+        try:
+            result.update(
+                connector
+                .get_connectivity_service_status(self.wim_internal_id))
+        except WimConnectorError as ex:
+            self.logger.exception(ex)
+            result.update(wim_status='WIM_ERROR', error_msg=truncate(ex))
+
+        result = filter_keys(result, fields)
+
+        action_changes = remove_none_items({
+            'extra': merge_dicts(self.extra, result),
+            'status': 'BUILD' if result['wim_status'] == 'BUILD' else None,
+            'error_msg': result['error_msg'],
+            'modified_at': time()})
+        link_changes = merge_dicts(result, status=result.pop('wim_status'))
+        # ^  Rename field: wim_status => status
+
+        persistence.update_wan_link(self.item_id,
+                                    remove_none_items(link_changes))
+
+        self.save(persistence, **action_changes)
+
+        return result
+
+
+class WanLinkCreate(RefreshMixin, CreateAction):
+    def fail(self, persistence, reason, status='FAILED'):
+        changes = {'status': 'ERROR', 'error_msg': truncate(reason)}
+        persistence.update_wan_link(self.item_id, changes)
+        return super(WanLinkCreate, self).fail(persistence, reason, status)
+
+    def process(self, connector, persistence, ovim):
+        """Process the current task.
+        First we check if all the dependencies are ready,
+        then we call ``execute`` to actually execute the action.
+
+        Arguments:
+            connector: object with API for accessing the WAN
+                Infrastructure Manager system
+            persistence: abstraction layer for the database
+            ovim: instance of openvim, abstraction layer that enable
+                SDN-related operations
+        """
+        wan_link = persistence.get_by_uuid('instance_wim_nets', self.item_id)
+
+        # First we check if all the dependencies are solved
+        instance_nets = persistence.get_instance_nets(
+            wan_link['instance_scenario_id'], wan_link['sce_net_id'])
+
+        try:
+            dependency_statuses = [n['status'] for n in instance_nets]
+        except KeyError:
+            self.logger.debug('`status` not found in\n\n%s\n\n',
+                              json.dumps(instance_nets, indent=4))
+        errored = [instance_nets[i]
+                   for i, status in enumerate(dependency_statuses)
+                   if status in INSTANCE_NET_STATUS_ERROR]
+        if errored:
+            return self.fail(
+                persistence,
+                'Impossible to establish WAN connectivity due to an issue '
+                'with the local networks:\n\t' +
+                '\n\t'.join('{uuid}: {status}'.format(**n) for n in errored))
+
+        pending = [instance_nets[i]
+                   for i, status in enumerate(dependency_statuses)
+                   if status in INSTANCE_NET_STATUS_PENDING]
+        if pending:
+            return self.defer(
+                persistence,
+                'Still waiting for the local networks to be active:\n\t' +
+                '\n\t'.join('{uuid}: {status}'.format(**n) for n in pending))
+
+        return self.execute(connector, persistence, ovim, instance_nets)
+
+    def get_endpoint(self, persistence, ovim, instance_net):
+        """Retrieve the endpoint (information about the connection PoP <> WAN
+        """
+        wim_account = persistence.get_wim_account_by(uuid=self.wim_account_id)
+
+        # TODO: make more generic to support networks that are not created with
+        # the SDN assist. This method should have a consistent way of getting
+        # the endpoint for all different types of networks used in the VIM
+        # (provider networks, SDN assist, overlay networks, ...)
+        if instance_net.get('sdn_net_id'):
+            return self.get_endpoint_sdn(
+                persistence, ovim, instance_net, wim_account['wim_id'])
+        else:
+            raise InconsistentState(
+                'The field `instance_nets.sdn_net_id` was expected to be '
+                'found in the database for the record %s after the network '
+                'becomes active, but it is still NULL', instance_net['uuid'])
+
+    def get_endpoint_sdn(self, persistence, ovim, instance_net, wim_id):
+        criteria = {'net_id': instance_net['sdn_net_id']}
+        local_port_mapping = ovim.get_ports(filter=criteria)
+
+        if len(local_port_mapping) > 1:
+            raise MultipleRecordsFound(criteria, 'ovim.ports')
+        local_port_mapping = local_port_mapping[0]
+
+        criteria = {
+            'wim_id': wim_id,
+            'pop_switch_dpid': local_port_mapping['switch_dpid'],
+            'pop_switch_port': local_port_mapping['switch_port'],
+            'datacenter_id': instance_net['datacenter_id']}
+
+        wan_port_mapping = persistence.query_one(
+            FROM='wim_port_mappings',
+            WHERE=criteria)
+
+        if local_port_mapping.get('vlan'):
+            wan_port_mapping['wan_service_mapping_info']['vlan'] = (
+                local_port_mapping['vlan'])
+
+        return wan_port_mapping
+
+    @staticmethod
+    def _derive_connection_point(endpoint):
+        point = {'service_endpoint_id': endpoint['wan_service_endpoint_id']}
+        # TODO: Cover other scenarios, e.g. VXLAN.
+        info = endpoint.get('wan_service_mapping_info', {})
+        if 'vlan' in info:
+            point['service_endpoint_encapsulation_type'] = 'dot1q'
+            point['service_endpoint_encapsulation_info'] = {
+                'vlan': info['vlan']
+            }
+        else:
+            point['service_endpoint_encapsulation_type'] = 'none'
+        return point
+
+    @staticmethod
+    def _derive_service_type(connection_points):
+        # TODO: add multipoint and L3 connectivity.
+        if len(connection_points) == 2:
+            return 'ELINE'
+        else:
+            raise NotImplementedError('Multipoint connectivity is not '
+                                      'supported yet.')
+
+    def _update_persistent_data(self, persistence, service_uuid,
+                                endpoints, conn_info):
+        """Store plugin/connector specific information in the database"""
+        persistence.update_wan_link(self.item_id, {
+            'wim_internal_id': service_uuid,
+            'wim_info': {'conn_info': conn_info},
+            'status': 'BUILD'})
+
+    def execute(self, connector, persistence, ovim, instance_nets):
+        """Actually execute the action, since now we are sure all the
+        dependencies are solved
+        """
+        try:
+            endpoints = [self.get_endpoint(persistence, ovim, net)
+                         for net in instance_nets]
+            connection_points = [self._derive_connection_point(e)
+                                 for e in endpoints]
+
+            uuid, info = connector.create_connectivity_service(
+                self._derive_service_type(connection_points),
+                connection_points
+                # TODO: other properties, e.g. bandwidth
+            )
+        except (WimConnectorError, InconsistentState) as ex:
+            self.logger.exception(ex)
+            return self.fail(
+                persistence,
+                'Impossible to establish WAN connectivity.\n\t{}'.format(ex))
+
+        self.logger.debug('WAN connectivity established %s\n%s\n',
+                          uuid, json.dumps(info, indent=4))
+        self.wim_internal_id = uuid
+        self._update_persistent_data(persistence, uuid, endpoints, info)
+        self.succeed(persistence)
+        return uuid
+
+
+class WanLinkDelete(DeleteAction):
+    def succeed(self, persistence):
+        try:
+            persistence.update_wan_link(self.item_id, {'status': 'DELETED'})
+        except NoRecordFound:
+            self.logger.debug('%s(%s) record already deleted',
+                              self.item, self.item_id)
+
+        return super(WanLinkDelete, self).succeed(persistence)
+
+    def get_wan_link(self, persistence):
+        """Retrieve information about the wan_link
+
+        It might be cached, or arrive from the database
+        """
+        if self.extra.get('wan_link'):
+            # First try a cached version of the data
+            return self.extra['wan_link']
+
+        return persistence.get_by_uuid(
+            'instance_wim_nets', self.item_id)
+
+    def process(self, connector, persistence, ovim):
+        """Delete a WAN link previously created"""
+        wan_link = self.get_wan_link(persistence)
+        if 'ERROR' in (wan_link.get('status') or ''):
+            return self.fail(
+                persistence,
+                'Impossible to delete WAN connectivity, '
+                'it was never successfully established:'
+                '\n\t{}'.format(wan_link['error_msg']))
+
+        internal_id = wan_link.get('wim_internal_id') or self.internal_id
+
+        if not internal_id:
+            self.logger.debug('No wim_internal_id found in\n%s\n%s\n'
+                              'Assuming no network was created yet, '
+                              'so no network has to be deleted.',
+                              json.dumps(wan_link, indent=4),
+                              json.dumps(self.as_dict(), indent=4))
+            return self.succeed(persistence)
+
+        try:
+            conn_info = safe_get(wan_link, 'wim_info.conn_info')
+            self.logger.debug('Connection Service %s (wan_link: %s):\n%s\n',
+                              internal_id, wan_link['uuid'],
+                              json.dumps(conn_info, indent=4))
+            result = connector.delete_connectivity_service(
+                internal_id, conn_info)
+        except (WimConnectorError, InconsistentState) as ex:
+            self.logger.exception(ex)
+            return self.fail(
+                persistence,
+                'Impossible to delete WAN connectivity.\n\t{}'.format(ex))
+
+        self.logger.debug('WAN connectivity removed %s', result)
+        self.succeed(persistence)
+
+        return result
+
+
+class WanLinkFind(RefreshMixin, FindAction):
+    pass
+
+
+ACTIONS = {
+    'CREATE': WanLinkCreate,
+    'DELETE': WanLinkDelete,
+    'FIND': WanLinkFind,
+}
diff --git a/osm_ro/wim/wim_thread.py b/osm_ro/wim/wim_thread.py
new file mode 100644 (file)
index 0000000..a13d6a2
--- /dev/null
@@ -0,0 +1,437 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+
+"""
+Thread-based interaction with WIMs. Tasks are stored in the
+database (vim_wim_actions table) and processed sequentially
+
+Please check the Action class for information about the content of each action.
+"""
+
+import logging
+import threading
+from contextlib import contextmanager
+from functools import partial
+from itertools import islice, chain, takewhile
+from operator import itemgetter, attrgetter
+from sys import exc_info
+from time import time, sleep
+
+from six import reraise
+from six.moves import queue
+
+from . import wan_link_actions, wimconn_odl  # wimconn_tapi
+from ..utils import ensure, partition, pipe
+from .actions import IGNORE, PENDING, REFRESH
+from .errors import (
+    DbBaseException,
+    QueueFull,
+    InvalidParameters as Invalid,
+    UndefinedAction,
+)
+from .failing_connector import FailingConnector
+from .wimconn import WimConnectorError
+
+ACTIONS = {
+    'instance_wim_nets': wan_link_actions.ACTIONS
+}
+
+CONNECTORS = {
+    "odl": wimconn_odl.OdlConnector,
+    # "tapi": wimconn_tapi
+    # Add extra connectors here
+}
+
+
+class WimThread(threading.Thread):
+    """Specialized task queue implementation that runs in an isolated thread.
+
+    Objects of this class have a few methods that are intended to be used
+    outside of the thread:
+
+    - start
+    - insert_task
+    - reload
+    - exit
+
+    All the other methods are used internally to manipulate/process the task
+    queue.
+    """
+    RETRY_SCHEDULED = 10  # 10 seconds
+    REFRESH_BUILD = 10    # 10 seconds
+    REFRESH_ACTIVE = 60   # 1 minute
+    BATCH = 10            # 10 actions per round
+    QUEUE_SIZE = 2000
+    RECOVERY_TIME = 5     # Sleep 5s to leave the system some time to recover
+    MAX_RECOVERY_TIME = 180
+    WAITING_TIME = 1      # Wait 1s for tasks to arrive, when there are none
+
+    def __init__(self, persistence, wim_account, logger=None, ovim=None):
+        """Init a thread.
+
+        Arguments:
+            persistence: Database abstraction layer
+            wim_account: Record containing wim_account, tenant and wim
+                information.
+        """
+        name = '{}.{}.{}'.format(wim_account['wim']['name'],
+                                 wim_account['name'], wim_account['uuid'])
+        super(WimThread, self).__init__(name=name)
+
+        self.name = name
+        self.connector = None
+        self.wim_account = wim_account
+
+        self.logger = logger or logging.getLogger('openmano.wim.'+self.name)
+        self.persist = persistence
+        self.ovim = ovim
+
+        self.task_queue = queue.Queue(self.QUEUE_SIZE)
+
+        self.refresh_tasks = []
+        """Time ordered task list for refreshing the status of WIM nets"""
+
+        self.pending_tasks = []
+        """Time ordered task list for creation, deletion of WIM nets"""
+
+        self.grouped_tasks = {}
+        """ It contains all the creation/deletion pending tasks grouped by
+        their concrete vm, net, etc.
+
+            <item><item_id>:
+                -   <task1>  # e.g. CREATE task
+                    <task2>  # e.g. DELETE task
+        """
+
+        self._insert_task = {
+            PENDING: partial(self.schedule, list_name='pending'),
+            REFRESH: partial(self.schedule, list_name='refresh'),
+            IGNORE: lambda task, *_, **__: task.save(self.persist)}
+        """Send the task to the right processing queue"""
+
+    def on_start(self):
+        """Run a series of procedures every time the thread (re)starts"""
+        self.connector = self.get_connector()
+        self.reload_actions()
+
+    def get_connector(self):
+        """Create an WimConnector instance according to the wim.type"""
+        error_msg = ''
+        account_id = self.wim_account['uuid']
+        try:
+            account = self.persist.get_wim_account_by(
+                uuid=account_id, hide=None)  # Credentials need to be available
+            wim = account['wim']
+            mapping = self.persist.query('wim_port_mappings',
+                                         WHERE={'wim_id': wim['uuid']},
+                                         error_if_none=False)
+            return CONNECTORS[wim['type']](wim, account, {
+                'service_endpoint_mapping': mapping or []
+            })
+        except DbBaseException as ex:
+            error_msg = ('Error when retrieving WIM account ({})\n'
+                         .format(account_id)) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+        except KeyError as ex:
+            error_msg = ('Unable to find the WIM connector for WIM ({})\n'
+                         .format(wim['type'])) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+        except (WimConnectorError, Exception) as ex:
+            # TODO: Remove the Exception class here when the connector class is
+            # ready
+            error_msg = ('Error when loading WIM connector for WIM ({})\n'
+                         .format(wim['type'])) + str(ex)
+            self.logger.error(error_msg, exc_info=True)
+
+        error_msg_extra = ('Any task targeting WIM account {} ({}) will fail.'
+                           .format(account_id, self.wim_account.get('name')))
+        self.logger.warning(error_msg_extra)
+        return FailingConnector(error_msg + '\n' + error_msg_extra)
+
+    @contextmanager
+    def avoid_exceptions(self):
+        """Make a real effort to keep the thread alive, by avoiding the
+        exceptions. They are instead logged as a critical errors.
+        """
+        try:
+            yield
+        except Exception as ex:
+            self.logger.critical("Unexpected exception %s", ex, exc_info=True)
+            sleep(self.RECOVERY_TIME)
+
+    def reload_actions(self, group_limit=100):
+        """Read actions from database and reload them at memory.
+
+        This method will clean and reload the attributes ``refresh_tasks``,
+        ``pending_tasks`` and ``grouped_tasks``
+
+        Arguments:
+            group_limit (int): maximum number of action groups (those that
+                refer to the same ``<item, item_id>``) to be retrieved from the
+                database in each batch.
+        """
+
+        # First we clean the cache to let the garbage collector work
+        self.refresh_tasks = []
+        self.pending_tasks = []
+        self.grouped_tasks = {}
+
+        offset = 0
+
+        while True:
+            # Do things in batches
+            task_groups = self.persist.get_actions_in_groups(
+                self.wim_account['uuid'], item_types=('instance_wim_nets',),
+                group_offset=offset, group_limit=group_limit)
+            offset += (group_limit - 1)  # Update for the next batch
+
+            if not task_groups:
+                break
+
+            pending_groups = (g for _, g in task_groups if is_pending_group(g))
+
+            for task_list in pending_groups:
+                with self.avoid_exceptions():
+                    self.insert_pending_tasks(filter_pending_tasks(task_list))
+
+            self.logger.debug(
+                'Reloaded wim actions pending: %d refresh: %d',
+                len(self.pending_tasks), len(self.refresh_tasks))
+
+    def insert_pending_tasks(self, task_list):
+        """Insert task in the list of actions being processed"""
+        task_list = [action_from(task, self.logger) for task in task_list]
+
+        for task in task_list:
+            group = task.group_key
+            self.grouped_tasks.setdefault(group, [])
+            # Each task can try to supersede the other ones,
+            # but just DELETE actions will actually do
+            task.supersede(self.grouped_tasks[group])
+            self.grouped_tasks[group].append(task)
+
+        # We need a separate loop so each task can check all the other
+        # ones before deciding
+        for task in task_list:
+            self._insert_task[task.processing](task)
+            self.logger.debug('Insert WIM task: %s (%s): %s %s',
+                              task.id, task.status, task.action, task.item)
+
+    def schedule(self, task, when=None, list_name='pending'):
+        """Insert a task in the correct list, respecting the schedule.
+        The refreshing list is ordered by threshold_time (task.process_at)
+        It is assumed that this is called inside this thread
+
+        Arguments:
+            task (Action): object representing the task.
+                This object must implement the ``process`` method and inherit
+                from the ``Action`` class
+            list_name: either 'refresh' or 'pending'
+            when (float): unix time in seconds since the epoch, as a float
+        """
+        processing_list = {'refresh': self.refresh_tasks,
+                           'pending': self.pending_tasks}[list_name]
+
+        when = when or time()
+        task.process_at = when
+
+        schedule = (t.process_at for t in processing_list)
+        index = len(list(takewhile(lambda moment: moment <= when, schedule)))
+
+        processing_list.insert(index, task)
+        self.logger.debug(
+            'Schedule of %s in "%s" - waiting position: %d (%f)',
+            task.id, list_name, index, task.process_at)
+
+        return task
+
+    def process_list(self, list_name='pending'):
+        """Process actions in batches and reschedule them if necessary"""
+        task_list, handler = {
+            'refresh': (self.refresh_tasks, self._refresh_single),
+            'pending': (self.pending_tasks, self._process_single)}[list_name]
+
+        now = time()
+        waiting = ((i, task) for i, task in enumerate(task_list)
+                   if task.process_at is None or task.process_at <= now)
+
+        is_superseded = pipe(itemgetter(1), attrgetter('is_superseded'))
+        superseded, active = partition(is_superseded, waiting)
+        superseded = [(i, t.save(self.persist)) for i, t in superseded]
+
+        batch = islice(active, self.BATCH)
+        refreshed = [(i, handler(t)) for i, t in batch]
+
+        # Since pop changes the indexes in the list, we need to do it backwards
+        remove = sorted([i for i, _ in chain(refreshed, superseded)])
+        return len([task_list.pop(i) for i in reversed(remove)])
+
+    def _refresh_single(self, task):
+        """Refresh just a single task, and reschedule it if necessary"""
+        now = time()
+
+        result = task.refresh(self.connector, self.persist)
+        self.logger.debug('Refreshing WIM task: %s (%s): %s %s => %r',
+                          task.id, task.status, task.action, task.item, result)
+
+        interval = self.REFRESH_BUILD if task.is_build else self.REFRESH_ACTIVE
+        self.schedule(task, now + interval, 'refresh')
+
+        return result
+
+    def _process_single(self, task):
+        """Process just a single task, and reschedule it if necessary"""
+        now = time()
+
+        result = task.process(self.connector, self.persist, self.ovim)
+        self.logger.debug('Executing WIM task: %s (%s): %s %s => %r',
+                          task.id, task.status, task.action, task.item, result)
+
+        if task.action == 'DELETE':
+            del self.grouped_tasks[task.group_key]
+
+        self._insert_task[task.processing](task, now + self.RETRY_SCHEDULED)
+
+        return result
+
+    def insert_task(self, task):
+        """Send a message to the running thread
+
+        This function is supposed to be called outside of the WIM Thread.
+
+        Arguments:
+            task (str or dict): `"exit"`, `"reload"` or dict representing a
+                task. For more information about the fields in task, please
+                check the Action class.
+        """
+        try:
+            self.task_queue.put(task, False)
+            return None
+        except queue.Full:
+            ex = QueueFull(self.name)
+            reraise(ex.__class__, ex, exc_info()[2])
+
+    def reload(self):
+        """Send a message to the running thread to reload itself"""
+        self.insert_task('reload')
+
+    def exit(self):
+        """Send a message to the running thread to kill itself"""
+        self.insert_task('exit')
+
+    def run(self):
+        self.logger.debug('Starting: %s', self.name)
+        recovery_time = 0
+        while True:
+            self.on_start()
+            reload_thread = False
+            self.logger.debug('Reloaded: %s', self.name)
+
+            while True:
+                with self.avoid_exceptions():
+                    while not self.task_queue.empty():
+                        task = self.task_queue.get()
+                        if isinstance(task, dict):
+                            self.insert_pending_tasks([task])
+                        elif isinstance(task, list):
+                            self.insert_pending_tasks(task)
+                        elif isinstance(task, str):
+                            if task == 'exit':
+                                self.logger.debug('Finishing: %s', self.name)
+                                return 0
+                            elif task == 'reload':
+                                reload_thread = True
+                                break
+                        self.task_queue.task_done()
+
+                    if reload_thread:
+                        break
+
+                    if not(self.process_list('pending') +
+                           self.process_list('refresh')):
+                        sleep(self.WAITING_TIME)
+
+                    if isinstance(self.connector, FailingConnector):
+                        # Wait some time to try instantiating the connector
+                        # again and restart
+                        # Increase the recovery time if restarting is not
+                        # working (up to a limit)
+                        recovery_time = min(self.MAX_RECOVERY_TIME,
+                                            recovery_time + self.RECOVERY_TIME)
+                        sleep(recovery_time)
+                        break
+                    else:
+                        recovery_time = 0
+
+        self.logger.debug("Finishing")
+
+
+def is_pending_group(group):
+    return all(task['action'] != 'DELETE' or
+               task['status'] == 'SCHEDULED'
+               for task in group)
+
+
+def filter_pending_tasks(group):
+    return (t for t in group
+            if (t['status'] == 'SCHEDULED' or
+                t['action'] in ('CREATE', 'FIND')))
+
+
+def action_from(record, logger=None, mapping=ACTIONS):
+    """Create an Action object from a action record (dict)
+
+    Arguments:
+        mapping (dict): Nested data structure that maps the relationship
+            between action properties and object constructors.  This data
+            structure should be a dict with 2 levels of keys: item type and
+            action type. Example::
+                {'wan_link':
+                    {'CREATE': WanLinkCreate}
+                    ...}
+                ...}
+        record (dict): action information
+
+    Return:
+        (Action.Base): Object representing the action
+    """
+    ensure('item' in record, Invalid('`record` should contain "item"'))
+    ensure('action' in record, Invalid('`record` should contain "action"'))
+
+    try:
+        factory = mapping[record['item']][record['action']]
+        return factory(record, logger=logger)
+    except KeyError:
+        ex = UndefinedAction(record['item'], record['action'])
+        reraise(ex.__class__, ex, exc_info()[2])
diff --git a/osm_ro/wim/wimconn.py b/osm_ro/wim/wimconn.py
new file mode 100644 (file)
index 0000000..92b6db0
--- /dev/null
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+"""The WIM connector is responsible for establishing wide area network
+connectivity.
+
+It receives information from the WimThread/WAN Actions about the endpoints of
+a link that spans across multiple datacenters and establishes a path
+between them.
+"""
+import logging
+
+from ..http_tools.errors import HttpMappedError
+
+
+class WimConnectorError(HttpMappedError):
+    """Base Exception for all connector related errors"""
+
+
+class WimConnector(object):
+    """Abstract base class for all the WIM connectors
+
+    Arguments:
+        wim (dict): WIM record, as stored in the database
+        wim_account (dict): WIM account record, as stored in the database
+        config (dict): optional persistent information related to a specific
+            connector.  Inside this dict, a special key,
+            ``service_endpoint_mapping`` provides the internal endpoint
+            mapping.
+        logger (logging.Logger): optional logger object. If none is passed
+            ``openmano.wim.wimconn`` is used.
+
+    The arguments of the constructor are converted to object attributes.
+    An extra property, ``service_endpoint_mapping`` is created from ``config``.
+    """
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        self.logger = logger or logging.getLogger('openmano.wim.wimconn')
+
+        self.wim = wim
+        self.wim_account = wim_account
+        self.config = config or {}
+        self.service_endpoint_mapping = (
+            config.get('service_endpoint_mapping', []))
+
+    def check_credentials(self):
+        """Check if the connector itself can access the WIM.
+
+        Raises:
+            WimConnectorError: Issues regarding authorization, access to
+                external URLs, etc are detected.
+        """
+        raise NotImplementedError
+
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service established
+
+        Arguments:
+            service_uuid (str): UUID of the connectivity service
+            conn_info (dict or None): Information returned by the connector
+                during the service creation/edition and subsequently stored in
+                the database.
+
+        Returns:
+            dict: JSON/YAML-serializable dict that contains a mandatory key
+                ``wim_status`` associated with one of the following values::
+
+                    {'wim_status': 'ACTIVE'}
+                        # The service is up and running.
+
+                    {'wim_status': 'INACTIVE'}
+                        # The service was created, but the connector
+                        # cannot determine yet if connectivity exists
+                        # (ideally, the caller needs to wait and check again).
+
+                    {'wim_status': 'DOWN'}
+                        # Connection was previously established,
+                        # but an error/failure was detected.
+
+                    {'wim_status': 'ERROR'}
+                        # An error occurred when trying to create the service/
+                        # establish the connectivity.
+
+                    {'wim_status': 'BUILD'}
+                        # Still trying to create the service, the caller
+                        # needs to wait and check again.
+
+                Additionally ``error_msg``(**str**) and ``wim_info``(**dict**)
+                keys can be used to provide additional status explanation or
+                new information available for the connectivity service.
+        """
+        raise NotImplementedError
+
+    def create_connectivity_service(self, service_type, connection_points,
+                                    **kwargs):
+        """Stablish WAN connectivity between the endpoints
+
+        Arguments:
+            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+                ``L3``.
+            connection_points (list): each point corresponds to an entry
+                point from the DC to the transport network. A connection
+                point identifies the specific access and carries additional
+                service parameters, such as the encapsulation type.
+                Each point is represented by a dict as follows::
+
+                    {
+                      "service_endpoint_id": ..., (str[uuid])
+                      "service_endpoint_encapsulation_type": ...,
+                           (enum: none, dot1q, ...)
+                      "service_endpoint_encapsulation_info": {
+                        ... (dict)
+                        "vlan": ..., (int, present if encapsulation is dot1q)
+                        "vni": ... (int, present if encapsulation is vxlan),
+                        "peers": [(ipv4_1), (ipv4_2)]
+                            (present if encapsulation is vxlan)
+                      }
+                    }
+
+              The service endpoint ID must have been previously provided to
+              the WIM engine in the RO when the WIM port mapping is registered.
+
+        Keyword Arguments:
+            bandwidth (int): value in kilobytes
+            latency (int): value in milliseconds
+
+        Other QoS parameters might be passed as keyword arguments.
+
+        Returns:
+            tuple: ``(service_uuid, conn_info)`` containing:
+               - *service_uuid* (str): UUID of the established connectivity
+                 service
+               - *conn_info* (dict or None): Information to be stored in the
+                 database (or ``None``). This information will be provided to
+                 :meth:`~.edit_connectivity_service` and
+                 :meth:`~.delete_connectivity_service`.
+                 **MUST** be JSON/YAML-serializable (plain data structures).
+
+        Raises:
+            WimConnectorError: In case of error.
+        """
+        raise NotImplementedError
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        """Disconnect multi-site endpoints previously connected
+
+        This method should receive as arguments both the UUID and the
+        connection info dict (respectively), as returned by
+        :meth:`~.create_connectivity_service` and
+        :meth:`~.edit_connectivity_service`.
+
+        Arguments:
+            service_uuid (str): UUID of the connectivity service
+            conn_info (dict or None): Information returned by the connector
+                during the service creation and subsequently stored in the
+                database.
+
+        Raises:
+            WimConnectorError: In case of error.
+        """
+        raise NotImplementedError
+
+    def edit_connectivity_service(self, service_uuid, conn_info=None,
+                                  connection_points=None, **kwargs):
+        """Change an existing connectivity service.
+
+        This method's arguments and return value follow the same convention as
+        :meth:`~.create_connectivity_service`.
+
+        Arguments:
+            service_uuid (str): UUID of the connectivity service.
+            conn_info (dict or None): Information previously stored in the
+                database.
+            connection_points (list): If provided, the old list of connection
+                points will be replaced.
+
+        Returns:
+            dict or None: Information to be updated and stored in the
+                database.
+                When ``None`` is returned, no information should be changed.
+                When an empty dict is returned, the database record will be
+                deleted.
+                **MUST** be JSON/YAML-serializable (plain data structures).
+
+        Raises:
+            WimConnectorError: In case of error.
+        """
+        raise NotImplementedError
+
+    def clear_all_connectivity_services(self):
+        """Delete all WAN Links in a WIM.
+
+        This method is intended for debugging only, and should delete all the
+        connections controlled by the WIM, not only the WIM connections that
+        a specific RO is aware of.
+
+        Raises:
+            WimConnectorError: In case of error.
+        """
+        raise NotImplementedError
+
+    def get_all_active_connectivity_services(self):
+        """Provide information about all active connections provisioned by a
+        WIM.
+
+        Raises:
+            WimConnectorError: In case of error.
+        """
+        raise NotImplementedError
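
For orientation, a concrete connector is expected to subclass WimConnector and override the abstract methods above. The sketch below is a minimal, hypothetical in-memory connector written against only the interface documented in this file; the DummyConnector name, the in-memory bookkeeping and the import path (assuming the osm_ro package is installed) are illustrative and not part of this change-set.

    # Illustrative sketch only, not part of this change-set.
    import uuid

    from osm_ro.wim.wimconn import WimConnector


    class DummyConnector(WimConnector):
        """Toy connector that only records the requested services in memory."""

        def __init__(self, wim, wim_account, config=None, logger=None):
            super(DummyConnector, self).__init__(wim, wim_account, config, logger)
            self._services = {}  # service_uuid -> connection_points

        def check_credentials(self):
            # A real connector would contact the WIM here and raise
            # WimConnectorError on authentication/authorization failures.
            self.logger.debug("nothing to check for the dummy connector")

        def create_connectivity_service(self, service_type, connection_points,
                                        **kwargs):
            service_uuid = str(uuid.uuid4())
            self._services[service_uuid] = connection_points
            # conn_info must be JSON/YAML-serializable (plain data structures)
            return service_uuid, {"service_type": service_type}

        def get_connectivity_service_status(self, service_uuid, conn_info=None):
            if service_uuid not in self._services:
                return {"wim_status": "ERROR",
                        "error_msg": "unknown service " + service_uuid}
            return {"wim_status": "ACTIVE"}

        def delete_connectivity_service(self, service_uuid, conn_info=None):
            self._services.pop(service_uuid, None)
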
diff --git a/osm_ro/wim/wimconn_odl.py b/osm_ro/wim/wimconn_odl.py
new file mode 100644 (file)
index 0000000..2371046
--- /dev/null
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 University of Bristol - High Performance Networks Research
+# Group
+# All Rights Reserved.
+#
+# Contributors: Anderson Bravalheri, Dimitrios Gkounis, Abubakar Siddique
+# Muqaddas, Navdeep Uniyal, Reza Nejabati and Dimitra Simeonidou
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: <highperformance-networks@bristol.ac.uk>
+#
+# Neither the name of the University of Bristol nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# This work has been performed in the context of DCMS UK 5G Testbeds
+# & Trials Programme and in the framework of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 and 5G-PPP programmes.
+##
+from .wimconn import WimConnector
+
+
+# TODO: Basically create this file
+
+class OdlConnector(WimConnector):
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        raise NotImplementedError
+
+    def create_connectivity_service(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        raise NotImplementedError
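
To make the life-cycle implied by the docstrings in wimconn.py concrete (create, poll the status, delete on failure), a caller-side sketch is shown below. The helper name establish_eline, the bandwidth value and the timeout handling are assumptions for illustration only; they do not reflect how the WIM engine or WimThread actually drives a connector.

    # Hypothetical caller-side sketch; `connector` is any WimConnector subclass.
    import time


    def establish_eline(connector, point_a, point_b, timeout=300, period=5):
        """Create an ELINE service between two connection points and wait for it."""
        connector.check_credentials()
        service_uuid, conn_info = connector.create_connectivity_service(
            "ELINE", [point_a, point_b], bandwidth=1000)

        deadline = time.time() + timeout
        while time.time() < deadline:
            status = connector.get_connectivity_service_status(service_uuid, conn_info)
            if status["wim_status"] == "ACTIVE":
                return service_uuid, conn_info
            if status["wim_status"] in ("ERROR", "DOWN"):
                # Clean up the failed service before giving up
                connector.delete_connectivity_service(service_uuid, conn_info)
                raise RuntimeError(status.get("error_msg", "connectivity service failed"))
            time.sleep(period)  # BUILD or INACTIVE: keep waiting
        raise RuntimeError("timeout waiting for the connectivity service")
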
index 3b18494..3789992 100755 (executable)
@@ -246,10 +246,10 @@ then
         "#################################################################"
     [ "$_DISTRO" == "Ubuntu" ] && install_packages "python-yaml python-bottle python-mysqldb python-jsonschema "\
         "python-paramiko python-argcomplete python-requests python-logutils libxml2-dev libxslt-dev python-dev "\
-        "python-pip python-crypto"
+        "python-pip python-crypto python-networkx"
     [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "PyYAML MySQL-python python-jsonschema "\
         "python-paramiko python-argcomplete python-requests python-logutils libxslt-devel libxml2-devel python-devel "\
-        "python-pip python-crypto"
+        "python-pip python-crypto python-networkx"
     # The only way to install python-bottle on Centos7 is with easy_install or pip
     [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && easy_install -U bottle
 
index ddce5cd..d5cbe79 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@ _maintainer_email = 'gerardo.garciadeblas@telefonica.com'
 _license = 'Apache 2.0'
 _url = 'https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary'
 _requirements = [
+    "six",  # python 2 x 3 compatibility
     "PyYAML",
     "bottle",
     #"mysqlclient",
index ea4c8f7..53abd48 100644 (file)
--- a/stdeb.cfg
+++ b/stdeb.cfg
@@ -2,5 +2,5 @@
 Suite: xenial
 XS-Python-Version: >= 2.7
 Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
-Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im
+Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx
 
index 4d814e6..8f2a550 100755 (executable)
@@ -2287,6 +2287,72 @@ def test_vim(args):
     return executed, failed
 
 
+def test_wim(args):
+    global test_config
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
+    import openmanoclient
+    executed = 0
+    failed = 0
+    test_config["client"] = openmanoclient.openmanoclient(
+        endpoint_url=args.endpoint_url,
+        tenant_name=args.tenant_name,
+        datacenter_name=args.datacenter,
+        debug=args.debug, logger=test_config["logger_name"])
+    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
+    # If the user only wants a list of the available tests, print it and exit
+    if args.list_tests:
+        tests_names = []
+        for cls in clsmembers:
+            if cls[0].startswith('test_WIM'):
+                tests_names.append(cls[0])
+
+        msg = "The 'wim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
+              "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
+              "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
+        print(msg)
+        logger.info(msg)
+        sys.exit(0)
+
+    # Create the list of tests to be run
+    code_based_tests = []
+    if args.tests:
+        for test in args.tests:
+            for t in test.split(','):
+                matches_code_based_tests = [item for item in clsmembers if item[0] == t]
+                if len(matches_code_based_tests) > 0:
+                    code_based_tests.append(matches_code_based_tests[0][1])
+                else:
+                    logger.critical("Test '{}' is not among the possible ones".format(t))
+                    sys.exit(1)
+    if not code_based_tests:
+        # include all tests whose class name starts with 'test_WIM'
+        for cls in clsmembers:
+            if cls[0].startswith('test_WIM'):
+                code_based_tests.append(cls[1])
+
+    logger.debug("tests to be executed: {}".format(code_based_tests))
+
+    # TextTestRunner stream is set to /dev/null in order to avoid the method to directly print the result of tests.
+    # This is handled in the tests using logging.
+    stream = open('/dev/null', 'w')
+
+    # Run code based tests
+    basic_tests_suite = unittest.TestSuite()
+    for test in code_based_tests:
+        basic_tests_suite.addTest(unittest.makeSuite(test))
+    result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
+    executed += result.testsRun
+    failed += len(result.failures) + len(result.errors)
+    if failfast and failed:
+        sys.exit(1)
+    if len(result.failures) > 0:
+        logger.debug("failures : {}".format(result.failures))
+    if len(result.errors) > 0:
+        logger.debug("errors : {}".format(result.errors))
+    return executed, failed
+
+
 def test_deploy(args):
     global test_config
     sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
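
The runner above only picks up classes in this module whose names start with ``test_WIM``. A hypothetical skeleton for such a test is sketched below; the class name, the method name and the assertions are placeholders, and it assumes ``test_config["client"]`` has been populated by ``test_wim()`` as shown above.

    # Hypothetical skeleton of a code-based WIM test (not part of this change-set)
    class test_WIM_list(unittest.TestCase):
        def test_000_list_wims(self):
            client = test_config["client"]
            wims = client.list_wims(all_tenants=True)
            # list_wims() is expected to return a dict with a 'wims' list
            self.assertIn("wims", wims)
            self.assertIsInstance(wims["wims"], list)
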
@@ -2427,6 +2493,19 @@ if __name__=="__main__":
     vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
 
+    # WIM test set
+    # -------------------
+    vimconn_parser = subparsers.add_parser('wim', parents=[parent_parser], help="test wim")
+    vimconn_parser.set_defaults(func=test_wim)
+
+    # Mandatory arguments
+    mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
+    mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
+
+    # Optional arguments
+    vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
+                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
+
     argcomplete.autocomplete(parser)
     args = parser.parse_args()
     # print str(args)
index 8e13d67..6bdd67c 100755 (executable)
@@ -230,7 +230,80 @@ if __name__=="__main__":
         to_delete_list.insert(0,{"item": "datacenter-detach", "function": client.detach_datacenter, "params":{"name": test_datacenter} })
 
         client["datacenter_name"] = test_datacenter
-        
+
+        # WIMs
+        print("  {}. TEST create_wim".format(test_number))
+        test_number += 1
+        long_name = _get_random_name(60)
+
+        wim = client.create_wim(name=long_name, wim_url="http://fakeurl/fake")
+        if verbose: print(wim)
+
+        print("  {}. TEST list_wims".format(test_number))
+        test_number += 1
+        wims = client.list_wims(all_tenants=True)
+        if verbose: print(wims)
+
+        print("  {}. TEST list_tenans filter by name".format(test_number))
+        test_number += 1
+        wims_ = client.list_wims(all_tenants=True, name=long_name)
+        if not wims_["wims"]:
+            raise Exception("Text error, no TENANT found with name")
+        if verbose: print(wims_)
+
+        print("  {}. TEST get_wim by UUID".format(test_number))
+        test_number += 1
+        wim = client.get_wim(uuid=wims_["wims"][0]["uuid"], all_tenants=True)
+        if verbose: print(wim)
+
+        print("  {}. TEST delete_wim by name".format(test_number))
+        test_number += 1
+        wim = client.delete_wim(name=long_name)
+        if verbose: print(wim)
+
+        print("  {}. TEST create_wim for remaining tests".format(test_number))
+        test_number += 1
+        test_wim = "test-wim " + \
+                          ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        wim = client.create_wim(name=test_wim, wim_url="http://127.0.0.1:9080/odl")
+        if verbose: print(wim)
+        to_delete_list.insert(0, {"item": "wim", "function": client.delete_wim,
+                                  "params": {"name": test_wim}})
+
+        test_wim_tenant = "test-wimtenant " + \
+                           ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+
+        # print("  {}. TEST datacenter new tenenat".format(test_number))
+        # test_number += 1
+        # test_vim_tenant = "test-vimtenant " + \
+        #                   ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(40))
+        # vim_tenant = client.vim_action("create", "tenants", datacenter_name=test_datacenter, all_tenants=True,
+        #                                name=test_vim_tenant)
+        # if verbose: print(vim_tenant)
+        # client["datacenter_name"] = test_datacenter
+        # to_delete_list.insert(0, {"item": "vim_tenant",
+        #                           "function": client.vim_action,
+        #                           "params": {
+        #                               "action": "delete",
+        #                               "item": "tenants",
+        #                               "datacenter_name": test_datacenter,
+        #                               "all_tenants": True,
+        #                               "uuid": vim_tenant["tenant"]["id"]
+        #                           }
+        #                           })
+
+        print("  {}. TEST wim attach".format(test_number))
+        test_number += 1
+        wim = client.attach_wim(name=test_wim, wim_tenant_name=test_wim_tenant)
+        if verbose: print(wim)
+        to_delete_list.insert(0, {"item": "wim-detach", "function": client.detach_wim,
+                                  "params": {"name": test_wim}})
     
     #VIM_ACTIONS
     print("  {}. TEST create_VIM_tenant".format(test_number))