Merge branch 'feature7106' 57/7457/1
author tierno <alfonso.tiernosepulveda@telefonica.com>
Mon, 13 May 2019 10:18:28 +0000 (10:18 +0000)
committer tierno <alfonso.tiernosepulveda@telefonica.com>
Mon, 13 May 2019 10:18:42 +0000 (10:18 +0000)
Change-Id: I1670f7013c63c7a5e5c6855fe3ea168423f86834
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
22 files changed:
Dockerfile
database_utils/migrate_mano_db.sh
devops-stages/stage-test.sh
docker/Dockerfile-local
openmanod
osm_ro/db_base.py
osm_ro/nfvo.py
osm_ro/nfvo_db.py
osm_ro/openmano_schemas.py
osm_ro/vim_thread.py
osm_ro/vimconn_openstack.py
osm_ro/vimconn_vmware.py
osm_ro/wim/engine.py
osm_ro/wim/persistence.py
osm_ro/wim/tests/fixtures.py
osm_ro/wim/wim_thread.py
osm_ro/wim/wimconn_dynpac.py
osm_ro/wim/wimconn_ietfl2vpn.py [new file with mode: 0644]
requirements.txt
scripts/install-openmano.sh
stdeb.cfg
tox.ini

index 68a6890..10efe45 100644 (file)
@@ -21,7 +21,7 @@ FROM ubuntu:16.04
 
 RUN  apt-get update && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install git make python python-pip debhelper python3 python3-all python3-pip python3-setuptools && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox && \
+  DEBIAN_FRONTEND=noninteractive apt-get -y install wget tox apt-utils flake8 python-nose python-mock && \
   DEBIAN_FRONTEND=noninteractive pip install pip==9.0.3 && \
   DEBIAN_FRONTEND=noninteractive pip3 install pip==9.0.3 && \
   DEBIAN_FRONTEND=noninteractive pip install -U setuptools setuptools-version-command stdeb && \
index b6867ff..b587612 100755 (executable)
@@ -36,7 +36,7 @@ QUIET_MODE=""
 BACKUP_DIR=""
 BACKUP_FILE=""
 #TODO update it with the last database version
-LAST_DB_VERSION=37
+LAST_DB_VERSION=38
 
 # Detect paths
 MYSQL=$(which mysql)
@@ -195,6 +195,7 @@ fi
 #[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35  #0.6.01 =>  35
 #[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36  #0.6.03 =>  36
 #[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37  #0.6.09 =>  37
+#[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38  #0.6.11 =>  38
 #TODO ... put next versions here
 
 function upgrade_to_1(){
@@ -1362,6 +1363,49 @@ function downgrade_from_37(){
     # It doesn't make sense to reverse to a bug state.
     sql "DELETE FROM schema_version WHERE version_int='37';"
 }
+function upgrade_to_38(){
+    echo "      Change vim_wim_actions, add worker, related"
+    sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
+        "ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
+        "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
+        "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+    sql "UPDATE vim_wim_actions set related=item_id;"
+    echo "      Change DONE to FINISHED when DELETE has been completed"
+    sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
+        "v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
+        "SET v1.status='FINISHED', v2.status='FINISHED';"
+    echo "      Add osm_id to instance_nets"
+    sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+    echo "      Add related to instance_xxxx"
+    for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
+        instance_vms
+    do
+        sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
+        sql "UPDATE $table set related=uuid;"
+    done
+    sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
+    sql "UPDATE instance_wim_nets set related=uuid;"
+
+    sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+        "VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
+
+}
+function downgrade_from_38(){
+    echo "      Change vim_wim_actions, delete worker, related"
+    sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
+    sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
+        "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
+        "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+    echo "      Remove related from instance_xxxx"
+    for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs \
+        instance_vms
+    do
+        sql "ALTER TABLE $table DROP COLUMN related;"
+    done
+    echo "      Remove osm_id from instance_nets"
+    sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
+    sql "DELETE FROM schema_version WHERE version_int='38';"
+}
 
 #TODO ... put functions here
 
index 49296c7..cb72fb1 100755 (executable)
@@ -1,2 +1,18 @@
 #!/bin/sh
-echo "UNITTEST"
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+flake8 osm_ro/wim osm_ro/vim_thread.py --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
+
index 86bc994..9897c4d 100644 (file)
@@ -14,6 +14,7 @@ RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-cffi libmysqlclient-dev libssl-dev libffi-dev python-mysqldb && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-openstacksdk python-openstackclient && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install python-networkx && \
+    DEBIAN_FRONTEND=noninteractive apt-get -y install genisoimage && \
     DEBIAN_FRONTEND=noninteractive pip2 install untangle && \
     DEBIAN_FRONTEND=noninteractive pip2 install -e git+https://github.com/python-oca/python-oca#egg=oca && \
     DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-client
index 6e46990..d16c8dd 100755 (executable)
--- a/openmanod
+++ b/openmanod
@@ -41,6 +41,9 @@ from jsonschema import validate as js_v, exceptions as js_e
 import logging
 import logging.handlers as log_handlers
 import socket
+
+from yaml import MarkedYAMLError
+
 from osm_ro import httpserver, nfvo, nfvo_db
 from osm_ro.openmano_schemas import config_schema
 from osm_ro.db_base import db_base_Exception
@@ -50,9 +53,9 @@ import osm_ro
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.6.10"
-version_date = "Mar 2019"
-database_version = 37      # expected database schema version
+__version__ = "0.6.12"
+version_date = "Apr 2019"
+database_version = 38      # expected database schema version
 
 global global_config
 global logger
@@ -72,33 +75,33 @@ def load_configuration(configuration_file):
                       'auto_push_VNF_to_VIMs': True,
                       'db_host': 'localhost',
                       'db_ovim_host': 'localhost'
-    }
+                      }
     try:
-        #Check config file exists
+        # Check config file exists
         with open(configuration_file, 'r') as f:
             config_str = f.read()
-        #Parse configuration file
+        # Parse configuration file
         config = yaml.load(config_str)
-        #Validate configuration file with the config_schema
+        # Validate configuration file with the config_schema
         js_v(config, config_schema)
 
-        #Add default values tokens
-        for k,v in default_tokens.items():
+        # Add default values tokens
+        for k, v in default_tokens.items():
             if k not in config:
-                config[k]=v
+                config[k] = v
         return config
 
     except yaml.YAMLError as e:
         error_pos = ""
-        if hasattr(e, 'problem_mark'):
+        if isinstance(e, MarkedYAMLError):
             mark = e.problem_mark
-            error_pos = " at line:{} column:{}".format(mark.line+1, mark.column+1)
+            error_pos = " at line:{} column:{}".format(mark.line + 1, mark.column + 1)
         raise LoadConfigurationException("Bad YAML format at configuration file '{file}'{pos}: {message}".format(
             file=configuration_file, pos=error_pos, message=e))
     except js_e.ValidationError as e:
         error_pos = ""
         if e.path:
-            error_pos=" at '" + ":".join(map(str, e.path))+"'"
+            error_pos = " at '" + ":".join(map(str, e.path)) + "'"
         raise LoadConfigurationException("Invalid field at configuration file '{file}'{pos} {message}".format(
             file=configuration_file, pos=error_pos, message=e))
     except Exception as e:
@@ -107,20 +110,18 @@ def load_configuration(configuration_file):
 
 
 def console_port_iterator():
-    '''this iterator deals with the http_console_ports
+    """
+    This iterator deals with the http_console_ports
     returning the ports one by one
-    '''
+    """
     index = 0
     while index < len(global_config["http_console_ports"]):
         port = global_config["http_console_ports"][index]
-        #print("ports -> ", port)
         if type(port) is int:
             yield port
-        else: #this is dictionary with from to keys
+        else:  # this is a dictionary with from/to keys
             port2 = port["from"]
-            #print("ports -> ", port, port2)
             while port2 <= port["to"]:
-                #print("ports -> ", port, port2)
                 yield port2
                 port2 += 1
         index += 1
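
For reference, a minimal sketch of how the iterator above expands a hypothetical http_console_ports value (the port
numbers are illustrative, and global_config is the module-level global the iterator reads):

    # hypothetical configuration: a plain port plus a from/to range
    global_config = {"http_console_ports": [9096, {"from": 9100, "to": 9102}]}
    print(list(console_port_iterator()))  # -> [9096, 9100, 9101, 9102]
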
@@ -131,13 +132,15 @@ def usage():
     print("      -v|--version: prints current version")
     print("      -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)")
     print("      -h|--help: shows this help")
-    print("      -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)")
-    print("      -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)")
-    # print( "      -V|--vnf-repository: changes the path of the vnf-repository and overrides the path in the configuration file")
+    print(
+        "      -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)")
+    print(
+        "      -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)")
     print("      --log-socket-host HOST: send logs to this host")
     print("      --log-socket-port PORT: send logs using this port (default: 9022)")
     print("      --log-file FILE: send logs to this file")
-    print("      --create-tenant NAME: Try to creates this tenant name before starting, ignoring any errors as e.g. conflict")
+    print(
+        "      --create-tenant NAME: Tries to create this tenant name before starting, ignoring any errors such as conflicts")
     return
 
 
@@ -146,7 +149,6 @@ def set_logging_file(log_file):
         file_handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=100e6, backupCount=9, delay=0)
         file_handler.setFormatter(log_formatter_simple)
         logger.addHandler(file_handler)
-        # logger.debug("moving logs to '%s'", global_config["log_file"])
         # remove initial stream handler
         logging.root.removeHandler(logging.root.handlers[0])
         print ("logging on '{}'".format(log_file))
@@ -155,34 +157,28 @@ def set_logging_file(log_file):
             "Cannot open logging file '{}': {}. Check folder exist and permissions".format(log_file, e))
 
 
-if __name__=="__main__":
-    # env2config contains envioron variable names and the correspondence with configuration file openmanod.cfg keys.
+if __name__ == "__main__":
+    # env2config contains environment variable names and their correspondence with configuration file openmanod.cfg keys.
     # If an environment variable is defined, its value is taken instead of the one in the configuration file
     env2config = {
         'RO_DB_HOST': 'db_host',
         'RO_DB_NAME': 'db_name',
         'RO_DB_USER': 'db_user',
         'RO_DB_PASSWORD': 'db_passwd',
-        # 'RO_DB_PORT': 'db_port',
         'RO_DB_OVIM_HOST': 'db_ovim_host',
         'RO_DB_OVIM_NAME': 'db_ovim_name',
         'RO_DB_OVIM_USER': 'db_ovim_user',
         'RO_DB_OVIM_PASSWORD': 'db_ovim_passwd',
-        # 'RO_DB_OVIM_PORT': 'db_ovim_port',
         'RO_LOG_LEVEL': 'log_level',
         'RO_LOG_FILE': 'log_file',
     }
     # Configure logging step 1
     hostname = socket.gethostname()
-    # streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
-    # "%(asctime)s %(name)s %(levelname)s %(filename)s:%(lineno)d %(funcName)s %(process)d: %(message)s"
-    log_formatter_complete = logging.Formatter('%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s '
-                                               'severity:%(levelname)s logger:%(name)s log:%(message)s'.format(
-                                                    host=hostname),
-                                               datefmt='%Y-%m-%dT%H:%M:%S')
-    log_format_simple =  "%(asctime)s %(levelname)s  %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
+    log_formatter_str = '%(asctime)s.%(msecs)03d00Z[{host}@openmanod] %(filename)s:%(lineno)s severity:%(levelname)s logger:%(name)s log:%(message)s'
+    log_formatter_complete = logging.Formatter(log_formatter_str.format(host=hostname), datefmt='%Y-%m-%dT%H:%M:%S')
+    log_format_simple = "%(asctime)s %(levelname)s  %(name)s %(thread)d %(filename)s:%(lineno)s %(message)s"
     log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
-    logging.basicConfig(format=log_format_simple, level= logging.DEBUG)
+    logging.basicConfig(format=log_format_simple, level=logging.DEBUG)
     logger = logging.getLogger('openmano')
     logger.setLevel(logging.DEBUG)
     socket_handler = None
@@ -193,7 +189,7 @@ if __name__=="__main__":
         opts, args = getopt.getopt(sys.argv[1:], "hvc:V:p:P:",
                                    ["config=", "help", "version", "port=", "vnf-repository=", "adminport=",
                                     "log-socket-host=", "log-socket-port=", "log-file=", "create-tenant="])
-        port=None
+        port = None
         port_admin = None
         config_file = 'osm_ro/openmanod.cfg'
         vnf_repository = None
@@ -233,7 +229,6 @@ if __name__=="__main__":
         global_config = load_configuration(config_file)
         global_config["version"] = __version__
         global_config["version_date"] = version_date
-        #print global_config
         # Override parameters obtained by command line on ENV
         if port:
             global_config['http_port'] = port
@@ -250,52 +245,37 @@ if __name__=="__main__":
                 if not env_k.startswith("RO_") or env_k not in env2config or not env_v:
                     continue
                 global_config[env2config[env_k]] = env_v
-                if env_k.endswith("PORT"):    # convert to int, skip if not possible
+                if env_k.endswith("PORT"):  # convert to int, skip if not possible
                     global_config[env2config[env_k]] = int(env_v)
             except Exception as e:
                 logger.warn("skipping environ '{}={}' because exception '{}'".format(env_k, env_v, e))
 
-#         if vnf_repository is not None:
-#             global_config['vnf_repository'] = vnf_repository
-#         else:
-#             if not 'vnf_repository' in global_config:
-#                 logger.error( os.getcwd() )
-#                 global_config['vnf_repository'] = os.getcwd()+'/vnfrepo'
-#         #print global_config
-#         if not os.path.exists(global_config['vnf_repository']):
-#             logger.error( "Creating folder vnf_repository folder: '%s'.", global_config['vnf_repository'])
-#             try:
-#                 os.makedirs(global_config['vnf_repository'])
-#             except Exception as e:
-#                 logger.error( "Error '%s'. Ensure the path 'vnf_repository' is properly set at %s",e.args[1], config_file)
-#                 exit(-1)
-
         global_config["console_port_iterator"] = console_port_iterator
-        global_config["console_thread"]={}
-        global_config["console_ports"]={}
+        global_config["console_thread"] = {}
+        global_config["console_ports"] = {}
         if not global_config["http_console_host"]:
             global_config["http_console_host"] = global_config["http_host"]
-            if global_config["http_host"]=="0.0.0.0":
+            if global_config["http_host"] == "0.0.0.0":
                 global_config["http_console_host"] = socket.gethostname()
 
         # Configure logging STEP 2
         if "log_host" in global_config:
-            socket_handler= log_handlers.SocketHandler(global_config["log_socket_host"], global_config["log_socket_port"])
+            socket_handler = log_handlers.SocketHandler(global_config["log_socket_host"],
+                                                        global_config["log_socket_port"])
             socket_handler.setFormatter(log_formatter_complete)
-            if global_config.get("log_socket_level") and global_config["log_socket_level"] != global_config["log_level"]:
+            if global_config.get("log_socket_level") \
+                    and global_config["log_socket_level"] != global_config["log_level"]:
                 socket_handler.setLevel(global_config["log_socket_level"])
             logger.addHandler(socket_handler)
 
-        # logger.addHandler(log_handlers.SysLogHandler())
         if log_file:
             global_config['log_file'] = log_file
         elif global_config.get('log_file'):
             set_logging_file(global_config['log_file'])
 
-        # logging.basicConfig(level = getattr(logging, global_config.get('log_level',"debug")))
         logger.setLevel(getattr(logging, global_config['log_level']))
         logger.critical("Starting openmano server version: '%s %s' command: '%s'",
-                         __version__, version_date, " ".join(sys.argv))
+                        __version__, version_date, " ".join(sys.argv))
 
         for log_module in ("nfvo", "http", "vim", "wim", "db", "console", "ovim"):
             log_level_module = "log_level_" + log_module
@@ -312,14 +292,13 @@ if __name__=="__main__":
                 except IOError as e:
                     raise LoadConfigurationException(
                         "Cannot open logging file '{}': {}. Check folder exist and permissions".format(
-                            global_config[log_file_module], str(e)) )
-            global_config["logger_"+log_module] = logger_module
-        #httpserver.logger = global_config["logger_http"]
-        #nfvo.logger = global_config["logger_nfvo"]
+                            global_config[log_file_module], str(e)))
+            global_config["logger_" + log_module] = logger_module
 
         # Initialize DB connection
-        mydb = nfvo_db.nfvo_db();
-        mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
+        mydb = nfvo_db.nfvo_db()
+        mydb.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'],
+                     global_config['db_name'])
         db_path = osm_ro.__path__[0] + "/database_utils"
         if not os_path.exists(db_path + "/migrate_mano_db.sh"):
             db_path = osm_ro.__path__[0] + "/../database_utils"
@@ -327,22 +306,23 @@ if __name__=="__main__":
             r = mydb.get_db_version()
             if r[0] != database_version:
                 logger.critical("DATABASE wrong version '{current}'. Try to upgrade/downgrade to version '{target}'"
-                                " with '{db_path}/migrate_mano_db.sh {target}'".format(
-                                current=r[0], target=database_version,  db_path=db_path))
+                                " with '{db_path}/migrate_mano_db.sh {target}'".format(current=r[0],
+                                                                                       target=database_version,
+                                                                                       db_path=db_path))
                 exit(-1)
         except db_base_Exception as e:
             logger.critical("DATABASE is not valid. If you think it is corrupted, you can init it with"
                             " '{db_path}/init_mano_db.sh' script".format(db_path=db_path))
             exit(-1)
 
-        nfvo.global_config=global_config
+        nfvo.global_config = global_config
         if create_tenant:
             try:
                 nfvo.new_tenant(mydb, {"name": create_tenant})
             except Exception as e:
                 if isinstance(e, nfvo.NfvoException) and e.http_code == 409:
                     pass  # if tenant exist (NfvoException error 409), ignore
-                else:     # otherwise print and error and continue
+                else:  # otherwise print an error and continue
                     logger.error("Cannot create tenant '{}': {}".format(create_tenant, e))
 
         # WIM module
@@ -359,7 +339,8 @@ if __name__=="__main__":
 
         httpthread.start()
         if 'http_admin_port' in global_config:
-            httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'], global_config['http_admin_port'])
+            httpthreadadmin = httpserver.httpserver(mydb, True, global_config['http_host'],
+                                                    global_config['http_admin_port'])
             httpthreadadmin.start()
         time.sleep(1)
         logger.info('Waiting for http clients')
@@ -369,10 +350,10 @@ if __name__=="__main__":
         time.sleep(20)
         sys.stdout.flush()
 
-        #TODO: Interactive console must be implemented here instead of join or sleep
+        # TODO: Interactive console must be implemented here instead of join or sleep
 
-        #httpthread.join()
-        #if 'http_admin_port' in global_config:
+        # httpthread.join()
+        # if 'http_admin_port' in global_config:
         #    httpthreadadmin.join()
         while True:
             time.sleep(86400)
@@ -382,8 +363,7 @@ if __name__=="__main__":
     except SystemExit:
         pass
     except getopt.GetoptError as e:
-        logger.critical(str(e)) # will print something like "option -a not recognized"
-        #usage()
+        logger.critical(str(e))  # will print something like "option -a not recognized"
         exit(-1)
     except LoadConfigurationException as e:
         logger.critical(str(e))
@@ -397,4 +377,3 @@ if __name__=="__main__":
     nfvo.stop_service()
     if httpthread:
         httpthread.join(1)
-
index e946f00..e6e1134 100644 (file)
@@ -562,7 +562,7 @@ class db_base():
             INSERT: dictionary with the key:value to insert
             table: table where to insert
             add_uuid: if True, it will create an uuid key entry at INSERT if not provided
-            created_time: time to add to the created_time column
+            created_time: time to add to the created_at column
         It checks presence of uuid and add one automatically otherwise
         Return: uuid
         '''
@@ -591,7 +591,7 @@ class db_base():
         cmd= "INSERT INTO " + table +" SET " + \
             ",".join(map(self.__tuple2db_format_set, INSERT.iteritems() ))
         if created_time:
-            cmd += ",created_at=%f" % created_time
+            cmd += ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
         if confidential_data:
             index = cmd.find("SET")
             subcmd = cmd[:index] + 'SET...'
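
As a worked illustration of the reformatted INSERT above, both timestamp columns now get the same value in a single
statement (the created_time value is made up):

    >>> created_time = 1552000000.5
    >>> ",created_at={time:.9f},modified_at={time:.9f}".format(time=created_time)
    ',created_at=1552000000.500000000,modified_at=1552000000.500000000'
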
@@ -627,7 +627,7 @@ class db_base():
 
     @retry
     @with_transaction
-    def update_rows(self, table, UPDATE, WHERE, modified_time=0, attempt=_ATTEMPT):
+    def update_rows(self, table, UPDATE, WHERE, modified_time=None, attempt=_ATTEMPT):
         """ Update one or several rows of a table.
         :param UPDATE: dictionary with the changes. dict keys are database columns that will be set with the dict values
         :param table: database table to update
@@ -638,11 +638,12 @@ class db_base():
                 keys can be suffixed by >,<,<>,>=,<= so that this is used to compare key and value instead of "="
                 The special keys "OR", "AND" with a dict value is used to create a nested WHERE
             If a list, each item will be a dictionary that will be concatenated with OR
-        :param modified_time: Can contain the time to be set to the table row
+        :param modified_time: Can contain the time to be set to the table row.
+            None to set it automatically, 0 to leave it unmodified
         :return: the number of updated rows, raises exception upon error
         """
-        if table in self.tables_with_created_field and modified_time==0:
-            modified_time=time.time()
+        if table in self.tables_with_created_field and modified_time is None:
+            modified_time = time.time()
 
         return self._update_rows(table, UPDATE, WHERE, modified_time)
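
A short usage sketch of the new modified_time semantics (table names and the net_uuid variable are illustrative; db
stands for a db_base instance):

    # default (None): modified_at is set to time.time() automatically for tables in tables_with_created_field
    db.update_rows("instance_nets", UPDATE={"status": "ACTIVE"}, WHERE={"uuid": net_uuid})
    # explicit 0: update the row without touching modified_at, as done when clearing or taking worker locks
    db.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None}, modified_time=0)
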
 
index 1d55a3c..414f365 100644 (file)
@@ -126,11 +126,12 @@ def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id
     if name not in vim_threads["names"]:
         vim_threads["names"].append(name)
         return name
-    name = datacenter_name[:16] + "." + tenant_name[:16]
-    if name not in vim_threads["names"]:
-        vim_threads["names"].append(name)
-        return name
-    name = datacenter_id + "-" + tenant_id
+    if tenant_name:
+        name = datacenter_name[:16] + "." + tenant_name[:16]
+        if name not in vim_threads["names"]:
+            vim_threads["names"].append(name)
+            return name
+    name = datacenter_id
     vim_threads["names"].append(name)
     return name
 
@@ -237,7 +238,7 @@ def start_service(mydb, persistence=None, wim=None):
             except Exception as e:
                 raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
                                     httperrors.Internal_Server_Error)
-            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'],
+            thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
                                                 vim['vim_tenant_id'])
             new_thread = vim_thread.vim_thread(task_lock, thread_name, vim['datacenter_name'],
                                                vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
@@ -305,6 +306,9 @@ def clean_db(mydb):
         nb_deleted += len(actions_to_delete)
         if len(actions_to_delete) < 100:
             break
+    # clean locks
+    mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
+
     if nb_deleted:
         logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
 
@@ -3248,8 +3252,16 @@ def create_instance(mydb, tenant_id, instance_dict):
             # <-- WIM
 
             descriptor_net = {}
-            if instance_dict.get("networks") and instance_dict["networks"].get(sce_net["name"]):
-                descriptor_net = instance_dict["networks"][sce_net["name"]]
+            if instance_dict.get("networks"):
+                if sce_net.get("uuid") in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+                    descriptor_net_name = sce_net["uuid"]
+                elif sce_net.get("osm_id") in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+                    descriptor_net_name = sce_net["osm_id"]
+                elif sce_net["name"] in instance_dict["networks"]:
+                    descriptor_net = instance_dict["networks"][sce_net["name"]]
+                    descriptor_net_name = sce_net["name"]
             net_name = descriptor_net.get("vim-network-name")
             # add datacenters from instantiation parameters
             if descriptor_net.get("sites"):
@@ -3259,6 +3271,22 @@ def create_instance(mydb, tenant_id, instance_dict):
             sce_net2instance[sce_net_uuid] = {}
             net2task_id['scenario'][sce_net_uuid] = {}
 
+            use_network = None
+            related_network = None
+            if descriptor_net.get("use-network"):
+                target_instance_nets = mydb.get_rows(
+                    SELECT="related",
+                    FROM="instance_nets",
+                    WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+                           "osm_id":  descriptor_net["use-network"]["osm_id"]},
+                )
+                if not target_instance_nets:
+                    raise NfvoException(
+                        "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
+                        httperrors.Bad_Request)
+                else:
+                    use_network = target_instance_nets[0]["related"]
+
             if sce_net["external"]:
                 number_mgmt_networks += 1
 
@@ -3346,8 +3374,12 @@ def create_instance(mydb, tenant_id, instance_dict):
                 net_uuid = str(uuid4())
                 uuid_list.append(net_uuid)
                 sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+                if not related_network:   # all db_instance_nets will have same related
+                    related_network = use_network or net_uuid
                 db_net = {
                     "uuid": net_uuid,
+                    "osm_id": sce_net.get("osm_id") or sce_net["name"],
+                    "related": related_network,
                     'vim_net_id': None,
                     "vim_name": net_vim_name,
                     "instance_scenario_id": instance_uuid,
@@ -3366,6 +3398,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     "action": task_action,
                     "item": "instance_nets",
                     "item_id": net_uuid,
+                    "related": related_network,
                     "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
                 }
                 net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
@@ -3446,6 +3479,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         uuid_list.append(sfi_uuid)
                         db_sfi = {
                             "uuid": sfi_uuid,
+                            "related": sfi_uuid,
                             "instance_scenario_id": instance_uuid,
                             'sce_rsp_hop_id': cp['uuid'],
                             'datacenter_id': datacenter_id,
@@ -3461,6 +3495,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                             "status": "SCHEDULED",
                             "item": "instance_sfis",
                             "item_id": sfi_uuid,
+                            "related": sfi_uuid,
                             "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
                                                     default_flow_style=True, width=256)
                         }
@@ -3472,6 +3507,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     uuid_list.append(sf_uuid)
                     db_sf = {
                         "uuid": sf_uuid,
+                        "related": sf_uuid,
                         "instance_scenario_id": instance_uuid,
                         'sce_rsp_hop_id': cp['uuid'],
                         'datacenter_id': datacenter_id,
@@ -3487,6 +3523,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         "status": "SCHEDULED",
                         "item": "instance_sfs",
                         "item_id": sf_uuid,
+                        "related": sf_uuid,
                         "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
                                                 default_flow_style=True, width=256)
                     }
@@ -3517,6 +3554,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         uuid_list.append(classification_uuid)
                         db_classification = {
                             "uuid": classification_uuid,
+                            "related": classification_uuid,
                             "instance_scenario_id": instance_uuid,
                             'sce_classifier_match_id': match['uuid'],
                             'datacenter_id': datacenter_id,
@@ -3539,6 +3577,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                             "status": "SCHEDULED",
                             "item": "instance_classifications",
                             "item_id": classification_uuid,
+                            "related": classification_uuid,
                             "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
                                                     default_flow_style=True, width=256)
                         }
@@ -3551,6 +3590,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                 uuid_list.append(sfp_uuid)
                 db_sfp = {
                     "uuid": sfp_uuid,
+                    "related": sfp_uuid,
                     "instance_scenario_id": instance_uuid,
                     'sce_rsp_id': rsp['uuid'],
                     'datacenter_id': datacenter_id,
@@ -3566,6 +3606,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                     "status": "SCHEDULED",
                     "item": "instance_sfps",
                     "item_id": sfp_uuid,
+                    "related": sfp_uuid,
                     "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
                                             default_flow_style=True, width=256)
                 }
@@ -3681,6 +3722,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
         vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
         db_net = {
             "uuid": net_uuid,
+            "related": net_uuid,
             'vim_net_id': None,
             "vim_name": net_name,
             "instance_scenario_id": instance_uuid,
@@ -3711,6 +3753,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
             "action": task_action,
             "item": "instance_nets",
             "item_id": net_uuid,
+            "related": net_uuid,
             "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -3950,6 +3993,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
             uuid_list.append(vm_uuid)
             db_vm = {
                 "uuid": vm_uuid,
+                "related": vm_uuid,
                 'instance_vnf_id': vnf_uuid,
                 # TODO delete "vim_vm_id": vm_id,
                 "vm_id": vm["uuid"],
@@ -3989,6 +4033,7 @@ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
                 "status": "SCHEDULED",
                 "item": "instance_vms",
                 "item_id": vm_uuid,
+                "related": vm_uuid,
                 "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
                                         default_flow_style=True, width=256)
             }
@@ -4066,6 +4111,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             "status": "SCHEDULED",
             "item": "instance_sfps",
             "item_id": sfp["uuid"],
+            "related": sfp["related"],
             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -4106,6 +4152,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             "status": "SCHEDULED",
             "item": "instance_classifications",
             "item_id": classification["uuid"],
+            "related": classification["related"],
             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -4144,6 +4191,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             "status": "SCHEDULED",
             "item": "instance_sfs",
             "item_id": sf["uuid"],
+            "related": sf["related"],
             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -4182,6 +4230,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             "status": "SCHEDULED",
             "item": "instance_sfis",
             "item_id": sfi["uuid"],
+            "related": sfi["related"],
             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -4223,6 +4272,7 @@ def delete_instance(mydb, tenant_id, instance_id):
                 "status": "SCHEDULED",
                 "item": "instance_vms",
                 "item_id": vm["uuid"],
+                "related": vm["related"],
                 "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
                                         default_flow_style=True, width=256)
             }
@@ -4277,6 +4327,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             "status": "SCHEDULED",
             "item": "instance_nets",
             "item_id": net["uuid"],
+            "related": net["related"],
             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
         }
         task_index += 1
@@ -4576,6 +4627,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                         "status": "SCHEDULED",
                         "item": "instance_vms",
                         "item_id": vdu_id,
+                        "related": vm["related"],
                         "extra": yaml.safe_dump({"params": vm_interfaces},
                                                 default_flow_style=True, width=256)
                     }
@@ -4666,6 +4718,7 @@ def instance_action(mydb,nfvo_tenant,instance_id, action_dict):
                         "status": "SCHEDULED",
                         "item": "instance_vms",
                         "item_id": vm_uuid,
+                        "related": target_vm["related"],
                         # ALF
                         # ALF
                         # TODO examine parameters, remove or increment the MAC. Increment the IP and set the dependencies with ACTION-asdfasd.
index 86144de..eb72b13 100644 (file)
@@ -964,7 +964,7 @@ class nfvo_db(db_base.db_base):
 
             # instance vms
             cmd = "SELECT iv.uuid as uuid, vim_vm_id, status, error_msg, vim_info, iv.created_at as "\
-                    "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid"\
+                    "created_at, name, vms.osm_id as vdu_osm_id, vim_name, vms.uuid as vm_uuid, related"\
                     " FROM instance_vms as iv join vms on iv.vm_id=vms.uuid "\
                     " WHERE instance_vnf_id='{}' ORDER BY iv.created_at".format(vnf['uuid'])
             self.logger.debug(cmd)
@@ -1002,7 +1002,7 @@ class nfvo_db(db_base.db_base):
         #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
         cmd = "SELECT inets.uuid as uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, " \
                 "net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id, " \
-                "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name " \
+                "snets.osm_id as ns_net_osm_id, nets.osm_id as vnf_net_osm_id, inets.vim_name, related " \
                 "FROM instance_nets as inets left join sce_nets as snets on inets.sce_net_id=snets.uuid " \
                 "left join nets on inets.net_id=nets.uuid " \
                 "WHERE instance_scenario_id='{}' ORDER BY inets.created_at".format(instance_dict['uuid'])
@@ -1012,7 +1012,7 @@ class nfvo_db(db_base.db_base):
 
         #instance_sfps
         cmd = "SELECT uuid,vim_sfp_id,sce_rsp_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
                 " FROM instance_sfps" \
                 " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
         self.logger.debug(cmd)
@@ -1022,7 +1022,7 @@ class nfvo_db(db_base.db_base):
         # for sfp in instance_dict['sfps']:
         #instance_sfs
         cmd = "SELECT uuid,vim_sf_id,sce_rsp_hop_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
                 " FROM instance_sfs" \
                 " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sfp_id
         self.logger.debug(cmd)
@@ -1032,7 +1032,7 @@ class nfvo_db(db_base.db_base):
         #for sf in instance_dict['sfs']:
         #instance_sfis
         cmd = "SELECT uuid,vim_sfi_id,sce_rsp_hop_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
                 " FROM instance_sfis" \
                 " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid']) # TODO: replace instance_scenario_id with instance_sf_id
         self.logger.debug(cmd)
@@ -1042,7 +1042,7 @@ class nfvo_db(db_base.db_base):
 
         #instance_classifications
         cmd = "SELECT uuid,vim_classification_id,sce_classifier_match_id,datacenter_id,"\
-                "datacenter_tenant_id,status,error_msg,vim_info"\
+                "datacenter_tenant_id,status,error_msg,vim_info, related"\
                 " FROM instance_classifications" \
                 " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
         self.logger.debug(cmd)
index 5839e24..988b6ca 100644 (file)
@@ -1094,6 +1094,16 @@ instance_scenario_create_schema_v01 = {
                                 },
                                 "wim_account": {"oneOf": [boolean_schema, id_schema, null_schema]},
                                 "ip-profile": ip_profile_schema,
+                                "use-network": {
+                                    "type": "object",
+                                    "properties": {
+                                        "instance_scenario_id": id_schema,
+                                        # "member_vnf_index": name_schema,  # if not null, network inside VNF
+                                        "osm_id": name_schema,  # sce_network osm_id or name
+                                    },
+                                    "additionalProperties": False,
+                                    "required": ["instance_scenario_id", "osm_id"]
+                                },
                                 #if the network connects VNFs deployed at different sites, you must specify one entry per site that this network connects to
                                 "sites": {
                                     "type":"array",
index 02b3bcc..8ed570c 100644 (file)
 """"
 This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
 The tasks are stored at database in table vim_wim_actions
+Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is something to avoid if RO
+is migrated to a non-relational database such as MongoDB. Each vim_wim_actions entry references a different
+instance_Xxxxx. In this case the "related" column contains the same value, to indicate that they refer to the same
+VIM element. On deletion, if there are related tasks still using this element, it is not deleted; the vim_info needed
+to delete it is transferred to another task.
+
 The task content is (M: stored at memory, D: stored at database):
     MD  instance_action_id:  reference a global action over an instance-scenario: database instance_actions
     MD  task_index:     index number of the task. This together with the previous forms a unique key identifier
     MD  datacenter_vim_id:  should contain the uuid of the VIM managed by this thread
     MD  vim_id:     id of the vm,net,etc at VIM
-    MD  action:     CREATE, DELETE, FIND
     MD  item:       database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
     MD  item_id:    uuid of the referenced entry in the previous table
-    MD  status:     SCHEDULED,BUILD,DONE,FAILED,SUPERSEDED
+    MD  action:     CREATE, DELETE, FIND
+    MD  status:     SCHEDULED: action needs to be done
+                    BUILD: not used
+                    DONE: done; it must be polled against the VIM periodically to check its status. ONLY for
+                        action=CREATE or FIND
+                    FAILED: it cannot be created/found/deleted
+                    FINISHED: similar to DONE, but no refresh is needed anymore. The task is kept at database but
+                        it is never processed again by any thread
+                    SUPERSEDED: similar to FINISHED, but nothing has been done to complete the task.
     MD  extra:      text with yaml format at database, dict at memory with:
-            params:     list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken from other related tasks
-            find:       (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains the FIND params
-            depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends on a net creation
+            params:     list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
+                        from other related tasks
+            find:       (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
+                        the FIND params
+            depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
+                        on a net creation
                        can contain an int (single index on the same instance-action) or str (complete action ID)
             sdn_net_id: used for net.
-            tries:
             interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
                iface_id: uuid of instance_interfaces
                 sdn_port_id:
                 sdn_net_id:
+                vim_info
             created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
             created:    False if the VIM element is not created by other actions, and it should not be deleted
             vim_status: VIM status of the element. Stored also at database in the instance_XXX
-    M   depends:    dict with task_index(from depends_on) to task class
-    M   params:     same as extra[params] but with the resolved dependencies
-    M   vim_interfaces: similar to extra[interfaces] but with VIM information. Stored at database in the instance_XXX but not at vim_wim_actions
-    M   vim_info:   Detailed information of a vm,net from the VIM. Stored at database in the instance_XXX but not at vim_wim_actions
+            vim_info:   Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
+                        vim_wim_actions
+    M   depends:    dict with task_index(from depends_on) to vim_id
+    M   params:     same as extra[params]
     MD  error_msg:  descriptive text upon an error. Stored also at database instance_XXX
-    MD  created_at: task creation time
-    MD  modified_at: last task update time. On refresh it contains when this task need to be refreshed
+    MD  created_at: task creation time. The creation task must be the oldest one
+    MD  modified_at: next time the task needs to be processed. For example, for a refresh, it contains the next time
+                     the refresh must be done
+    MD related:     all the tasks over the same VIM element share the same "related" value. Note that other VIMs can
+                    contain the same value of related, but this thread only processes the tasks of one VIM. Also,
+                    related can be the same among several NS or instance-scenarios
+    MD worker:      used for locking when there are several worker threads
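
To make the field list above concrete, here is a hypothetical vim_wim_actions task as this thread handles it once
loaded (all values are made up; in the database "extra" is stored as YAML and parsed into a dict by _get_db_task):

    task = {
        "instance_action_id": "ACTION-1552000000.000001",    # global action over an instance-scenario
        "task_index": 3,
        "datacenter_vim_id": "11111111-aaaa-bbbb-cccc-222222222222",  # VIM managed by this thread
        "item": "instance_nets",
        "item_id": "33333333-dddd-eeee-ffff-444444444444",   # uuid of the instance_nets entry
        "related": "33333333-dddd-eeee-ffff-444444444444",   # same value for every task over this VIM element
        "action": "CREATE",
        "status": "SCHEDULED",
        "worker": None,                                       # set to the thread my_id once locked
        "extra": {"params": ["<vim create params>"], "depends_on": [2]},
        "created_at": 1552000000.0,
        "modified_at": 1552000000.0,                          # next time the task must be processed
        "error_msg": None,
    }
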
 
 """
 
@@ -99,6 +118,8 @@ class VimThreadExceptionNotFound(VimThreadException):
 class vim_thread(threading.Thread):
     REFRESH_BUILD = 5  # 5 seconds
     REFRESH_ACTIVE = 60  # 1 minute
+    REFRESH_ERROR = 600
+    REFRESH_DELETE = 3600 * 10
 
     def __init__(self, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None,
                  db=None, db_lock=None, ovim=None):
@@ -120,6 +141,7 @@ class vim_thread(threading.Thread):
         else:
             self.name = name
         self.vim_persistent_info = {}
+        self.my_id = self.name[:64]
 
         self.logger = logging.getLogger('openmano.vim.' + self.name)
         self.db = db
@@ -128,19 +150,6 @@ class vim_thread(threading.Thread):
         self.task_lock = task_lock
         self.task_queue = Queue.Queue(2000)
 
-        self.refresh_tasks = []
-        """Contains time ordered task list for refreshing the status of VIM VMs and nets"""
-
-        self.pending_tasks = []
-        """Contains time ordered task list for creation, deletion of VIM VMs and nets"""
-
-        self.grouped_tasks = {}
-        """ It contains all the creation/deletion pending tasks grouped by its concrete vm, net, etc
-            <item><item_id>:
-                -   <task1>  # e.g. CREATE task
-                    <task2>  # e.g. DELETE task
-        """
-
     def get_vimconnector(self):
         try:
             from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
@@ -177,363 +186,343 @@ class vim_thread(threading.Thread):
             self.vim = None
             self.error_status = "Error loading vimconnector: {}".format(e)
 
-    def _reload_vim_actions(self):
+    def _get_db_task(self):
         """
         Get the next task from the database, locking for this worker all the tasks related to the same VIM element
         :return: the task and its related tasks, or (None, None) if there is nothing to do
         """
+        now = time.time()
         try:
-            action_completed = False
-            task_list = []
-            old_action_key = None
-
-            old_item_id = ""
-            old_item = ""
-            old_created_at = 0.0
-            database_limit = 200
+            database_limit = 20
+            task_related = None
             while True:
-                # get 200 (database_limit) entries each time
+                # get 20 (database_limit) entries each time
                 vim_actions = self.db.get_rows(FROM="vim_wim_actions",
-                                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
-                                                        "item_id>=": old_item_id},
-                                                ORDER_BY=("item_id", "item", "created_at",),
-                                                LIMIT=database_limit)
+                                               WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                                      "status": ['SCHEDULED', 'BUILD', 'DONE'],
+                                                      "worker": [None, self.my_id], "modified_at<=": now
+                                                      },
+                                               ORDER_BY=("modified_at", "created_at",),
+                                               LIMIT=database_limit)
+                if not vim_actions:
+                    return None, None
+                # if vim_actions[0]["modified_at"] > now:
+                #     return int(vim_actions[0] - now)
                 for task in vim_actions:
-                    item = task["item"]
-                    item_id = task["item_id"]
-
-                    # skip the first entries that are already processed in the previous pool of 200
-                    if old_item_id:
-                        if item_id == old_item_id and item == old_item and task["created_at"] == old_created_at:
-                            old_item_id = False  # next one will be a new un-processed task
+                    # block related task
+                    if task_related == task["related"]:
+                        continue  # skip if a lock has already been attempted for this set of related tasks
+                    task_related = task["related"]
+                    # lock ...
+                    self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
+                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                               "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                               "worker": [None, self.my_id],
+                                               "related": task_related,
+                                               "item": task["item"],
+                                               })
+                    # ... and read all related and check if locked
+                    related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+                                                     WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                                            "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                                            "related": task_related,
+                                                            "item": task["item"],
+                                                            },
+                                                     ORDER_BY=("created_at",))
+                    # check that all related tasks have been locked. If not release and try again. It can happen
+                    # for race conditions if a new related task has been inserted by nfvo in the process
+                    some_tasks_locked = False
+                    some_tasks_not_locked = False
+                    creation_task = None
+                    for relate_task in related_tasks:
+                        if relate_task["worker"] != self.my_id:
+                            some_tasks_not_locked = True
+                        else:
+                            some_tasks_locked = True
+                        if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
+                            creation_task = relate_task
+                    if some_tasks_not_locked:
+                        if some_tasks_locked:  # unlock
+                            self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
+                                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                                       "worker": self.my_id,
+                                                       "related": task_related,
+                                                       "item": task["item"],
+                                                       })
                         continue
 
-                    action_key = item + item_id
-                    if old_action_key != action_key:
-                        if not action_completed and task_list:
-                            # This will fill needed task parameters into memory, and insert the task if needed in
-                            # self.pending_tasks or self.refresh_tasks
-                            try:
-                                self._insert_pending_tasks(task_list)
-                            except Exception as e:
-                                self.logger.critical(
-                                    "Unexpected exception at _reload_vim_actions:_insert_pending_tasks: " + str(e),
-                                    exc_info=True)
-                        task_list = []
-                        old_action_key = action_key
-                        action_completed = False
-                    elif action_completed:
-                        continue
+                    # the creation task must be the first in the list of related_tasks
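+                    # related_tasks is ordered by created_at, so the original CREATE/FIND task is expected to come first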
+                    assert(related_tasks[0]["action"] in ("CREATE", "FIND"))
 
-                    if task["status"] == "SCHEDULED" or task["action"] == "CREATE" or task["action"] == "FIND":
-                        task_list.append(task)
-                    elif task["action"] == "DELETE":
-                        # action completed because deleted and status is not SCHEDULED. Not needed anything
-                        action_completed = True
-                if len(vim_actions) == database_limit:
-                    # update variables for get the next database iteration
-                    old_item_id = item_id
-                    old_item = item
-                    old_created_at = task["created_at"]
-                else:
-                    break
-            # Last actions group need to be inserted too
-            if not action_completed and task_list:
-                try:
-                    self._insert_pending_tasks(task_list)
-                except Exception as e:
-                    self.logger.critical("Unexpected exception at _reload_vim_actions:_insert_pending_tasks: " + str(e),
-                                         exc_info=True)
-            self.logger.debug("reloaded vim actions pending:{} refresh:{}".format(
-                len(self.pending_tasks), len(self.refresh_tasks)))
+                    if task["extra"]:
+                        extra = yaml.load(task["extra"])
+                    else:
+                        extra = {}
+                    task["extra"] = extra
+                    if extra.get("depends_on"):
+                        task["depends"] = {}
+                    if extra.get("params"):
+                        task["params"] = deepcopy(extra["params"])
+                    return task, related_tasks
         except Exception as e:
-            self.logger.critical("Unexpected exception at _reload_vim_actions: " + str(e), exc_info=True)
+            self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
+            return None, None
 
-    def _refres_elements(self):
-        """Call VIM to get VMs and networks status until 10 elements"""
-        now = time.time()
-        nb_processed = 0
-        vm_to_refresh_list = []
-        net_to_refresh_list = []
-        vm_to_refresh_dict = {}
-        net_to_refresh_dict = {}
-        items_to_refresh = 0
-        while self.refresh_tasks:
-            task = self.refresh_tasks[0]
-            with self.task_lock:
-                if task['status'] == 'SUPERSEDED':
-                    self.refresh_tasks.pop(0)
-                    continue
-                if task['modified_at'] > now:
+    def _delete_task(self, task):
+        """
+        Determine if this task needs to be done or superseded
+        :return: True if the VIM resource must actually be deleted by this task; False or None otherwise
+        """
+
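+        # helper to carry the VIM creation details (created flag, sdn_net_id, interfaces, created_items) over to another task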
+        def copy_extra_created(copy_to, copy_from):
+            copy_to["created"] = copy_from["created"]
+            if copy_from.get("sdn_net_id"):
+                copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
+            if copy_from.get("interfaces"):
+                copy_to["interfaces"] = copy_from["interfaces"]
+            if copy_from.get("created_items"):
+                if not copy_to.get("created_items"):
+                    copy_to["created_items"] = {}
+                copy_to["created_items"].update(copy_from["created_items"])
+
+        task_create = None
+        dependency_task = None
+        deletion_needed = False
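+        # task_create: the CREATE/FIND task of this same item; dependency_task: another CREATE/FIND task of the same
+        # related group that can inherit the created VIM resource so that it is not deleted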
+        if task["status"] == "FAILED":
+            return   # TODO should it be retried??
+        try:
+            # get all related tasks
+            related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+                                             WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                                    "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                                    "action": ["FIND", "CREATE"],
+                                                    "related": task["related"],
+                                                    },
+                                             ORDER_BY=("created_at",),
+                                             )
+            for related_task in related_tasks:
+                if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
+                    task_create = related_task
+                    # TASK_CREATE
+                    if related_task["extra"]:
+                        extra_created = yaml.load(related_task["extra"])
+                        if extra_created.get("created"):
+                            deletion_needed = True
+                        related_task["extra"] = extra_created
+                elif not dependency_task:
+                    dependency_task = related_task
+                if task_create and dependency_task:
                     break
-                # task["status"] = "processing"
-                nb_processed += 1
-            self.refresh_tasks.pop(0)
-            if task["item"] == 'instance_vms':
-                if task["vim_id"] not in vm_to_refresh_dict:
-                    vm_to_refresh_dict[task["vim_id"]] = [task]
-                    vm_to_refresh_list.append(task["vim_id"])
-                else:
-                    vm_to_refresh_dict[task["vim_id"]].append(task)
-            elif task["item"] == 'instance_nets':
-                if task["vim_id"] not in net_to_refresh_dict:
-                    net_to_refresh_dict[task["vim_id"]] = [task]
-                    net_to_refresh_list.append(task["vim_id"])
-                else:
-                    net_to_refresh_dict[task["vim_id"]].append(task)
+
+            # mark task_create as FINISHED
+            self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
+                                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                       "instance_action_id": task_create["instance_action_id"],
+                                       "task_index": task_create["task_index"]
+                                       })
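+            # if nothing was actually created at the VIM there is nothing to delete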
+            if not deletion_needed:
+                return
+            elif dependency_task:
+                # move the creation information from task_create to dependency_task
+                extra_new_created = yaml.load(dependency_task["extra"]) or {}
+                extra_new_created["created"] = extra_created["created"]
+                copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)
+
+                self.db.update_rows("vim_wim_actions",
+                                    UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
+                                                                    width=256),
+                                            "vim_id": task_create.get("vim_id")},
+                                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                                           "instance_action_id": dependency_task["instance_action_id"],
+                                           "task_index": dependency_task["task_index"]
+                                           })
+                return False
             else:
-                task_id = task["instance_action_id"] + "." + str(task["task_index"])
-                self.logger.critical("task={}: unknown task {}".format(task_id, task["item"]), exc_info=True)
-            items_to_refresh += 1
-            if items_to_refresh == 10:
-                break
-
-        if vm_to_refresh_list:
-            now = time.time()
-            try:
-                vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
-            except vimconn.vimconnException as e:
-                # Mark all tasks at VIM_ERROR status
-                self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
-                vim_dict = {}
-                for vim_id in vm_to_refresh_list:
-                    vim_dict[vim_id] = {"status": "VIM_ERROR", "error_msg": str(e)}
-
-            for vim_id, vim_info in vim_dict.items():
-
-                # look for task
-                for task in vm_to_refresh_dict[vim_id]:
-                    task_need_update = False
-                    task_id = task["instance_action_id"] + "." + str(task["task_index"])
-                    self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
-
-                    # check and update interfaces
-                    task_warning_msg = ""
-                    for interface in vim_info.get("interfaces", ()):
-                        vim_interface_id = interface["vim_interface_id"]
-                        if vim_interface_id not in task["extra"]["interfaces"]:
-                            self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
-                                task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
-                            continue
-                        task_interface = task["extra"]["interfaces"][vim_interface_id]
-                        task_vim_interface = task["vim_interfaces"].get(vim_interface_id)
-                        if task_vim_interface != interface:
-                            # delete old port
-                            if task_interface.get("sdn_port_id"):
-                                try:
-                                    with self.db_lock:
-                                        self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
-                                        task_interface["sdn_port_id"] = None
-                                        task_need_update = True
-                                except ovimException as e:
-                                    error_text = "ovimException deleting external_port={}: {}".format(
-                                        task_interface["sdn_port_id"], e)
-                                    self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
-                                    task_warning_msg += error_text
-                                    # TODO Set error_msg at instance_nets instead of instance VMs
-
-                            # Create SDN port
-                            sdn_net_id = task_interface.get("sdn_net_id")
-                            if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
-                                sdn_port_name = sdn_net_id + "." + task["vim_id"]
-                                sdn_port_name = sdn_port_name[:63]
-                                try:
-                                    with self.db_lock:
-                                        sdn_port_id = self.ovim.new_external_port(
-                                            {"compute_node": interface["compute_node"],
-                                                "pci": interface["pci"],
-                                                "vlan": interface.get("vlan"),
-                                                "net_id": sdn_net_id,
-                                                "region": self.vim["config"]["datacenter_id"],
-                                                "name": sdn_port_name,
-                                                "mac": interface.get("mac_address")})
-                                        task_interface["sdn_port_id"] = sdn_port_id
-                                        task_need_update = True
-                                except (ovimException, Exception) as e:
-                                    error_text = "ovimException creating new_external_port compute_node={}" \
-                                                 " pci={} vlan={} {}".format(
-                                        interface["compute_node"],
-                                        interface["pci"],
-                                        interface.get("vlan"), e)
-                                    self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
-                                    task_warning_msg += error_text
-                                    # TODO Set error_msg at instance_nets instead of instance VMs
-
-                            self.db.update_rows(
-                                'instance_interfaces',
-                                UPDATE={"mac_address": interface.get("mac_address"),
-                                        "ip_address": interface.get("ip_address"),
-                                        "vim_interface_id": interface.get("vim_interface_id"),
-                                        "vim_info": interface.get("vim_info"),
-                                        "sdn_port_id": task_interface.get("sdn_port_id"),
-                                        "compute_node": interface.get("compute_node"),
-                                        "pci": interface.get("pci"),
-                                        "vlan": interface.get("vlan")},
-                                WHERE={'uuid': task_interface["iface_id"]})
-                            task["vim_interfaces"][vim_interface_id] = interface
-
-                    # check and update task and instance_vms database
-                    vim_info_error_msg = None
-                    if vim_info.get("error_msg"):
-                        vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
-                    elif task_warning_msg:
-                        vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
-                    task_vim_info = task.get("vim_info")
-                    task_error_msg = task.get("error_msg")
-                    task_vim_status = task["extra"].get("vim_status")
-                    if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
-                            (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
-                        temp_dict = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
-                        if vim_info.get("vim_info"):
-                            temp_dict["vim_info"] = vim_info["vim_info"]
-                        self.db.update_rows('instance_vms', UPDATE=temp_dict, WHERE={"uuid": task["item_id"]})
-                        task["extra"]["vim_status"] = vim_info["status"]
-                        task["error_msg"] = vim_info_error_msg
-                        if vim_info.get("vim_info"):
-                            task["vim_info"] = vim_info["vim_info"]
-                        task_need_update = True
-
-                    if task_need_update:
-                        self.db.update_rows(
-                            'vim_wim_actions',
-                            UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
-                                    "error_msg": task.get("error_msg"), "modified_at": now},
-                            WHERE={'instance_action_id': task['instance_action_id'],
-                                    'task_index': task['task_index']})
-                    if task["extra"].get("vim_status") == "BUILD":
-                        self._insert_refresh(task, now + self.REFRESH_BUILD)
-                    else:
-                        self._insert_refresh(task, now + self.REFRESH_ACTIVE)
+                task["vim_id"] = task_create["vim_id"]
+                copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
+                return True
 
-        if net_to_refresh_list:
-            now = time.time()
-            try:
-                vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
-            except vimconn.vimconnException as e:
-                # Mark all tasks at VIM_ERROR status
-                self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
-                vim_dict = {}
-                for vim_id in net_to_refresh_list:
-                    vim_dict[vim_id] = {"status": "VIM_ERROR", "error_msg": str(e)}
-
-            for vim_id, vim_info in vim_dict.items():
-                # look for task
-                for task in net_to_refresh_dict[vim_id]:
-                    task_id = task["instance_action_id"] + "." + str(task["task_index"])
-                    self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))
-
-                    task_vim_info = task.get("vim_info")
-                    task_vim_status = task["extra"].get("vim_status")
-                    task_error_msg = task.get("error_msg")
-                    task_sdn_net_id = task["extra"].get("sdn_net_id")
-
-                    vim_info_status = vim_info["status"]
-                    vim_info_error_msg = vim_info.get("error_msg")
-                    # get ovim status
-                    if task_sdn_net_id:
-                        try:
-                            with self.db_lock:
-                                sdn_net = self.ovim.show_network(task_sdn_net_id)
-                        except (ovimException, Exception) as e:
-                            text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
-                            self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
-                            sdn_net = {"status": "ERROR", "last_error": text_error}
-                        if sdn_net["status"] == "ERROR":
-                            if not vim_info_error_msg:
-                                vim_info_error_msg = str(sdn_net.get("last_error"))
-                            else:
-                                vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
-                                    self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
-                                    self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
-                            vim_info_status = "ERROR"
-                        elif sdn_net["status"] == "BUILD":
-                            if vim_info_status == "ACTIVE":
-                                vim_info_status = "BUILD"
-
-                    # update database
-                    if vim_info_error_msg:
-                        vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
-                    if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
-                            (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
-                        task["extra"]["vim_status"] = vim_info_status
-                        task["error_msg"] = vim_info_error_msg
-                        if vim_info.get("vim_info"):
-                            task["vim_info"] = vim_info["vim_info"]
-                        temp_dict = {"status": vim_info_status, "error_msg": vim_info_error_msg}
-                        if vim_info.get("vim_info"):
-                            temp_dict["vim_info"] = vim_info["vim_info"]
-                        self.db.update_rows('instance_nets', UPDATE=temp_dict, WHERE={"uuid": task["item_id"]})
-                        self.db.update_rows(
-                            'vim_wim_actions',
-                            UPDATE={"extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256),
-                                    "error_msg": task.get("error_msg"), "modified_at": now},
-                            WHERE={'instance_action_id': task['instance_action_id'],
-                                    'task_index': task['task_index']})
-                    if task["extra"].get("vim_status") == "BUILD":
-                        self._insert_refresh(task, now + self.REFRESH_BUILD)
-                    else:
-                        self._insert_refresh(task, now + self.REFRESH_ACTIVE)
+        except Exception as e:
+            self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
 
-        return nb_processed
+    def _refres_vm(self, task):
+        """Call VIM to get VMs status"""
+        database_update = None
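+        # database_update gathers the instance_vms fields to write back; it is only filled when the VIM-reported state changes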
+
+        vim_id = task["vim_id"]
+        vm_to_refresh_list = [vim_id]
+        try:
+            vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
+            vim_info = vim_dict[vim_id]
+        except vimconn.vimconnException as e:
+            # Mark all tasks at VIM_ERROR status
+            self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
+            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
 
-    def _insert_refresh(self, task, threshold_time=None):
-        """Insert a task at list of refreshing elements. The refreshing list is ordered by threshold_time (task['modified_at']
-        It is assumed that this is called inside this thread
-        """
-        if not self.vim:
-            return
-        if not threshold_time:
-            threshold_time = time.time()
-        task["modified_at"] = threshold_time
-        task_name = task["item"][9:] + "-" + task["action"]
         task_id = task["instance_action_id"] + "." + str(task["task_index"])
-        for index in range(0, len(self.refresh_tasks)):
-            if self.refresh_tasks[index]["modified_at"] > threshold_time:
-                self.refresh_tasks.insert(index, task)
-                break
-        else:
-            index = len(self.refresh_tasks)
-            self.refresh_tasks.append(task)
-        self.logger.debug("task={} new refresh name={}, modified_at={} index={}".format(
-            task_id, task_name, task["modified_at"], index))
-
-    def _remove_refresh(self, task_name, vim_id):
-        """Remove a task with this name and vim_id from the list of refreshing elements.
-        It is assumed that this is called inside this thread outside _refres_elements method
-        Return True if self.refresh_list is modified, task is found
-        Return False if not found
-        """
-        index_to_delete = None
-        for index in range(0, len(self.refresh_tasks)):
-            if self.refresh_tasks[index]["name"] == task_name and self.refresh_tasks[index]["vim_id"] == vim_id:
-                index_to_delete = index
-                break
-        else:
-            return False
-        if not index_to_delete:
-            del self.refresh_tasks[index_to_delete]
-        return True
-
-    def _proccess_pending_tasks(self):
-        nb_created = 0
-        nb_processed = 0
-        while self.pending_tasks:
-            task = self.pending_tasks.pop(0)
-            nb_processed += 1
+        self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
+
+        # check and update interfaces
+        task_warning_msg = ""
+        for interface in vim_info.get("interfaces", ()):
+            vim_interface_id = interface["vim_interface_id"]
+            if vim_interface_id not in task["extra"]["interfaces"]:
+                self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
+                    task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
+                continue
+            task_interface = task["extra"]["interfaces"][vim_interface_id]
+            task_vim_interface = task_interface.get("vim_info")
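+            # update the SDN port and instance_interfaces only when the VIM reports a change for this interface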
+            if task_vim_interface != interface:
+                # delete old port
+                if task_interface.get("sdn_port_id"):
+                    try:
+                        with self.db_lock:
+                            self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
+                            task_interface["sdn_port_id"] = None
+                    except ovimException as e:
+                        error_text = "ovimException deleting external_port={}: {}".format(
+                            task_interface["sdn_port_id"], e)
+                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+                        task_warning_msg += error_text
+                        # TODO Set error_msg at instance_nets instead of instance VMs
+
+                # Create SDN port
+                sdn_net_id = task_interface.get("sdn_net_id")
+                if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+                    sdn_port_name = sdn_net_id + "." + task["vim_id"]
+                    sdn_port_name = sdn_port_name[:63]
+                    try:
+                        with self.db_lock:
+                            sdn_port_id = self.ovim.new_external_port(
+                                {"compute_node": interface["compute_node"],
+                                    "pci": interface["pci"],
+                                    "vlan": interface.get("vlan"),
+                                    "net_id": sdn_net_id,
+                                    "region": self.vim["config"]["datacenter_id"],
+                                    "name": sdn_port_name,
+                                    "mac": interface.get("mac_address")})
+                            task_interface["sdn_port_id"] = sdn_port_id
+                    except (ovimException, Exception) as e:
+                        error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
+                            format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
+                        self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+                        task_warning_msg += error_text
+                        # TODO Set error_msg at instance_nets instead of instance VMs
+
+                self.db.update_rows('instance_interfaces',
+                                    UPDATE={"mac_address": interface.get("mac_address"),
+                                            "ip_address": interface.get("ip_address"),
+                                            "vim_interface_id": interface.get("vim_interface_id"),
+                                            "vim_info": interface.get("vim_info"),
+                                            "sdn_port_id": task_interface.get("sdn_port_id"),
+                                            "compute_node": interface.get("compute_node"),
+                                            "pci": interface.get("pci"),
+                                            "vlan": interface.get("vlan")},
+                                    WHERE={'uuid': task_interface["iface_id"]})
+                task_interface["vim_info"] = interface
+
+        # check and update task and instance_vms database
+        vim_info_error_msg = None
+        if vim_info.get("error_msg"):
+            vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
+        elif task_warning_msg:
+            vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
+        task_vim_info = task["extra"].get("vim_info")
+        task_error_msg = task.get("error_msg")
+        task_vim_status = task["extra"].get("vim_status")
+        if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+            database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+            if vim_info.get("vim_info"):
+                database_update["vim_info"] = vim_info["vim_info"]
+
+            task["extra"]["vim_status"] = vim_info["status"]
+            task["error_msg"] = vim_info_error_msg
+            if vim_info.get("vim_info"):
+                task["extra"]["vim_info"] = vim_info["vim_info"]
+
+        return database_update
+
+    def _refres_net(self, task):
+        """Call VIM to get network status"""
+        database_update = None
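+        # database_update gathers the instance_nets fields to write back; it is only filled when the VIM or SDN state changes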
+
+        vim_id = task["vim_id"]
+        net_to_refresh_list = [vim_id]
+        try:
+            vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
+            vim_info = vim_dict[vim_id]
+        except vimconn.vimconnException as e:
+            # Mark all tasks at VIM_ERROR status
+            self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
+            vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+
+        task_id = task["instance_action_id"] + "." + str(task["task_index"])
+        self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))
+
+        task_vim_info = task["extra"].get("vim_info")
+        task_vim_status = task["extra"].get("vim_status")
+        task_error_msg = task.get("error_msg")
+        task_sdn_net_id = task["extra"].get("sdn_net_id")
+
+        vim_info_status = vim_info["status"]
+        vim_info_error_msg = vim_info.get("error_msg")
+        # get ovim status
+        if task_sdn_net_id:
             try:
+                with self.db_lock:
+                    sdn_net = self.ovim.show_network(task_sdn_net_id)
+            except (ovimException, Exception) as e:
+                text_error = "ovimException getting network sdn_net_id={}: {}".format(task_sdn_net_id, e)
+                self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
+                sdn_net = {"status": "ERROR", "last_error": text_error}
+            if sdn_net["status"] == "ERROR":
+                if not vim_info_error_msg:
+                    vim_info_error_msg = str(sdn_net.get("last_error"))
+                else:
+                    vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
+                        self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
+                        self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
+                vim_info_status = "ERROR"
+            elif sdn_net["status"] == "BUILD":
+                if vim_info_status == "ACTIVE":
+                    vim_info_status = "BUILD"
+
+        # update database
+        if vim_info_error_msg:
+            vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
+        if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
+                (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+            task["extra"]["vim_status"] = vim_info_status
+            task["error_msg"] = vim_info_error_msg
+            if vim_info.get("vim_info"):
+                task["extra"]["vim_info"] = vim_info["vim_info"]
+            database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
+            if vim_info.get("vim_info"):
+                database_update["vim_info"] = vim_info["vim_info"]
+        return database_update
+
+    def _proccess_pending_tasks(self, task, related_tasks):
+        old_task_status = task["status"]
+        create_or_find = False   # True if, as a result of processing this task, something is created or found
+        next_refresh = 0
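+        # next_refresh is stored as the task's modified_at, so the task is picked up again at that time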
+
+        try:
+            if task["status"] == "SCHEDULED":
                 # check if tasks that this depends on have been completed
                 dependency_not_completed = False
+                dependency_modified_at = 0
                 for task_index in task["extra"].get("depends_on", ()):
-                    task_dependency = task["depends"].get("TASK-" + str(task_index))
+                    task_dependency = self._look_for_task(task["instance_action_id"], task_index)
                     if not task_dependency:
-                        task_dependency = self._look_for_task(task["instance_action_id"], task_index)
-                        if not task_dependency:
-                            raise VimThreadException(
-                                "Cannot get depending net task trying to get depending task {}.{}".format(
-                                    task["instance_action_id"], task_index))
-                        # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so database must be look again
+                        raise VimThreadException(
+                            "Cannot get depending net task trying to get depending task {}.{}".format(
+                                task["instance_action_id"], task_index))
+                    # task["depends"]["TASK-" + str(task_index)] = task_dependency  # it references another object,
+                    # so the database must be looked up again
                     if task_dependency["status"] == "SCHEDULED":
                         dependency_not_completed = True
+                        dependency_modified_at = task_dependency["modified_at"]
                         break
                     elif task_dependency["status"] == "FAILED":
                         raise VimThreadException(
@@ -542,205 +531,184 @@ class vim_thread(threading.Thread):
                                 task["instance_action_id"], task["task_index"],
                                 task_dependency["instance_action_id"], task_dependency["task_index"],
                                 task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))
+
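+                    # make the resolved vim_id available under both the short and the fully-qualified TASK keys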
+                    task["depends"]["TASK-"+str(task_index)] = task_dependency["vim_id"]
+                    task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] =\
+                        task_dependency["vim_id"]
                 if dependency_not_completed:
-                    # Move this task to the end.
-                    task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
-                    if task["extra"]["tries"] <= 3:
-                        self.pending_tasks.append(task)
-                        continue
-                    else:
-                        raise VimThreadException(
-                            "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
-                            "(task {}.{})".format(task["action"], task["item"],
-                                                  task["instance_action_id"], task["task_index"],
-                                                  task_dependency["instance_action_id"], task_dependency["task_index"],
-                                                  task_dependency["action"], task_dependency["item"]))
-
-                if task["status"] == "SUPERSEDED":
-                    # not needed to do anything but update database with the new status
-                    result = True
-                    database_update = None
-                elif not self.vim:
-                    task["status"] = "ERROR"
-                    task["error_msg"] = self.error_status
-                    result = False
-                    database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
-                elif task["item"] == 'instance_vms':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_vm(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_vm(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                elif task["item"] == 'instance_nets':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_net(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_net(task)
-                    elif task["action"] == "FIND":
-                        result, database_update = self.get_net(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                elif task["item"] == 'instance_sfis':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_sfi(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_sfi(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                elif task["item"] == 'instance_sfs':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_sf(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_sf(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                elif task["item"] == 'instance_classifications':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_classification(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_classification(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                elif task["item"] == 'instance_sfps':
-                    if task["action"] == "CREATE":
-                        result, database_update = self.new_sfp(task)
-                        nb_created += 1
-                    elif task["action"] == "DELETE":
-                        result, database_update = self.del_sfp(task)
-                    else:
-                        raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
-                else:
-                    raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
-                    # TODO
-            except VimThreadException as e:
-                result = False
-                task["error_msg"] = str(e)
+                    # Postpone this task until the moment the dependency is expected to be modified, plus 10 seconds.
+                    self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
+                                        UPDATE={"worker": None},
+                                        WHERE={"datacenter_vim_id": self.datacenter_tenant_id, "worker": self.my_id,
+                                               "related": task["related"],
+                                               })
+                    # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
+                    # if task["extra"]["tries"] > 3:
+                    #     raise VimThreadException(
+                    #         "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
+                    #         "(task {}.{})".format(task["action"], task["item"],
+                    #                               task["instance_action_id"], task["task_index"],
+                    #                               task_dependency["instance_action_id"], task_dependency["task_index"],
+                    #                               task_dependency["action"], task_dependency["item"]))
+                    return
+
+            database_update = None
+            if task["action"] == "DELETE":
+                deletion_needed = self._delete_task(task)
+                if not deletion_needed:
+                    task["status"] = "SUPERSEDED"  # with FINISHED instead of DONE it will not be refreshed
+                    task["error_msg"] = None
+
+            if task["status"] == "SUPERSEDED":
+                # not needed to do anything but update database with the new status
+                database_update = None
+            elif not self.vim:
                 task["status"] = "FAILED"
+                task["error_msg"] = self.error_status
                 database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+            elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
+                # Do nothing, just copy values from one task to the other and update the database
+                task["status"] = related_tasks[0]["status"]
+                task["error_msg"] = related_tasks[0]["error_msg"]
+                task["vim_id"] = related_tasks[0]["vim_id"]
+                extra = yaml.load(related_tasks[0]["extra"])
+                task["extra"]["vim_status"] = extra["vim_status"]
+                next_refresh = related_tasks[0]["modified_at"] + 0.001
+                database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
+                                   "error_msg": task["error_msg"]}
                 if task["item"] == 'instance_vms':
-                    database_update["vim_vm_id"] = None
+                    database_update["vim_vm_id"] = task["vim_id"]
                 elif task["item"] == 'instance_nets':
-                    database_update["vim_net_id"] = None
+                    database_update["vim_net_id"] = task["vim_id"]
+            elif task["item"] == 'instance_vms':
+                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+                    database_update = self._refres_vm(task)
+                    create_or_find = True
+                elif task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_vm(task)
+                elif task["action"] == "DELETE":
+                    self.del_vm(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            elif task["item"] == 'instance_nets':
+                if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+                    database_update = self._refres_net(task)
+                    create_or_find = True
+                elif task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_net(task)
+                elif task["action"] == "DELETE":
+                    self.del_net(task)
+                elif task["action"] == "FIND":
+                    database_update = self.get_net(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            elif task["item"] == 'instance_sfis':
+                if task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_sfi(task)
+                elif task["action"] == "DELETE":
+                    self.del_sfi(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            elif task["item"] == 'instance_sfs':
+                if task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_sf(task)
+                elif task["action"] == "DELETE":
+                    self.del_sf(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            elif task["item"] == 'instance_classifications':
+                if task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_classification(task)
+                elif task["action"] == "DELETE":
+                    self.del_classification(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            elif task["item"] == 'instance_sfps':
+                if task["action"] == "CREATE":
+                    create_or_find = True
+                    database_update = self.new_sfp(task)
+                elif task["action"] == "DELETE":
+                    self.del_sfp(task)
+                else:
+                    raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+            else:
+                raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
+                # TODO
+        except VimThreadException as e:
+            task["error_msg"] = str(e)
+            task["status"] = "FAILED"
+            database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+            if task["item"] == 'instance_vms':
+                database_update["vim_vm_id"] = None
+            elif task["item"] == 'instance_nets':
+                database_update["vim_net_id"] = None
 
-            no_refresh_tasks = ['instance_sfis', 'instance_sfs',
-                                'instance_classifications', 'instance_sfps']
-            if task["action"] == "DELETE":
-                action_key = task["item"] + task["item_id"]
-                del self.grouped_tasks[action_key]
-            elif task["action"] in ("CREATE", "FIND") and task["status"] in ("DONE", "BUILD"):
-                if task["item"] not in no_refresh_tasks:
-                    self._insert_refresh(task)
+        task_id = task["instance_action_id"] + "." + str(task["task_index"])
+        self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
+            task_id, task["item"], task["action"], task["status"],
+            task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
+        try:
+            if not next_refresh:
+                if task["status"] == "DONE":
+                    next_refresh = time.time()
+                    if task["extra"].get("vim_status") == "BUILD":
+                        next_refresh += self.REFRESH_BUILD
+                    elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR"):
+                        next_refresh += self.REFRESH_ERROR
+                    elif task["extra"].get("vim_status") == "DELETED":
+                        next_refresh += self.REFRESH_DELETE
+                    else:
+                        next_refresh += self.REFRESH_ACTIVE
+                elif task["status"] == "FAILED":
+                    next_refresh = time.time() + self.REFRESH_DELETE
 
-            task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
-                task_id, task["item"], task["action"], task["status"],
-                task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
-            try:
-                now = time.time()
+            if create_or_find:
+                # modify all related tasks with action FIND/CREATE that are not SCHEDULED
                 self.db.update_rows(
-                    table="vim_wim_actions",
-                    UPDATE={"status": task["status"], "vim_id": task.get("vim_id"), "modified_at": now,
+                    table="vim_wim_actions", modified_time=next_refresh + 0.001,
+                    UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
                             "error_msg": task["error_msg"],
-                            "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
-                    WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
-                if result is not None:
-                    self.db.update_rows(
-                        table="instance_actions",
-                        UPDATE={("number_done" if result else "number_failed"): {"INCREMENT": 1},
-                                "modified_at": now},
-                        WHERE={"uuid": task["instance_action_id"]})
-                if database_update:
-                    self.db.update_rows(table=task["item"],
-                                        UPDATE=database_update,
-                                        WHERE={"uuid": task["item_id"]})
-            except db_base_Exception as e:
-                self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
-
-            if nb_created == 10:
-                break
-        return nb_processed
-
-    def _insert_pending_tasks(self, vim_actions_list):
-        for task in vim_actions_list:
-            if task["datacenter_vim_id"] != self.datacenter_tenant_id:
-                continue
-            item = task["item"]
-            item_id = task["item_id"]
-            action_key = item + item_id
-            if action_key not in self.grouped_tasks:
-                self.grouped_tasks[action_key] = []
-            task["params"] = None
-            task["depends"] = {}
-            if task["extra"]:
-                extra = yaml.load(task["extra"])
-                task["extra"] = extra
-                task["params"] = extra.get("params")
-                depends_on_list = extra.get("depends_on")
-                if depends_on_list:
-                    for dependency_task in depends_on_list:
-                        if isinstance(dependency_task, int):
-                            index = dependency_task
-                        else:
-                            instance_action_id, _, task_id = dependency_task.rpartition(".")
-                            if instance_action_id != task["instance_action_id"]:
-                                continue
-                            index = int(task_id)
-
-                        if index < len(vim_actions_list) and vim_actions_list[index]["task_index"] == index and \
-                                vim_actions_list[index]["instance_action_id"] == task["instance_action_id"]:
-                            task["depends"]["TASK-" + str(index)] = vim_actions_list[index]
-                            task["depends"]["TASK-{}.{}".format(task["instance_action_id"], index)] = vim_actions_list[index]
-                if extra.get("interfaces"):
-                    task["vim_interfaces"] = {}
-            else:
-                task["extra"] = {}
-            if "error_msg" not in task:
-                task["error_msg"] = None
-            if "vim_id" not in task:
-                task["vim_id"] = None
-
-            if task["action"] == "DELETE":
-                need_delete_action = False
-                for to_supersede in self.grouped_tasks.get(action_key, ()):
-                    if to_supersede["action"] == "FIND" and to_supersede.get("vim_id"):
-                        task["vim_id"] = to_supersede["vim_id"]
-                    if to_supersede["action"] == "CREATE" and to_supersede["extra"].get("created", True) and \
-                            (to_supersede.get("vim_id") or to_supersede["extra"].get("sdn_net_id")):
-                        need_delete_action = True
-                        task["vim_id"] = to_supersede["vim_id"]
-                        if to_supersede["extra"].get("sdn_net_id"):
-                            task["extra"]["sdn_net_id"] = to_supersede["extra"]["sdn_net_id"]
-                        if to_supersede["extra"].get("interfaces"):
-                            task["extra"]["interfaces"] = to_supersede["extra"]["interfaces"]
-                        if to_supersede["extra"].get("created_items"):
-                            if not task["extra"].get("created_items"):
-                                task["extra"]["created_items"] = {}
-                            task["extra"]["created_items"].update(to_supersede["extra"]["created_items"])
-                    # Mark task as SUPERSEDED.
-                    #   If task is in self.pending_tasks, it will be removed and database will be update
-                    #   If task is in self.refresh_tasks, it will be removed
-                    to_supersede["status"] = "SUPERSEDED"
-                if not need_delete_action:
-                    task["status"] = "SUPERSEDED"
-
-                self.grouped_tasks[action_key].append(task)
-                self.pending_tasks.append(task)
-            elif task["status"] == "SCHEDULED":
-                self.grouped_tasks[action_key].append(task)
-                self.pending_tasks.append(task)
-            elif task["action"] in ("CREATE", "FIND"):
-                self.grouped_tasks[action_key].append(task)
-                if task["status"] in ("DONE", "BUILD"):
-                    self._insert_refresh(task)
-            # TODO add VM reset, get console, etc...
-            else:
-                raise vimconn.vimconnException(self.name + "unknown vim_action action {}".format(task["action"]))
+                            },
+                    WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                           "worker": self.my_id,
+                           "action": ["FIND", "CREATE"],
+                           "related": task["related"],
+                           "status<>": "SCHEDULED",
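+                           # 'status<>' is a not-equal filter: exclude rows that are still SCHEDULED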
+                           })
+            # modify own task
+            self.db.update_rows(
+                table="vim_wim_actions", modified_time=next_refresh,
+                UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
+                        "error_msg": task["error_msg"],
+                        "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
+                WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
+            # Unlock tasks
+            self.db.update_rows(
+                table="vim_wim_actions", modified_time=0,
+                UPDATE={"worker": None},
+                WHERE={"datacenter_vim_id": self.datacenter_tenant_id,
+                       "worker": self.my_id,
+                       "related": task["related"],
+                       })
+
+            # Update table instance_actions
+            if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
+                self.db.update_rows(
+                    table="instance_actions",
+                    UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
+                    WHERE={"uuid": task["instance_action_id"]})
+            if database_update:
+                self.db.update_rows(table=task["item"],
+                                    UPDATE=database_update,
+                                    WHERE={"related": task["related"]})
+        except db_base_Exception as e:
+            self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
 
     def insert_task(self, task):
         try:
@@ -763,7 +731,6 @@ class vim_thread(threading.Thread):
         while True:
             self.get_vimconnector()
             self.logger.debug("Vimconnector loaded")
-            self._reload_vim_actions()
             reload_thread = False
 
             while True:
@@ -771,7 +738,7 @@ class vim_thread(threading.Thread):
                     while not self.task_queue.empty():
                         task = self.task_queue.get()
                         if isinstance(task, list):
-                            self._insert_pending_tasks(task)
+                            pass
                         elif isinstance(task, str):
                             if task == 'exit':
                                 return 0
@@ -781,10 +748,12 @@ class vim_thread(threading.Thread):
                         self.task_queue.task_done()
                     if reload_thread:
                         break
-                    nb_processed = self._proccess_pending_tasks()
-                    nb_processed += self._refres_elements()
-                    if not nb_processed:
-                        time.sleep(1)
+
+                    task, related_tasks = self._get_db_task()
+                    if task:
+                        self._proccess_pending_tasks(task, related_tasks)
+                    else:
+                        time.sleep(5)
 
                 except Exception as e:
                     self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
@@ -811,7 +780,7 @@ class vim_thread(threading.Thread):
                 instance_action_id = ins_action_id
 
         tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
-                                                            "task_index": task_index})
+                                                                "task_index": task_index})
         if not tasks:
             return None
         task = tasks[0]
@@ -821,8 +790,6 @@ class vim_thread(threading.Thread):
             extra = yaml.load(task["extra"])
             task["extra"] = extra
             task["params"] = extra.get("params")
-            if extra.get("interfaces"):
-                task["vim_interfaces"] = {}
         else:
             task["extra"] = {}
         return task
@@ -841,14 +808,7 @@ class vim_thread(threading.Thread):
             net_list = params[5]
             for net in net_list:
                 if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
-                    task_dependency = task["depends"].get(net["net_id"])
-                    if not task_dependency:
-                        task_dependency = self._look_for_task(task["instance_action_id"], net["net_id"])
-                        if not task_dependency:
-                            raise VimThreadException(
-                                "Cannot get depending net task trying to get depending task {}.{}".format(
-                                    task["instance_action_id"], net["net_id"]))
-                    network_id = task_dependency.get("vim_id")
+                    network_id = task["depends"][net["net_id"]]
                     if not network_id:
                         raise VimThreadException(
                             "Cannot create VM because depends on a network not created or found: " +
@@ -870,18 +830,19 @@ class vim_thread(threading.Thread):
                     task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
                 else:
                     self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
-                                                                                                        iface["uuid"]), exc_info=True)
+                                                                                                        iface["uuid"]),
+                                         exc_info=True)
 
             task["vim_info"] = {}
-            task["vim_interfaces"] = {}
             task["extra"]["interfaces"] = task_interfaces
             task["extra"]["created"] = True
             task["extra"]["created_items"] = created_items
+            task["extra"]["vim_status"] = "BUILD"
             task["error_msg"] = None
             task["status"] = "DONE"
             task["vim_id"] = vim_vm_id
             instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
-            return True, instance_element_update
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("task={} new-VM: {}".format(task_id, e))
@@ -890,7 +851,7 @@ class vim_thread(threading.Thread):
             task["status"] = "FAILED"
             task["vim_id"] = None
             instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
-            return False, instance_element_update
+            return instance_element_update
 
     def del_vm(self, task):
         task_id = task["instance_action_id"] + "." + str(task["task_index"])
@@ -908,18 +869,18 @@ class vim_thread(threading.Thread):
                         # TODO Set error_msg at instance_nets
 
             self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
 
         except vimconn.vimconnException as e:
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
             task["status"] = "FAILED"
-            return False, None
+            return None
 
     def _get_net_internal(self, task, filter_param):
         """
@@ -939,15 +900,15 @@ class vim_thread(threading.Thread):
         # Discover if this network is managed by a sdn controller
         sdn_net_id = None
         result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
-                                    WHERE={'vim_net_id': vim_net_id,
-                                            'datacenter_tenant_id': self.datacenter_tenant_id},
-                                    ORDER="instance_scenario_id")
+                                  WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
+                                  ORDER="instance_scenario_id")
         if result:
             sdn_net_id = result[0]['sdn_net_id']
 
         task["status"] = "DONE"
         task["extra"]["vim_info"] = {}
         task["extra"]["created"] = False
+        task["extra"]["vim_status"] = "BUILD"
         task["extra"]["sdn_net_id"] = sdn_net_id
         task["error_msg"] = None
         task["vim_id"] = vim_net_id
@@ -961,7 +922,7 @@ class vim_thread(threading.Thread):
             params = task["params"]
             filter_param = params[0]
             instance_element_update = self._get_net_internal(task, filter_param)
-            return True, instance_element_update
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("task={} get-net: {}".format(task_id, e))
@@ -970,7 +931,7 @@ class vim_thread(threading.Thread):
             task["error_msg"] = self._format_vim_error_msg(str(e))
             instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
                                        "error_msg": task["error_msg"]}
-            return False, instance_element_update
+            return instance_element_update
 
     def new_net(self, task):
         vim_net_id = None
@@ -984,7 +945,7 @@ class vim_thread(threading.Thread):
                 filter_param = task["extra"]["find"][0]
                 try:
                     instance_element_update = self._get_net_internal(task, filter_param)
-                    return True, instance_element_update
+                    return instance_element_update
                 except VimThreadExceptionNotFound:
                     pass
             # CREATE
@@ -1034,17 +995,17 @@ class vim_thread(threading.Thread):
                             sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
                     self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
                                                                                             sdn_net_id))
-
             task["status"] = "DONE"
             task["extra"]["vim_info"] = {}
             task["extra"]["sdn_net_id"] = sdn_net_id
+            task["extra"]["vim_status"] = "BUILD"
             task["extra"]["created"] = True
             task["extra"]["created_items"] = created_items
             task["error_msg"] = None
             task["vim_id"] = vim_net_id
             instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "BUILD",
                                        "created": True, "error_msg": None}
-            return True, instance_element_update
+            return instance_element_update
         except (vimconn.vimconnException, ovimException) as e:
             self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
             task["status"] = "FAILED"
@@ -1053,7 +1014,7 @@ class vim_thread(threading.Thread):
             task["extra"]["sdn_net_id"] = sdn_net_id
             instance_element_update = {"vim_net_id": vim_net_id, "sdn_net_id": sdn_net_id, "status": "VIM_ERROR",
                                        "error_msg": task["error_msg"]}
-            return False, instance_element_update
+            return instance_element_update
 
     def del_net(self, task):
         net_vim_id = task["vim_id"]
@@ -1070,9 +1031,9 @@ class vim_thread(threading.Thread):
                     self.ovim.delete_network(sdn_net_id, idempotent=True)
             if net_vim_id:
                 self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
         except ovimException as e:
             task["error_msg"] = self._format_vim_error_msg("ovimException obtaining and deleting external "
                                                            "ports for net {}: {}".format(sdn_net_id, str(e)))
@@ -1080,13 +1041,12 @@ class vim_thread(threading.Thread):
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
         task["status"] = "FAILED"
-        return False, None
-
-    ## Service Function Instances
+        return None
 
+    # Service Function Instances
     def new_sfi(self, task):
         vim_sfi_id = None
         try:
@@ -1112,12 +1072,16 @@ class vim_thread(threading.Thread):
             else:
                 egress_vim_interface_id = ingress_vim_interface_id
             if not ingress_vim_interface_id or not egress_vim_interface_id:
-                self.logger.error("Error creating Service Function Instance, Ingress: %s, Egress: %s",
-                                  ingress_vim_interface_id, egress_vim_interface_id)
-                return False, None
+                error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
+                    ingress_vim_interface_id, egress_vim_interface_id)
+                self.logger.error(error_text)
+                task["error_msg"] = error_text
+                task["status"] = "FAILED"
+                task["vim_id"] = None
+                return None
             # At the moment, every port associated with the VM will be used both as ingress and egress ports.
-            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack, only the
-            # first ingress and first egress ports will be used to create the SFI (Port Pair).
+            # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
+            # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
             ingress_port_id_list = [ingress_vim_interface_id]
             egress_port_id_list = [egress_vim_interface_id]
             name = "sfi-%s" % task["item_id"][:8]
@@ -1125,11 +1089,12 @@ class vim_thread(threading.Thread):
             vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)
 
             task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
             task["error_msg"] = None
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["vim_id"] = vim_sfi_id
             instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
-            return True, instance_element_update
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
@@ -1138,24 +1103,24 @@ class vim_thread(threading.Thread):
             task["status"] = "FAILED"
             task["vim_id"] = None
             instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
-            return False, instance_element_update
+            return instance_element_update
 
     def del_sfi(self, task):
         sfi_vim_id = task["vim_id"]
         try:
             self.vim.delete_sfi(sfi_vim_id)
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
 
         except vimconn.vimconnException as e:
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
             task["status"] = "FAILED"
-            return False, None
+            return None
 
     def new_sf(self, task):
         vim_sf_id = None
@@ -1173,11 +1138,12 @@ class vim_thread(threading.Thread):
             vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
 
             task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
             task["error_msg"] = None
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["vim_id"] = vim_sf_id
             instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
-            return True, instance_element_update
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
@@ -1186,24 +1152,24 @@ class vim_thread(threading.Thread):
             task["status"] = "FAILED"
             task["vim_id"] = None
             instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
-            return False, instance_element_update
+            return instance_element_update
 
     def del_sf(self, task):
         sf_vim_id = task["vim_id"]
         try:
             self.vim.delete_sf(sf_vim_id)
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
 
         except vimconn.vimconnException as e:
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
             task["status"] = "FAILED"
-            return False, None
+            return None
 
     def new_classification(self, task):
         vim_classification_id = None
@@ -1253,11 +1219,13 @@ class vim_thread(threading.Thread):
                 name, 'legacy_flow_classifier', definition)
 
             task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
             task["error_msg"] = None
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["vim_id"] = vim_classification_id
-            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id, "error_msg": None}
-            return True, instance_element_update
+            instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
+                                       "error_msg": None}
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
@@ -1266,31 +1234,31 @@ class vim_thread(threading.Thread):
             task["status"] = "FAILED"
             task["vim_id"] = None
             instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
-            return False, instance_element_update
+            return instance_element_update
 
     def del_classification(self, task):
         classification_vim_id = task["vim_id"]
         try:
             self.vim.delete_classification(classification_vim_id)
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
 
         except vimconn.vimconnException as e:
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
             task["status"] = "FAILED"
-            return False, None
+            return None
 
     def new_sfp(self, task):
         vim_sfp_id = None
         try:
-            params = task["params"]
             task_id = task["instance_action_id"] + "." + str(task["task_index"])
-            depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in task.get("extra").get("depends_on")]
+            depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
+                               task.get("extra").get("depends_on")]
             error_text = ""
             sf_id_list = []
             classification_id_list = []
@@ -1307,11 +1275,12 @@ class vim_thread(threading.Thread):
             vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
 
             task["extra"]["created"] = True
+            task["extra"]["vim_status"] = "ACTIVE"
             task["error_msg"] = None
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["vim_id"] = vim_sfp_id
             instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
-            return True, instance_element_update
+            return instance_element_update
 
         except (vimconn.vimconnException, VimThreadException) as e:
             self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
@@ -1320,22 +1289,21 @@ class vim_thread(threading.Thread):
             task["status"] = "FAILED"
             task["vim_id"] = None
             instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
-            return False, instance_element_update
-        return
+            return instance_element_update
 
     def del_sfp(self, task):
         sfp_vim_id = task["vim_id"]
         try:
             self.vim.delete_sfp(sfp_vim_id)
-            task["status"] = "DONE"
+            task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
             task["error_msg"] = None
-            return True, None
+            return None
 
         except vimconn.vimconnException as e:
             task["error_msg"] = self._format_vim_error_msg(str(e))
             if isinstance(e, vimconn.vimconnNotFoundException):
                 # If not found mark as Done and fill error_msg
-                task["status"] = "DONE"
-                return True, None
+                task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                return None
             task["status"] = "FAILED"
-            return False, None
+            return None
index 1b1c6e5..ff67f6f 100644 (file)
@@ -571,6 +571,8 @@ class vimconnector(vimconn.vimconnector):
                         network_dict["provider:segmentation_id"] = self._generate_vlanID()
 
             network_dict["shared"] = shared
+            if self.config.get("disable_network_port_security"):
+                network_dict["port_security_enabled"] = False
             new_net = self.neutron.create_network({'network':network_dict})
             # print new_net
             # create subnetwork, even if there is no profile
index d4b83e5..128e8e1 100644 (file)
@@ -29,6 +29,9 @@ from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
 
 import vimconn
 import os
+import shutil
+import subprocess
+import tempfile
 import traceback
 import itertools
 import requests
@@ -1034,8 +1037,7 @@ class vimconnector(vimconn.vimconnector):
         """
         for catalog in catalogs:
             if catalog['name'] == catalog_name:
-                return True
-        return False
+                return catalog['id']
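+        # Implicitly returns None when no catalog matches; callers now compare the
+        # returned catalog id against None instead of relying on a True/False flag.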
 
     def create_vimcatalog(self, vca=None, catalog_name=None):
         """ Create new catalog entry in vCloud director.
@@ -1045,16 +1047,19 @@ class vimconnector(vimconn.vimconnector):
                 catalog_name catalog that client wish to create.   Note no validation done for a name.
                 Client must make sure that provide valid string representation.
 
-             Return (bool) True if catalog created.
+             Returns catalog id if catalog created else None.
 
         """
         try:
-            result = vca.create_catalog(catalog_name, catalog_name)
-            if result is not None:
-                return True
+            lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+            if lxml_catalog_element:
+                id_attr_value = lxml_catalog_element.get('id')  # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+                return id_attr_value.split(':')[-1]
             catalogs = vca.list_catalogs()
-        except:
-            return False
+        except Exception as ex:
+            self.logger.error(
+                'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+            raise
         return self.catalog_exists(catalog_name, catalogs)
 
     # noinspection PyIncorrectDocstring
@@ -1324,8 +1329,7 @@ class vimconnector(vimconn.vimconnector):
 
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
-            result = self.create_vimcatalog(org, catalog_md5_name)
-            if not result:
+            if self.create_vimcatalog(org, catalog_md5_name) is None:
                 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
 
             result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
@@ -1345,8 +1349,7 @@ class vimconnector(vimconn.vimconnector):
 
         # if we didn't find existing catalog we create a new one and upload image.
         self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
-        result = self.create_vimcatalog(org, catalog_md5_name)
-        if not result:
+        if self.create_vimcatalog(org, catalog_md5_name) is None:
             raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
 
         result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
@@ -1662,13 +1665,12 @@ class vimconnector(vimconn.vimconnector):
                 else:
                     result = (response.content).replace("\n"," ")
 
-                src = re.search('<Vm goldMaster="false"\sstatus="\d+"\sname="(.*?)"\s'
-                                               'id="(\w+:\w+:vm:.*?)"\shref="(.*?)"\s'
-                              'type="application/vnd\.vmware\.vcloud\.vm\+xml',result)
-                if src:
-                    vm_name = src.group(1)
-                    vm_id = src.group(2)
-                    vm_href = src.group(3)
+                vapp_template_tree = XmlElementTree.fromstring(response.content)
+                children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
+                vm_element = [child for child in children_element if 'Vm' in child.tag][0]
+                vm_name = vm_element.get('name')
+                vm_id = vm_element.get('id')
+                vm_href = vm_element.get('href')
 
                 cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
                 memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
@@ -1967,7 +1969,32 @@ class vimconnector(vimconn.vimconnector):
 
             # cloud-init for ssh-key injection
             if cloud_config:
-                self.cloud_init(vapp,cloud_config)
+                # Create a catalog which will be carrying the config drive ISO
+                # This catalog is deleted during vApp deletion. The catalog name carries
+                # vApp UUID and that's how it gets identified during its deletion.
+                config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+                self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+                    config_drive_catalog_name))
+                config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+                if config_drive_catalog_id is None:
+                    error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+                                "ISO".format(config_drive_catalog_name)
+                    raise Exception(error_msg)
+
+                # Create config-drive ISO
+                _, userdata = self._create_user_data(cloud_config)
+                # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+                iso_path = self.create_config_drive_iso(userdata)
+                self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+
+                self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+                self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+                # Attach the config-drive ISO to the VM
+                self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+                # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
+                # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call though)
+                self.insert_media_to_vm(vapp, config_drive_catalog_id)
+                shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
 
             # If VM has PCI devices or SRIOV reserve memory for VM
             if reserve_memory:
@@ -1984,7 +2011,11 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.error("new_vminstance(): failed to power on vApp "\
                                                      "{}".format(vmname_andid))
 
-        except Exception as exp :
+        except Exception as exp:
+            try:
+                self.delete_vminstance(vapp_uuid)
+            except Exception as exp2:
+                self.logger.error("new_vminstance rollback fail {}".format(exp2))
             # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
             self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
                               .format(name, exp))
@@ -2104,6 +2135,80 @@ class vimconnector(vimconn.vimconnector):
         else:
             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
+    def create_config_drive_iso(self, user_data):
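+        # Builds a minimal OpenStack-style config-drive (ISO volume label "config-2")
+        # containing openstack/latest/meta_data.json and openstack/latest/user_data, so
+        # that cloud-init inside the guest can pick up the injected user-data. The RO
+        # host is expected to have the genisoimage tool installed.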
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+        latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+        os.makedirs(latest_dir)
+        with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+                open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(json.dumps({"availability_zone": "nova",
+                                            "launch_index": 0,
+                                            "name": "ConfigDrive",
+                                            "uuid": str(uuid.uuid4())}
+                                           )
+                                )
+        genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+            iso_path=iso_path, source_dir_path=tmpdir)
+        self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+        try:
+            FNULL = open(os.devnull, 'w')
+            subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        return iso_path
+
+    def upload_iso_to_catalog(self, catalog_id, iso_file_path):
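+        # vCloud Director media upload is a three-step exchange: POST a Media element
+        # to /api/catalog/<id>/action/upload, GET the returned media entity to discover
+        # the file upload URL, then PUT the raw ISO bytes to that URL.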
+        if not os.path.isfile(iso_file_path):
+            error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        iso_file_stat = os.stat(iso_file_path)
+        xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+                            <Media
+                                xmlns="http://www.vmware.com/vcloud/v1.5"
+                                name="{iso_name}"
+                                size="{iso_size}"
+                                imageType="iso">
+                                <Description>ISO image for config-drive</Description>
+                            </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+        catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+        response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+
+        if response.status_code != 201:
+            error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+
+        catalogItem = XmlElementTree.fromstring(response.content)
+        entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+        entity_href = entity.get('href')
+
+        response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+        if response.status_code != 200:
+            raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+
+        match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+        if match:
+            media_upload_href = match.group(1)
+        else:
+            raise Exception('Could not parse the upload URL for the media file from the last response')
+
+        headers['Content-Type'] = 'application/octet-stream'
+        response = self.perform_request(req_type='PUT',
+                                        url=media_upload_href,
+                                        headers=headers,
+                                        data=open(iso_file_path, 'rb'))
+
+        if response.status_code != 200:
+            raise Exception('PUT request to "{}" failed'.format(media_upload_href))
 
     def get_vcd_availibility_zones(self,respool_href, headers):
         """ Method to find presence of av zone is VIM resource pool
@@ -2713,6 +2818,17 @@ class vimconnector(vimconn.vimconnector):
                         self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
                     else:
                         self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
+                        config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+                        catalog_list = self.get_image_list()
+                        try:
+                            config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+                                                       if catalog_['name'] == config_drive_catalog_name][0]
+                        except IndexError:
+                            pass
+                        if config_drive_catalog_id:
+                            self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+                                              'vapp_name "{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+                            self.delete_image(config_drive_catalog_id)
                         return vm__vim_uuid
         except:
             self.logger.debug(traceback.format_exc())
@@ -6212,10 +6328,17 @@ class vimconnector(vimconn.vimconnector):
             if iso_name and media_id:
                 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                      <ns6:MediaInsertOrEjectParams
-                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:ns6="http://www.vmware.com/vcloud/v1.5" xmlns:ns7="http://www.vmware.com/schema/ovf" xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
+                     xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
+                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
+                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
+                     xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
+                     xmlns:ns7="http://www.vmware.com/schema/ovf" 
+                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
+                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
                      <ns6:Media
                         type="application/vnd.vmware.vcloud.media+xml"
-                        name="{}.iso"
+                        name="{}"
                         id="urn:vcloud:media:{}"
                         href="https://{}/api/media/{}"/>
                      </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
@@ -6233,9 +6356,10 @@ class vimconnector(vimconn.vimconnector):
                                                     headers=headers)
 
                     if response.status_code != 202:
-                        self.logger.error("Failed to insert CD-ROM to vm")
-                        raise vimconn.vimconnException("insert_media_to_vm() : Failed to insert"\
-                                                                                    "ISO image to vm")
+                        error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+                                    "Status code {}".format(response.text, response.status_code)
+                        self.logger.error(error_msg)
+                        raise vimconn.vimconnException(error_msg)
                     else:
                         task = self.get_task_from_response(response.content)
                         result = self.client.get_task_monitor().wait_for_success(task=task)
index a5da1dd..eecf10f 100644 (file)
@@ -331,7 +331,7 @@ class WimEngine(object):
     def derive_wan_link(self,
                         wim_usage,
                         instance_scenario_id, sce_net_id,
-                        networks, tenant):
+                        networks, tenant, related=None):
         """Create a instance_wim_nets record for the given information"""
         if sce_net_id in wim_usage:
             account_id = wim_usage[sce_net_id]
@@ -347,7 +347,8 @@ class WimEngine(object):
             'instance_scenario_id': instance_scenario_id,
             'sce_net_id': sce_net_id,
             'wim_id': wim_id,
-            'wim_account_id': account['uuid']
+            'wim_account_id': account['uuid'],
+            'related': related
         }
 
     def derive_wan_links(self, wim_usage, networks, tenant=None):
@@ -366,6 +367,9 @@ class WimEngine(object):
             list: list of WAN links to be written to the database
         """
         # Group networks by key=(instance_scenario_id, sce_net_id)
+        related = None
+        if networks:
+            related = networks[0].get("related")
         filtered = _filter_multi_vim(networks)
         grouped_networks = _group_networks(filtered)
         datacenters_per_group = _count_datacenters(grouped_networks)
@@ -377,7 +381,7 @@ class WimEngine(object):
         # Keys are tuples(instance_scenario_id, sce_net_id)
         return [
             self.derive_wan_link(wim_usage,
-                                 key[0], key[1], grouped_networks[key], tenant)
+                                 key[0], key[1], grouped_networks[key], tenant, related)
             for key in wan_groups if wim_usage.get(key[1]) is not False
         ]
 
index 2e72640..636676a 100644 (file)
@@ -45,7 +45,7 @@ from hashlib import sha1
 from itertools import groupby
 from operator import itemgetter
 from sys import exc_info
-from time import time
+from time import time
 from uuid import uuid1 as generate_uuid
 
 from six import reraise
@@ -681,8 +681,7 @@ class WimPersistence(object):
                  'LIMIT {:d},{:d}').format(
                      self.safe_str(wim_account_id),
                      ','.join(type_options),
-                     group_offset, group_limit
-                 )
+                     group_offset, group_limit)
 
         join = 'vim_wim_actions NATURAL JOIN ({}) AS items'.format(items)
         db_results = self.db.get_rows(
@@ -697,7 +696,7 @@ class WimPersistence(object):
                      'task_index': task_index}
         try:
             action = self.query_one('vim_wim_actions', WHERE=condition)
-        except:
+        except Exception:
             actions = self.query('vim_wim_actions', WHERE=condition)
             self.logger.error('More then one action found:\n%s',
                               json.dumps(actions, indent=4))
@@ -710,8 +709,7 @@ class WimPersistence(object):
         updates = preprocess_record(
             merge_dicts(action, properties, extra=extra))
 
-        num_changes = self.db.update_rows('vim_wim_actions',
-                                            UPDATE=updates, WHERE=condition)
+        num_changes = self.db.update_rows('vim_wim_actions', UPDATE=updates, WHERE=condition)
 
         if num_changes is None:
             raise UnexpectedDatabaseError(
@@ -782,8 +780,7 @@ class WimPersistence(object):
         if not changes:
             return 0
 
-        return self.db.update_rows('instance_actions',
-                                    WHERE={'uuid': uuid}, UPDATE=changes)
+        return self.db.update_rows('instance_actions', WHERE={'uuid': uuid}, UPDATE=changes)
 
     def get_only_vm_with_external_net(self, instance_net_id, **kwargs):
         """Return an instance VM if that is the only VM connected to an
index cb662ab..c39e9d7 100644 (file)
@@ -121,9 +121,8 @@ def datacenter_account(datacenter, tenant):
 
 def datacenter_tenant_association(datacenter, tenant):
     return {'nfvo_tenant_id': uuid('tenant%d' % tenant),
-            'datacenter_id':  uuid('dc%d' % datacenter),
-            'datacenter_tenant_id':
-                uuid('dc-account%d%d' % (tenant, datacenter))}
+            'datacenter_id': uuid('dc%d' % datacenter),
+            'datacenter_tenant_id': uuid('dc-account%d%d' % (tenant, datacenter))}
 
 
 def datacenter_set(identifier=0, tenant=0):
index fa64fbb..f8d52bb 100644 (file)
@@ -51,7 +51,7 @@ from time import time, sleep
 from six import reraise
 from six.moves import queue
 
-from . import wan_link_actions, wimconn_odl, wimconn_dynpac # wimconn_tapi
+from . import wan_link_actions, wimconn_odl, wimconn_dynpac  # wimconn_tapi
 from ..utils import ensure, partition, pipe
 from .actions import IGNORE, PENDING, REFRESH
 from .errors import (
index 1816937..c0652e2 100644 (file)
@@ -211,8 +211,7 @@ class DynpacConnector(WimConnector):
         selected_ports = []
         for connection_point in connection_points:
             endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM)
-            port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM)
-                          == endpoint_id, port_mapping)[0]
+            port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0]
             wsmpi_json = port.get(self.__WAN_MAPPING_INFO_PARAM)
             port_info = json.loads(wsmpi_json)
             selected_ports.append(port_info)
@@ -226,7 +225,7 @@ class DynpacConnector(WimConnector):
             }, {
                 "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM),
                 "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM),
-                "wan_vlan":    connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
+                "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM)
             }],
             "bandwidth": kwargs.get(self.__BANDWIDTH_PARAM),
             "service_type": service_type,
diff --git a/osm_ro/wim/wimconn_ietfl2vpn.py b/osm_ro/wim/wimconn_ietfl2vpn.py
new file mode 100644 (file)
index 0000000..ee58f2f
--- /dev/null
@@ -0,0 +1,359 @@
+# -*- coding: utf-8 -*-
+##
+# Copyright 2018 Telefonica
+# All Rights Reserved.
+#
+# Contributors: Oscar Gonzalez de Dios, Manuel Lopez Bravo, Guillermo Pajares Martin
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This work has been performed in the context of the Metro-Haul project -
+# funded by the European Commission under Grant number 761727 through the
+# Horizon 2020 program.
+##
+"""The WIM connector is responsible for establishing wide area network
+connectivity.
+
+This WIM connector implements the standard IETF RFC 8466 "A YANG Data
+ Model for Layer 2 Virtual Private Network (L2VPN) Service Delivery"
+
+It receives the endpoints and the necessary details to request
+the Layer 2 service.
+"""
+import requests
+import uuid
+import logging
+from .wimconn import WimConnector, WimConnectorError
+"""CHeck layer where we move it"""
+
+
+class WimconnectorIETFL2VPN(WimConnector):
+
+    def __init__(self, wim, wim_account, config=None, logger=None):
+        """IETF L2VPN WIM connector
+
+        Arguments: (To be completed)
+            wim (dict): WIM record, as stored in the database
+            wim_account (dict): WIM account record, as stored in the database
+        """
+        self.logger = logging.getLogger('openmano.wimconn.ietfl2vpn')
+        super(WimconnectorIETFL2VPN, self).__init__(wim, wim_account, config, logger)
+        self.headers = {'Content-Type': 'application/json'}
+        self.mappings = {m['wan_service_endpoint_id']: m
+                         for m in self.service_endpoint_mapping}
+        self.user = wim_account.get("user")
+        self.passwd = wim_account.get("passwd")
+        if self.user and self.passwd is not None:
+            self.auth = (self.user, self.passwd)
+        else:
+            self.auth = None
+        self.logger.info("IETFL2VPN Connector Initialized.")
+
+    def check_credentials(self):
+        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+        try:
+            response = requests.get(endpoint, auth=self.auth)    
+            http_code = response.status_code
+        except requests.exceptions.RequestException as e:
+            raise WimConnectorError(e.message, http_code=503)
+
+        if http_code != 200:
+            raise WimConnectorError("Failed while authenticating", http_code=http_code)
+        self.logger.info("Credentials checked")
+
+    def get_connectivity_service_status(self, service_uuid, conn_info=None):
+        """Monitor the status of the connectivity service established
+
+        Arguments:
+            service_uuid: Connectivity service unique identifier
+
+        Returns:
+            Examples::
+                {'wim_status': 'ACTIVE'}
+                {'wim_status': 'INACTIVE'}
+                {'wim_status': 'DOWN'}
+                {'wim_status': 'ERROR'}
+        """
+        try:
+            self.logger.info("Sending get connectivity service status")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid)
+            response = requests.get(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.ok:
+                raise WimConnectorError("Unable to obtain connectivity service status",
+                                        http_code=response.status_code)
+            service_status = {'wim_status': 'ACTIVE'}
+            return service_status
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+
+    def search_mapp(self, connection_point):
+        id = connection_point['service_endpoint_id']
+        if id not in self.mappings:
+            raise WimConnectorError("Endpoint {} not located".format(str(id)))
+        else:
+            return self.mappings[id]
+
+    def create_connectivity_service(self, service_type, connection_points, **kwargs):
+        """Establish WAN connectivity between the endpoints
+
+        Arguments:
+            service_type (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2),
+                ``L3``.
+            connection_points (list): each point corresponds to
+                an entry point from the DC to the transport network. One
+                connection point serves to identify the specific access and
+                some other service parameters, such as encapsulation type.
+                Represented by a dict as follows::
+
+                    {
+                      "service_endpoint_id": ..., (str[uuid])
+                      "service_endpoint_encapsulation_type": ...,
+                           (enum: none, dot1q, ...)
+                      "service_endpoint_encapsulation_info": {
+                        ... (dict)
+                        "vlan": ..., (int, present if encapsulation is dot1q)
+                        "vni": ... (int, present if encapsulation is vxlan),
+                        "peers": [(ipv4_1), (ipv4_2)]
+                            (present if encapsulation is vxlan)
+                      }
+                    }
+
+              The service endpoint ID should have been provided to the WIM
+              engine in the RO when the WIM port mapping was registered.
+
+        Keyword Arguments:
+            bandwidth (int): value in kilobytes
+            latency (int): value in milliseconds
+
+        Other QoS might be passed as keyword arguments.
+
+        Returns:
+            tuple: ``(service_id, conn_info)`` containing:
+               - *service_uuid* (str): UUID of the established connectivity
+                  service
+               - *conn_info* (dict or None): Information to be stored at the
+                 database (or ``None``). This information will be provided to
+                 the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+                 **MUST** be JSON/YAML-serializable (plain data structures).
+
+        Raises:
+            WimConnectorException: In case of error.
+        """
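+        # Hypothetical usage sketch (endpoint ids and VLAN values are made up):
+        #   service_uuid, conn_info = connector.create_connectivity_service(
+        #       "ELINE",
+        #       [{"service_endpoint_id": "dc1-port", "service_endpoint_encapsulation_type": "dot1q",
+        #         "service_endpoint_encapsulation_info": {"vlan": 100}},
+        #        {"service_endpoint_id": "dc2-port", "service_endpoint_encapsulation_type": "dot1q",
+        #         "service_endpoint_encapsulation_info": {"vlan": 100}}])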
+        if service_type == "ELINE":
+            if len(connection_points) > 2:
+                raise WimConnectorError('Connections between more than 2 endpoints are not supported')
+            if len(connection_points) < 2:
+                raise WimConnectorError('Connections must be of at least 2 endpoints')
+            """ First step, create the vpn service """    
+            uuid_l2vpn = str(uuid.uuid4())
+            vpn_service = {}
+            vpn_service["vpn-id"] = uuid_l2vpn
+            vpn_service["vpn-scv-type"] = "vpws"
+            vpn_service["svc-topo"] = "any-to-any"
+            vpn_service["customer-name"] = "osm"
+            vpn_service_list = []
+            vpn_service_list.append(vpn_service)
+            vpn_service_l = {"vpn-service": vpn_service_list}
+            response_service_creation = None
+            conn_info = []
+            self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+            try:
+                endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                    self.wim["wim_url"])
+                response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
+                                                          json=vpn_service_l, auth=self.auth)
+            except requests.exceptions.ConnectionError:
+                raise WimConnectorError("Request to create service Timeout", http_code=408)
+            if response_service_creation.status_code == 409:
+                raise WimConnectorError("Service already exists", http_code=response_service_creation.status_code)
+            elif response_service_creation.status_code != requests.codes.created:
+                raise WimConnectorError("Request to create service not accepted",
+                                        http_code=response_service_creation.status_code)
+            """ Second step, create the connections and vpn attachments """   
+            for connection_point in connection_points:
+                connection_point_wan_info = self.search_mapp(connection_point)
+                site_network_access = {}
+                connection = {}
+                if connection_point["service_endpoint_encapsulation_type"] != "none":
+                    if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                        """ The connection is a VLAN """
+                        connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                        tagged = {}
+                        tagged_interf = {}
+                        service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+                        if service_endpoint_encapsulation_info["vlan"] is None:
+                            raise WimConnectorError("VLAN must be provided")
+                        tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+                        tagged["dot1q-vlan-tagged"] = tagged_interf
+                        connection["tagged-interface"] = tagged
+                    else:
+                        raise NotImplementedError("Encapsulation type not implemented")
+                site_network_access["connection"] = connection
+                self.logger.info("Sending connection:{}".format(connection))
+                vpn_attach = {}
+                vpn_attach["vpn-id"] = uuid_l2vpn
+                vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+                site_network_access["vpn-attachment"] = vpn_attach
+                self.logger.info("Sending vpn-attachment: {}".format(vpn_attach))
+                uuid_sna = str(uuid.uuid4())
+                site_network_access["network-access-id"] = uuid_sna
+                site_network_accesses = {}
+                site_network_access_list = []
+                site_network_access_list.append(site_network_access)
+                site_network_accesses["site-network-access"] = site_network_access_list
+                conn_info_d = {}
+                conn_info_d["site"] = connection_point_wan_info["site-id"]
+                conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
+                conn_info_d["mapping"] = None
+                conn_info.append(conn_info_d)
+                try:
+                    endpoint_site_network_access_creation = \
+                        "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+                            self.wim["wim_url"], connection_point_wan_info["site-id"])
+                    response_endpoint_site_network_access_creation = requests.post(
+                        endpoint_site_network_access_creation,
+                        headers=self.headers,
+                        json=site_network_accesses,
+                        auth=self.auth)
+                    
+                    if response_endpoint_site_network_access_creation.status_code == 409:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Site_Network_Access with ID '{}' already exists".format(
+                            site_network_access["network-access-id"]),
+                            http_code=response_endpoint_site_network_access_creation.status_code)
+                    
+                    elif response_endpoint_site_network_access_creation.status_code == 400:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Site {} does not exist".format(connection_point_wan_info["site-id"]),
+                                                http_code=response_endpoint_site_network_access_creation.status_code)
+                    
+                    elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
+                            response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
+                        self.delete_connectivity_service(vpn_service["vpn-id"])
+                        raise WimConnectorError("Request not accepted",
+                                                http_code=response_endpoint_site_network_access_creation.status_code)
+
+                except requests.exceptions.ConnectionError:
+                    self.delete_connectivity_service(vpn_service["vpn-id"])
+                    raise WimConnectorError("Request Timeout", http_code=408)
+            return uuid_l2vpn, conn_info
+
+        else:
+            raise NotImplementedError
+
+    def delete_connectivity_service(self, service_uuid, conn_info=None):
+        """Disconnect multi-site endpoints previously connected
+
+        This method receives as its first argument the UUID generated by
+        ``create_connectivity_service``.
+        """
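+        # DELETE the vpn-service resource; only a 204 (no content) reply is treated as success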
+        try:
+            self.logger.info("Sending delete")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
+                self.wim["wim_url"], service_uuid)
+            response = requests.delete(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.no_content:
+                raise WimConnectorError("Error in the request", http_code=response.status_code)
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+
+    def edit_connectivity_service(self, service_uuid, conn_info=None,
+                                  connection_points=None, **kwargs):
+        """Change an existing connectivity service, see
+        ``create_connectivity_service``"""
+
+        # sites = {"sites": {}}
+        # site_list = []
+        vpn_service = {}
+        vpn_service["svc-topo"] = "any-to-any"
+        counter = 0
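+        # Rebuild the payload for every connection point and PUT it, reusing the
+        # network-access ids stored in conn_info at creation time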
+        for connection_point in connection_points:
+            site_network_access = {}
+            connection_point_wan_info = self.search_mapp(connection_point)
+            params_site = {}
+            params_site["site-id"] = connection_point_wan_info["site-id"]
+            params_site["site-vpn-flavor"] = "site-vpn-flavor-single"
+            device_site = {}
+            device_site["device-id"] = connection_point_wan_info["device-id"]
+            params_site["devices"] = device_site
+            # network_access = {}
+            connection = {}
+            if connection_point["service_endpoint_encapsulation_type"] != "none":
+                if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                    # The connection is a VLAN
+                    connection["encapsulation-type"] = "dot1q-vlan-tagged"
+                    tagged = {}
+                    tagged_interf = {}
+                    service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+                    if service_endpoint_encapsulation_info["vlan"] is None:
+                        raise WimConnectorError("VLAN must be provided")
+                    tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+                    tagged["dot1q-vlan-tagged"] = tagged_interf
+                    connection["tagged-interface"] = tagged
+                else:
+                    raise NotImplementedError("Encapsulation type not implemented")
+            site_network_access["connection"] = connection
+            vpn_attach = {}
+            vpn_attach["vpn-id"] = service_uuid
+            vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+            site_network_access["vpn-attachment"] = vpn_attach
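+            # Reuse the access id recorded at creation time so the PUT updates the existing access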
+            uuid_sna = conn_info[counter]["site-network-access-id"]
+            site_network_access["network-access-id"] = uuid_sna
+            site_network_accesses = {}
+            site_network_access_list = []
+            site_network_access_list.append(site_network_access)
+            site_network_accesses["site-network-access"] = site_network_access_list
+            try:
+                endpoint_site_network_access_edit = \
+                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
+                        self.wim["wim_url"], connection_point_wan_info["site-id"])  # MODIF
+                response_endpoint_site_network_access_edit = requests.put(
+                    endpoint_site_network_access_edit,
+                    headers=self.headers,
+                    json=site_network_accesses,
+                    auth=self.auth)
+                if response_endpoint_site_network_access_edit.status_code == 400:
+                    raise WimConnectorError("Service does not exist",
+                                            http_code=response_endpoint_site_network_access_edit.status_code)
+                elif response_endpoint_site_network_access_edit.status_code != requests.codes.created and \
+                        response_endpoint_site_network_access_edit.status_code != requests.codes.no_content:
+                    raise WimConnectorError("Request not accepted",
+                                            http_code=response_endpoint_site_network_access_edit.status_code)
+            except requests.exceptions.ConnectionError:
+                raise WimConnectorError("Request Timeout", http_code=408)
+            counter += 1
+        return None
+
+    def clear_all_connectivity_services(self):
+        """Delete all WAN Links corresponding to a WIM"""
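+        # DELETE the whole vpn-services container, removing every provisioned service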
+        try:
+            self.logger.info("Sending clear all connectivity services")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+            response = requests.delete(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.no_content:
+                raise WimConnectorError("Unable to clear all connectivity services", http_code=response.status_code)
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
+
+    def get_all_active_connectivity_services(self):
+        """Provide information about all active connections provisioned by a
+        WIM
+        """
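+        # Note: the raw requests.Response is returned; callers must parse the body themselves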
+        try:
+            self.logger.info("Sending get all connectivity services")
+            servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+            response = requests.get(servicepoint, auth=self.auth)
+            if response.status_code != requests.codes.ok:
+                raise WimConnectorError("Unable to get all connectivity services", http_code=response.status_code)
+            return response
+        except requests.exceptions.ConnectionError:
+            raise WimConnectorError("Request Timeout", http_code=408)
index 05d96bb..79cb11f 100644 (file)
@@ -18,5 +18,6 @@ pyvmomi
 progressbar
 prettytable
 boto
+genisoimage
 untangle
 oca
index e4554ae..bbf33e4 100755 (executable)
@@ -259,6 +259,8 @@ then
     pip2 install progressbar || exit 1
     pip2 install prettytable || exit 1
     pip2 install pyvmomi || exit 1
+    [ "$_DISTRO" == "Ubuntu" ] && install_packages "genisoimage"
+    [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "genisoimage"
 
     # required for OpenNebula connector
     pip2 install untangle || exit 1
index f0424fb..3751334 100644 (file)
--- a/stdeb.cfg
+++ b/stdeb.cfg
@@ -2,5 +2,5 @@
 Suite: xenial
 XS-Python-Version: >= 2.7
 Maintainer: Gerardo Garcia <gerardo.garciadeblas@telefonica.com>
-Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-networking-l2gw, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx
+Depends: python-pip, libmysqlclient-dev, libssl-dev, libffi-dev, python-argcomplete, python-boto, python-bottle, python-jsonschema, python-logutils, python-cinderclient, python-glanceclient, python-keystoneclient, python-neutronclient, python-networking-l2gw, python-novaclient, python-openstackclient, python-mysqldb, python-lib-osm-openvim, python-osm-im, python-networkx, genisoimage
 
diff --git a/tox.ini b/tox.ini
index 025f4a0..e451bb8 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -11,12 +11,13 @@ commands=nosetests
 [testenv:flake8]
 basepython = python
 deps = flake8
-commands =
-    flake8 setup.py
+# TODO: for the moment only a few files are tested.
+commands = flake8 osm_ro/wim --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
 
 [testenv:build]
 basepython = python
 deps = stdeb
        setuptools-version-command
 commands = python setup.py --command-packages=stdeb.command bdist_deb
-    
+