Merge "Changes in vimconn_vmware.py: Fixed Bug ID 121: VMWARE Connector: Return Nic... v1.0.3
author     tierno <alfonso.tiernosepulveda@telefonica.com>
Sat, 31 Dec 2016 15:51:05 +0000 (16:51 +0100)
committer  Gerrit Code Review <root@osm.etsi.org>
Sat, 31 Dec 2016 15:51:05 +0000 (16:51 +0100)
17 files changed:
charms/layers/openmano/reactive/layer_openmano.py
charms/layers/openmano/scripts/create-datacenter.sh
database_utils/migrate_mano_db.sh
nfvo.py
openmano_schemas.py
openmanod.py
scenarios/examples/scenario_vnf_additional_disk_based_image.yaml [new file with mode: 0644]
scenarios/examples/scenario_vnf_additional_disk_empty_volume.yaml [new file with mode: 0644]
scenarios/examples/scenario_vnf_no_additional_devices.yaml [new file with mode: 0644]
vimconn.py
vimconn_openstack.py
vimconn_openvim.py
vimconn_vmware.py
vmwarecli.py [new file with mode: 0755]
vnfs/examples/vnf_additional_disk_based_image.yaml [new file with mode: 0644]
vnfs/examples/vnf_additional_disk_empty_volume.yaml [new file with mode: 0644]
vnfs/examples/vnf_no_additional_devices.yaml [new file with mode: 0644]

diff --git a/charms/layers/openmano/reactive/layer_openmano.py b/charms/layers/openmano/reactive/layer_openmano.py
index cc1b474..2320e91 100644 (file)
@@ -32,17 +32,18 @@ INSTALL_PATH = '/opt/openmano'
 USER = 'openmanod'
 
 
-@when('openmano.installed')
-@when('openmano.available')
+@when('openmano.installed', 'openmano.available')
 def openmano_available(openmano):
     # TODO make this configurable via charm config
     openmano.configure(port=9090)
 
 
-@when('openmano.installed')
-@when('db.available', 'db.installed')
-@when('openvim-controller.available')
-@when('openmano.running')
+@when('openvim-controller.available',
+      'db.available',
+      'db.installed',
+      'openmano.installed',
+      'openmano.running',
+      )
 def openvim_available(openvim, db):
     for service in openvim.services():
         for endpoint in service['hosts']:
@@ -71,9 +72,9 @@ def openvim_available(openvim, db):
         break
 
 
-@when('openmano.installed')
-@when('db.available', 'db.installed')
-@when('openvim-controller.available')
+@when('openmano.installed',
+      'db.installed',
+      'openvim-controller.available')
 @when_not('openmano.running')
 def start(*args):
     # TODO: if the service fails to start, we should raise an error to the op
@@ -95,8 +96,7 @@ def start(*args):
     set_state('openmano.running')
 
 
-@when('openmano.installed')
-@when('db.available')
+@when('db.available', 'openmano.installed')
 @when_not('db.installed')
 def setup_db(db):
     """Setup the database
@@ -151,6 +151,7 @@ def setup_db(db):
     status_set('active', 'Database installed.')
     set_state('db.installed')
 
+
 @when_not('openvim-controller.available')
 def need_openvim():
     status_set('waiting', 'Waiting for OpenVIM')
diff --git a/charms/layers/openmano/scripts/create-datacenter.sh b/charms/layers/openmano/scripts/create-datacenter.sh
index 4b192e4..6aafab5 100755 (executable)
@@ -4,20 +4,33 @@ OPENMANO=$HOME/bin/openmano
 export OPENMANO_TENANT=$4
 
 OPENMANO_DATACENTER=`$OPENMANO datacenter-list myov`
-if [ $? -ne 0 ]; then
+if [ $? -eq 0 ]; then
+    # If the datacenter exists, the current approach is to delete the existing
+    # one and create a new one. We may want to change this behavior to retain
+    # the existing datacenter, but this script will also go away in favour of
+    # a python API to OpenMano
+
+    # If the datacenter exists, remove all traces of it before continuing
+    OPENMANO_DATACENTER=`echo $OPENMANO_DATACENTER |gawk '{print $1}'`
+
+    # Delete netmap
+    $OPENMANO datacenter-netmap-delete --all -f --datacenter $OPENMANO_DATACENTER
+
+    # detach
+    $OPENMANO datacenter-detach -a $OPENMANO_DATACENTER
+
     # Make sure the datacenter is deleted
-    $OPENMANO datacenter-delete myov
+    $OPENMANO datacenter-delete --force myov
+
     OPENMANO_DATACENTER=`$OPENMANO datacenter-create myov http://$1:$2/openvim`
 fi
-export OPENMANO_DATACENTER=`echo $OPENMANO_DATACENTER |gawk '{print $1}'`
+OPENMANO_DATACENTER=`echo $OPENMANO_DATACENTER |gawk '{print $1}'`
 
-#export OPENMANO_DATACENTER=`$OPENMANO datacenter-create myov http://$1:$2/openvim |gawk '{print $1}'`
-# FIXME: don't add this to .bashrc if it already exists.
-if ! grep -q "^export OPENMANO_DATACENTER" $HOME/.bashrc
-then
-    echo "export OPENMANO_DATACENTER=$OPENMANO_DATACENTER " >> $HOME/.bashrc
-fi
 
-# TODO: Test idempotency. We may need to check and remove existing data
+# if ! grep -q "^export OPENMANO_DATACENTER" $HOME/.bashrc
+# then
+#     echo "export OPENMANO_DATACENTER=$OPENMANO_DATACENTER " >> $HOME/.bashrc
+# fi
+
 $OPENMANO datacenter-attach myov --vim-tenant-id $3
 $OPENMANO datacenter-netmap-import -f --datacenter $OPENMANO_DATACENTER
diff --git a/database_utils/migrate_mano_db.sh b/database_utils/migrate_mano_db.sh
index e69d8a5..208ec7d 100755 (executable)
@@ -184,6 +184,7 @@ DATABASE_TARGET_VER_NUM=0
 [ $OPENMANO_VER_NUM -ge 4057 ] && DATABASE_TARGET_VER_NUM=14  #0.4.57=>  14
 [ $OPENMANO_VER_NUM -ge 4059 ] && DATABASE_TARGET_VER_NUM=15  #0.4.59=>  15
 [ $OPENMANO_VER_NUM -ge 5002 ] && DATABASE_TARGET_VER_NUM=16  #0.5.2 =>  16
+[ $OPENMANO_VER_NUM -ge 5003 ] && DATABASE_TARGET_VER_NUM=17  #0.5.3 =>  17
 #TODO ... put next versions here
 
 
@@ -676,12 +677,25 @@ function upgrade_to_16(){
 function downgrade_from_16(){
     echo "    downgrade database from version 0.16 to version 0.15"
     echo "      remove column 'config' at table 'datacenter_tenants', restoring lenght 'vim_tenant_name/id'"
-    echo "ALTER TABLE datacenter_tenants DROP COLUMN config" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE datacenter_tenants DROP COLUMN config;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
     echo "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
     echo "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
     echo "DELETE FROM schema_version WHERE version_int='16';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
 }
 
+function upgrade_to_17(){
+    echo "    upgrade database from version 0.16 to version 0.17"
+    echo "      add column 'extended' at table 'datacenter_flavors'"
+    echo "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_17(){
+    echo "    downgrade database from version 0.17 to version 0.16"
+    echo "      remove column 'extended' from table 'datacenter_flavors'"
+    echo "ALTER TABLE datacenters_flavors DROP COLUMN extended;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int='17';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
 function upgrade_to_X(){
     echo "      change 'datacenter_nets'"
     echo "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
diff --git a/nfvo.py b/nfvo.py
index d4935bf..0d60ff7 100644 (file)
--- a/nfvo.py
+++ b/nfvo.py
@@ -42,6 +42,8 @@ from db_base import db_base_Exception
 global global_config
 global vimconn_imported
 global logger
+global default_volume_size
+default_volume_size = '5' #size in GB
 
 
 vimconn_imported={} #dictionary with VIM type as key, loaded module as value
@@ -395,9 +397,11 @@ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_
     
         #Create the flavor in VIM
         #Translate images at devices from MANO id to VIM id
+        disk_list = []
         if 'extended' in flavor_dict and flavor_dict['extended']!=None and "devices" in flavor_dict['extended']:
             #make a copy of original devices
             devices_original=[]
+
             for device in flavor_dict["extended"].get("devices",[]):
                 dev={}
                 dev.update(device)
@@ -409,7 +413,9 @@ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_
             dev_nb=0
             for index in range(0,len(devices_original)) :
                 device=devices_original[index]
-                if "image" not in device or "image name" not in device:
+                if "image" not in device and "image name" not in device:
+                    if 'size' in device:
+                        disk_list.append({'size': device.get('size', default_volume_size)})
                     continue
                 image_dict={}
                 image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
@@ -425,6 +431,10 @@ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_
                 image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
                 image_dict["uuid"]=image_mano_id
                 image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)
+
+                #save disk information: the image the disk must be based on (if any) and its size
+                disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})
+
                 flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
                 dev_nb += 1
         if len(flavor_db)>0:
@@ -451,7 +461,14 @@ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_
         #if reach here the flavor has been create or exist
         if len(flavor_db)==0:
             #add new vim_id at datacenters_flavors
-            mydb.new_row('datacenters_flavors', {'datacenter_id':vim_id, 'flavor_id':flavor_mano_id, 'vim_id': flavor_vim_id, 'created':flavor_created})
+            extended_devices_yaml = None
+            if len(disk_list) > 0:
+                extended_devices = dict()
+                extended_devices['disks'] = disk_list
+                extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
+            mydb.new_row('datacenters_flavors',
+                        {'datacenter_id':vim_id, 'flavor_id':flavor_mano_id, 'vim_id': flavor_vim_id,
+                        'created':flavor_created,'extended': extended_devices_yaml})
         elif flavor_db[0]["vim_id"]!=flavor_vim_id:
             #modify existing vim_id at datacenters_flavors
             mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id}, WHERE={'datacenter_id':vim_id, 'flavor_id':flavor_mano_id})
@@ -1921,7 +1938,28 @@ def create_instance(mydb, tenant_id, instance_dict):
                 flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
                 if flavor_dict['extended']!=None:
                     flavor_dict['extended']= yaml.load(flavor_dict['extended'])
-                flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)                
+                flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
+
+
+
+
+                #Obtain information for additional disks
+                extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',), WHERE={'vim_id': flavor_id})
+                if not extended_flavor_dict:
+                    raise NfvoException("flavor '{}' not found".format(flavor_id), HTTP_Not_Found)
+                    return
+
+                #extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0])
+                myVMDict['disks'] = None
+                extended_info = extended_flavor_dict[0]['extended']
+                if extended_info != None:
+                    extended_flavor_dict_yaml = yaml.load(extended_info)
+                    if 'disks' in extended_flavor_dict_yaml:
+                        myVMDict['disks'] = extended_flavor_dict_yaml['disks']
+
+
+
+
                 vm['vim_flavor_id'] = flavor_id
                 
                 myVMDict['imageRef'] = vm['vim_image_id']
@@ -1983,7 +2021,9 @@ def create_instance(mydb, tenant_id, instance_dict):
                 #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
                 #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
                 vm_id = vim.new_vminstance(myVMDict['name'],myVMDict['description'],myVMDict.get('start', None),
-                        myVMDict['imageRef'],myVMDict['flavorRef'],myVMDict['networks'], cloud_config = cloud_config)
+                        myVMDict['imageRef'],myVMDict['flavorRef'],myVMDict['networks'], cloud_config = cloud_config,
+                        disk_list = myVMDict['disks'])
+
                 vm['vim_id'] = vm_id
                 rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
                 #put interface uuid back to scenario[vnfs][vms[[interfaces]
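
The round trip between create_or_use_flavor() and create_instance() is just a small YAML document stored in the new 'extended' column of datacenters_flavors. A minimal sketch, assuming PyYAML; the uuids are placeholders, not real values:

    import yaml

    # one entry per additional device: optional image to clone from, plus a size in GB
    disk_list = [
        {'image_id': 'vim-image-uuid', 'size': '5'},   # disk based on an image (placeholder uuid)
        {'size': '10'},                                # empty volume
    ]

    # roughly what create_or_use_flavor() stores in datacenters_flavors.extended
    extended_devices_yaml = yaml.safe_dump({'disks': disk_list},
                                           default_flow_style=True, width=256)
    print(extended_devices_yaml)
    # -> {disks: [{image_id: vim-image-uuid, size: '5'}, {size: '10'}]}

    # roughly what create_instance() loads back and hands to vim.new_vminstance(..., disk_list=...)
    print(yaml.safe_load(extended_devices_yaml)['disks'])
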
diff --git a/openmano_schemas.py b/openmano_schemas.py
index ffbacd3..013234f 100644 (file)
@@ -24,7 +24,7 @@
 '''
 JSON schemas used by openmano httpserver.py module to parse the different files and messages sent through the API 
 '''
-__author__="Alfonso Tierno, Gerardo Garcia"
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$09-oct-2014 09:09:48$"
 
 #Basis schemas
@@ -55,6 +55,7 @@ schema_version_2={"type":"integer","minimum":2,"maximum":2}
 #schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
 log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
 checksum_schema={"type":"string", "pattern":"^[0-9a-fA-F]{32}$"}
+size_schema={"type":"integer","minimum":1,"maximum":100}
 
 metadata_schema={
     "type":"object",
@@ -465,7 +466,8 @@ devices_schema={
             "image": path_schema,
             "image name": name_schema,
             "image checksum": checksum_schema,
-            "image metadata": metadata_schema, 
+            "image metadata": metadata_schema,
+            "size": size_schema,
             "vpci":pci_schema,
             "xml":xml_text_schema,
         },
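
The added size_schema bounds the per-device 'size' to whole gigabytes between 1 and 100. A small illustration, assuming the jsonschema package; the values are made up:

    from jsonschema import ValidationError, validate

    size_schema = {"type": "integer", "minimum": 1, "maximum": 100}

    validate(10, size_schema)        # a 10 GB extra disk: accepted

    try:
        validate(0, size_schema)     # below the minimum: rejected
    except ValidationError as e:
        print("rejected: " + e.message)
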
diff --git a/openmanod.py b/openmanod.py
index bbbd7d3..b50ca9d 100755 (executable)
@@ -33,9 +33,9 @@ It loads the configuration file and launches the http_server thread that will li
 '''
 __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$26-aug-2014 11:09:29$"
-__version__="0.5.2-r510"
-version_date="Oct 2016"
-database_version="0.16"      #expected database schema version
+__version__="0.5.3-r511"
+version_date="Dec 2016"
+database_version="0.17"      #expected database schema version
 
 import httpserver
 import time
diff --git a/scenarios/examples/scenario_vnf_additional_disk_based_image.yaml b/scenarios/examples/scenario_vnf_additional_disk_based_image.yaml
new file mode 100644 (file)
index 0000000..6612369
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_additional_disk_based_image
+  description:   Just deploy vnf_2_disks
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_2_disks:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_additional_disk_based_image   #can fail if several VNFs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    internal:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_2_disks:  mgmt0
+
diff --git a/scenarios/examples/scenario_vnf_additional_disk_empty_volume.yaml b/scenarios/examples/scenario_vnf_additional_disk_empty_volume.yaml
new file mode 100644 (file)
index 0000000..0644a69
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_additional_disk_empty_volume
+  description:   Just deploy vnf_2_disks
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_2_disks:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_additional_disk_empty_volume   #can fail if several VNFs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    internal:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_2_disks:  mgmt0
+
diff --git a/scenarios/examples/scenario_vnf_no_additional_devices.yaml b/scenarios/examples/scenario_vnf_no_additional_devices.yaml
new file mode 100644 (file)
index 0000000..10ef4b2
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          vnf_no_additional_devices
+  description:   Just deploy vnf_2_disks
+  public:        false      # if available for other tenants
+  vnfs:
+    vnf_2_disks:                     # vnf name in the scenario
+      #identify an already openmano uploaded VNF either by vnf_id (uuid, preferred) or vnf_name
+      #vnf_id:    0c0dcc20-c5d5-11e6-a9fb-fa163e2ae06e                  #preferred id method
+      vnf_name:  vnf_no_additional_devices   #can fail if several VNFs match this name
+      #graph:     {"y":399,"x":332,"ifaces":{"left":[["xe0","d"],["xe1","d"]],"bottom":[["eth0","v"],["eth1","m"]]}}
+  networks:                
+    internal:
+      # Connections based on external networks (datacenter nets) must include the external network in the list of nodes
+      type:      bridge
+      external:  true       #this will be connected outside
+      interfaces:
+      -   vnf_2_disks:  mgmt0
+
diff --git a/vimconn.py b/vimconn.py
index b5f8b07..814be65 100644 (file)
@@ -303,7 +303,7 @@ class vimconnector():
         '''
         raise vimconnNotImplemented( "Should have implemented this" )
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None):
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
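
Every connector now accepts an optional disk_list describing the additional volumes to create for the instance. A hypothetical stand-in with the same signature, only to show the expected shape of the argument; the names and uuids are placeholders:

    def new_vminstance(name, description, start, image_id, flavor_id, net_list,
                       cloud_config=None, disk_list=None):
        # stand-in only: a real connector creates the VM and its extra volumes here
        return {'name': name, 'disks': disk_list or []}

    print(new_vminstance('vnf_2_disks-VM1', 'demo VM', False,
                         'vim-image-uuid', 'vim-flavor-uuid', net_list=[],
                         disk_list=[{'image_id': 'vim-image-uuid', 'size': '5'},
                                    {'size': '10'}]))
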
diff --git a/vimconn_openstack.py b/vimconn_openstack.py
index e0a3aa3..cdd1178 100644 (file)
@@ -24,7 +24,7 @@
 '''
 osconnector implements all the methods to interact with openstack using the python-client.
 '''
-__author__="Alfonso Tierno, Gerardo Garcia"
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research"
 __date__ ="$22-jun-2014 11:19:29$"
 
 import vimconn
@@ -32,15 +32,20 @@ import json
 import yaml
 import logging
 import netaddr
+import time
 
-from novaclient import client as nClient, exceptions as nvExceptions
-import keystoneclient.v2_0.client as ksClient
+from novaclient import client as nClient_v2, exceptions as nvExceptions, api_versions as APIVersion
+import keystoneclient.v2_0.client as ksClient_v2
+from novaclient.v2.client import Client as nClient
+import keystoneclient.v3.client as ksClient
 import keystoneclient.exceptions as ksExceptions
 import glanceclient.v2.client as glClient
 import glanceclient.client as gl1Client
 import glanceclient.exc as gl1Exceptions
+import cinderclient.v2.client as cClient_v2
 from httplib import HTTPException
-from neutronclient.neutron import client as neClient
+from neutronclient.neutron import client as neClient_v2
+from neutronclient.v2_0 import client as neClient
 from neutronclient.common import exceptions as neExceptions
 from requests.exceptions import ConnectionError
 
@@ -55,12 +60,18 @@ vmStatus2manoFormat={'ACTIVE':'ACTIVE',
 netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
                      }
 
+#global var to have a timeout creating and deleting volumes
+volume_timeout = 60
+
 class vimconnector(vimconn.vimconnector):
     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, config={}):
         '''using common constructor parameters. In this case 
         'url' is the keystone authorization url,
        'url_admin' is not used
         '''
+        self.osc_api_version = 'v2.0'
+        if config.get('APIversion') == 'v3.3':
+            self.osc_api_version = 'v3.3'
         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
         
         self.k_creds={}
@@ -81,6 +92,10 @@ class vimconnector(vimconn.vimconnector):
         if passwd:
             self.k_creds['password'] = passwd
             self.n_creds['api_key']  = passwd
+        if self.osc_api_version == 'v3.3':
+            self.k_creds['project_name'] = tenant_name
+            self.k_creds['project_id'] = tenant_id
+
         self.reload_client       = True
         self.logger = logging.getLogger('openmano.vim.openstack')
         if log_level:
@@ -93,21 +108,37 @@ class vimconnector(vimconn.vimconnector):
         if index=='tenant_id':
             self.reload_client=True
             self.tenant_id = value
-            if value:
-                self.k_creds['tenant_id'] = value
-                self.n_creds['tenant_id']  = value
+            if self.osc_api_version == 'v3.3':
+                if value:
+                    self.k_creds['project_id'] = value
+                    self.n_creds['project_id']  = value
+                else:
+                    del self.k_creds['project_id']
+                    del self.n_creds['project_id']
             else:
-                del self.k_creds['tenant_name']
-                del self.n_creds['project_id']
+                if value:
+                    self.k_creds['tenant_id'] = value
+                    self.n_creds['tenant_id']  = value
+                else:
+                    del self.k_creds['tenant_id']
+                    del self.n_creds['tenant_id']
         elif index=='tenant_name':
             self.reload_client=True
             self.tenant_name = value
-            if value:
-                self.k_creds['tenant_name'] = value
-                self.n_creds['project_id']  = value
+            if self.osc_api_version == 'v3.3':
+                if value:
+                    self.k_creds['project_name'] = value
+                    self.n_creds['project_name']  = value
+                else:
+                    del self.k_creds['project_name']
+                    del self.n_creds['project_name']
             else:
-                del self.k_creds['tenant_name']
-                del self.n_creds['project_id']
+                if value:
+                    self.k_creds['tenant_name'] = value
+                    self.n_creds['project_id']  = value
+                else:
+                    del self.k_creds['tenant_name']
+                    del self.n_creds['project_id']
         elif index=='user':
             self.reload_client=True
             self.user = value
@@ -146,14 +177,23 @@ class vimconnector(vimconn.vimconnector):
             #test valid params
             if len(self.n_creds) <4:
                 raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
-            self.nova = nClient.Client(2, **self.n_creds)
-            self.keystone = ksClient.Client(**self.k_creds)
+            if self.osc_api_version == 'v3.3':
+                self.nova = nClient(APIVersion(version_str='2'), **self.n_creds)
+                #TODO To be updated for v3
+                #self.cinder = cClient.Client(**self.n_creds)
+                self.keystone = ksClient.Client(**self.k_creds)
+                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
+                self.neutron = neClient.Client(APIVersion(version_str='2'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
+            else:
+                self.nova = nClient_v2.Client('2', **self.n_creds)
+                self.cinder = cClient_v2.Client(**self.n_creds)
+                self.keystone = ksClient_v2.Client(**self.k_creds)
+                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
+                self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
             self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
             self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds)  #TODO check k_creds vs n_creds
-            self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
-            self.neutron = neClient.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
             self.reload_client = False
-        
+
     def __net_os2mano(self, net_list_dict):
         '''Transform the net openstack format to mano format
         net_list_dict can be a list of dict or a single dict'''
@@ -195,14 +235,17 @@ class vimconnector(vimconn.vimconnector):
             <other VIM specific>
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         '''
-        self.logger.debug("Getting tenant from VIM filter: '%s'", str(filter_dict))
+        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            tenant_class_list=self.keystone.tenants.findall(**filter_dict)
-            tenant_list=[]
-            for tenant in tenant_class_list:
-                tenant_list.append(tenant.to_dict())
-            return tenant_list
+            if self.osc_api_version == 'v3.3':
+                project_class_list=self.keystone.projects.findall(**filter_dict)
+            else:
+                project_class_list=self.keystone.tenants.findall(**filter_dict)
+            project_list=[]
+            for project in project_class_list:
+                project_list.append(project.to_dict())
+            return project_list
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
 
@@ -211,8 +254,11 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
         try:
             self._reload_connection()
-            tenant=self.keystone.tenants.create(tenant_name, tenant_description)
-            return tenant.id
+            if self.osc_api_version == 'v3.3':
+                project=self.keystone.projects.create(tenant_name, tenant_description)
+            else:
+                project=self.keystone.tenants.create(tenant_name, tenant_description)
+            return project.id
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
 
@@ -221,11 +267,14 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
         try:
             self._reload_connection()
-            self.keystone.tenants.delete(tenant_id)
+            if self.osc_api_version == 'v3.3':
+                self.keystone.projects.delete(tenant_id)
+            else:
+                self.keystone.tenants.delete(tenant_id)
             return tenant_id
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
-        
+
     def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
         '''Adds a tenant network to VIM. Returns the network identifier'''
         self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
@@ -298,6 +347,8 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
+            if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
+                filter_dict['project_id'] = filter_dict.pop('tenant_id')
             net_dict=self.neutron.list_networks(**filter_dict)
             net_list=net_dict["networks"]
             self.__net_os2mano(net_list)
@@ -607,7 +658,7 @@ class vimconnector(vimconn.vimconnector):
         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
             self._format_exception(e)
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None):
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -621,6 +672,7 @@ class vimconnector(vimconn.vimconnector):
                 use: 'data', 'bridge',  'mgmt'
                 type: 'virtual', 'PF', 'VF', 'VFnotShared'
                 vim_id: filled/added by this function
+                floating_ip: True/False (or it can be None)
                 #TODO ip, security groups
         Returns the instance identifier
         '''
@@ -628,41 +680,42 @@ class vimconnector(vimconn.vimconnector):
         try:
             metadata={}
             net_list_vim=[]
+            external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
             self._reload_connection()
             metadata_vpci={} #For a specific neutron plugin 
             for net in net_list:
                 if not net.get("net_id"): #skip non connected iface
                     continue
-                if net["type"]=="virtual":
-                    net_list_vim.append({'net-id': net["net_id"]})
-                    if "vpci" in net:
-                        metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"]=="PF":
-                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
-                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
-                else: #VF
-                    if "vpci" in net:
-                        if "VF" not in metadata_vpci:
-                            metadata_vpci["VF"]=[]
-                        metadata_vpci["VF"].append([ net["vpci"], "" ])
+                if net["type"]=="virtual" or net["type"]=="VF":
                     port_dict={
-                         "network_id": net["net_id"],
-                         "name": net.get("name"),
-                         "binding:vnic_type": "direct", 
-                         "admin_state_up": True
-                    }
+                        "network_id": net["net_id"],
+                        "name": net.get("name"),
+                        "admin_state_up": True
+                    }    
+                    if net["type"]=="virtual":
+                        if "vpci" in net:
+                            metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                    else: # for VF
+                        if "vpci" in net:
+                            if "VF" not in metadata_vpci:
+                                metadata_vpci["VF"]=[]
+                            metadata_vpci["VF"].append([ net["vpci"], "" ])
+                        port_dict["binding:vnic_type"]="direct"
                     if not port_dict["name"]:
-                        port_dict["name"] = name
+                        port_dict["name"]=name
                     if net.get("mac_address"):
                         port_dict["mac_address"]=net["mac_address"]
-                    #TODO: manage having SRIOV without vlan tag
-                    #if net["type"] == "VFnotShared"
-                    #    port_dict["vlan"]=0
                     new_port = self.neutron.create_port({"port": port_dict })
                     net["mac_adress"] = new_port["port"]["mac_address"]
                     net["vim_id"] = new_port["port"]["id"]
-                    net["ip"] = new_port["port"].get("fixed_ips",[{}])[0].get("ip_address")
+                    net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
                     net_list_vim.append({"port-id": new_port["port"]["id"]})
+                else:   # for PF
+                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
+                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
+                if net.get('floating_ip', False):
+                    external_network.append(net)
+                 
             if metadata_vpci:
                 metadata = {"pci_assignement": json.dumps(metadata_vpci)}
                 if len(metadata["pci_assignement"]) >255:
@@ -696,29 +749,88 @@ class vimconnector(vimconn.vimconnector):
             elif isinstance(cloud_config, str):
                 userdata = cloud_config
             else:
-                userdata=None    
-            
+                userdata=None
+
+            #Create additional volumes in case these are present in disk_list
+            block_device_mapping = None
+            base_disk_index = ord('b')
+            if disk_list != None:
+                block_device_mapping = dict()
+                for disk in disk_list:
+                    if 'image_id' in disk:
+                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
+                                    chr(base_disk_index), imageRef = disk['image_id'])
+                    else:
+                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                    chr(base_disk_index))
+                    block_device_mapping['_vd' +  chr(base_disk_index)] = volume.id
+                    base_disk_index += 1
+
+                #wait until volumes are with status available
+                keep_waiting = True
+                elapsed_time = 0
+                while keep_waiting and elapsed_time < volume_timeout:
+                    keep_waiting = False
+                    for volume_id in block_device_mapping.itervalues():
+                        if self.cinder.volumes.get(volume_id).status != 'available':
+                            keep_waiting = True
+                    if keep_waiting:
+                        time.sleep(1)
+                        elapsed_time += 1
+
+                #if we exceeded the timeout rollback
+                if elapsed_time >= volume_timeout:
+                    #delete the volumes we just created
+                    for volume_id in block_device_mapping.itervalues():
+                        self.cinder.volumes.delete(volume_id)
+
+                    #delete ports we just created
+                    for net_item  in net_list_vim:
+                        if 'port-id' in net_item:
+                            self.neutron.delete_port(net_item['port-id'])
+
+                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                                   http_code=vimconn.HTTP_Request_Timeout)
+
             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
-                                              security_groups   = security_groups,
-                                              availability_zone = self.config.get('availability_zone'),
-                                              key_name          = self.config.get('keypair'),
-                                              userdata=userdata
-                                        ) #, description=description)
-            
-            
+                                              security_groups=security_groups,
+                                              availability_zone=self.config.get('availability_zone'),
+                                              key_name=self.config.get('keypair'),
+                                              userdata=userdata,
+                                              block_device_mapping = block_device_mapping
+                                              )  # , description=description)
             #print "DONE :-)", server
             
-#             #TODO   server.add_floating_ip("10.95.87.209")
-#             #To look for a free floating_ip
-#             free_floating_ip = None
-#             for floating_ip in self.neutron.list_floatingips().get("floatingips", () ):
-#                 if not floating_ip["port_id"]:
-#                     free_floating_ip = floating_ip["floating_ip_address"]
-#                     break
-#             if free_floating_ip:
-#                 server.add_floating_ip(free_floating_ip)
+            pool_id = None
+            floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+            for floating_network in external_network:
+                assigned = False
+                while(assigned == False):
+                    if floating_ips:
+                        ip = floating_ips.pop(0)
+                        if not ip.get("port_id", False):
+                            free_floating_ip = ip.get("floating_ip_address")
+                            try:
+                                fix_ip = floating_network.get('ip')
+                                server.add_floating_ip(free_floating_ip, fix_ip)
+                                assigned = True
+                            except Exception as e:
+                                self.delete_vminstance(server.id)
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
+                    else:
+                        pool_id = floating_network.get('net_id')
+                        param = {'floatingip': {'floating_network_id': pool_id}}
+                        try:
+                            #self.logger.debug("Creating floating IP")
+                            new_floating_ip = self.neutron.create_floatingip(param)
+                            free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
+                            fix_ip = floating_network.get('ip')
+                            server.add_floating_ip(free_floating_ip, fix_ip)
+                            assigned=True
+                        except Exception as e:
+                            self.delete_vminstance(server.id)
+                            raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
                 
-            
             return server.id
 #        except nvExceptions.NotFound as e:
 #            error_value=-vimconn.HTTP_Not_Found
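
The floating-IP handling above is driven by a per-interface flag in net_list. A self-contained sketch of the expected input and of how the external interfaces are singled out; the net ids are placeholders:

    net_list = [
        {'net_id': 'mgmt-net-uuid', 'type': 'virtual', 'use': 'mgmt', 'floating_ip': True},
        {'net_id': 'data-net-uuid', 'type': 'VF', 'use': 'data'},
    ]

    # same selection the connector performs while building the ports
    external_network = [net for net in net_list if net.get('floating_ip', False)]
    print(external_network)   # only the mgmt interface gets a floating IP assigned later
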
@@ -804,7 +916,32 @@ class vimconnector(vimconn.vimconnector):
                     self.neutron.delete_port(p["id"])
                 except Exception as e:
                     self.logger.error("Error deleting port: " + type(e).__name__ + ": "+  str(e))
+
+            #Note: the detach call below is commented out because detaching the volumes made servers.delete misbehave
+            #dettach volumes attached
+            server = self.nova.servers.get(vm_id)
+            volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']
+            #for volume in volumes_attached_dict:
+            #    self.cinder.volumes.detach(volume['id'])
+
             self.nova.servers.delete(vm_id)
+
+            #delete the volumes.
+            #Deleting the server should leave them in 'available' status;
+            #this loop waits for that before deleting each one
+            keep_waiting = True
+            elapsed_time = 0
+            while keep_waiting and elapsed_time < volume_timeout:
+                keep_waiting = False
+                for volume in volumes_attached_dict:
+                    if self.cinder.volumes.get(volume['id']).status != 'available':
+                        keep_waiting = True
+                    else:
+                        self.cinder.volumes.delete(volume['id'])
+                if keep_waiting:
+                    time.sleep(1)
+                    elapsed_time += 1
+
             return vm_id
         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
             self._format_exception(e)
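
Both the create and the delete paths above poll Cinder until the volumes reach 'available', bounded by volume_timeout. A condensed, dependency-free sketch of that polling pattern; get_status stands in for self.cinder.volumes.get(...).status:

    import time

    volume_timeout = 60   # seconds, as defined at module level above

    def wait_for_volumes(volume_ids, get_status, timeout=volume_timeout):
        # poll once per second until every volume is 'available' or the timeout expires
        elapsed_time = 0
        while elapsed_time < timeout:
            if all(get_status(vol_id) == 'available' for vol_id in volume_ids):
                return True
            time.sleep(1)
            elapsed_time += 1
        return False   # caller rolls back (deletes the volumes and ports it created)

    statuses = {'vol-1': 'available', 'vol-2': 'available'}   # placeholder status source
    print(wait_for_volumes(['vol-1', 'vol-2'], statuses.get))
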
diff --git a/vimconn_openvim.py b/vimconn_openvim.py
index d415532..241f63d 100644 (file)
@@ -776,7 +776,7 @@ class vimconnector(vimconn.vimconnector):
             #print text
             return -vim_response.status_code,text
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list, cloud_config=None):
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list, cloud_config=None, disk_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
diff --git a/vimconn_vmware.py b/vimconn_vmware.py
index 27583e0..c8fc73a 100644 (file)
@@ -1113,7 +1113,7 @@ class vimconnector(vimconn.vimconnector):
         return None
 
     def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
-                       cloud_config=None):
+                       cloud_config=None, disk_list=None):
         """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
diff --git a/vmwarecli.py b/vmwarecli.py
new file mode 100755 (executable)
index 0000000..4583a65
--- /dev/null
@@ -0,0 +1,819 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##
+# This file is a standalone VMware vCloud Director utility
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: mbayramov@vmware.com
+##
+
+"""
+
+Standalone application that leverages the openmano vmware connector to work with the vCloud Director REST API.
+
+ - Provides the capability to create and delete a VDC for a specific organization.
+ - Create, delete and manage networks for a specific VDC
+ - List deployed VMs, vApps, VDCs and organizations
+ - View detailed information about a VM / vApp, organization etc.
+ - Operate with images: upload / boot / power on etc.
+
+ Usage example.
+
+ List organization created in vCloud director
+  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
+
+ List VDC for particular organization
+  vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
+
+ Upload image
+  python vmwarecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
+
+ Boot Image
+    python vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
+
+ View vApp
+    python vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
+
+ List VMs
+    python vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
+
+ List VDC in OSM format
+  python vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
+
+Mustafa Bayramov
+mbayramov@vmware.com
+"""
+import os
+import argparse
+import traceback
+import uuid
+
+from xml.etree import ElementTree as ET
+
+import sys
+from pyvcloud import Http
+
+import logging
+import vimconn
+import time
+import uuid
+import urllib3
+import requests
+
+from vimconn_vmware import vimconnector
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+from prettytable import PrettyTable
+
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+__author__ = "Mustafa Bayramov"
+__date__ = "$16-Sep-2016 11:09:29$"
+
+
+# TODO move to main vim
+def delete_network_action(vca=None, network_uuid=None):
+    """
+    Method leverages vCloud director and query network based on network uuid
+
+    Args:
+        vca - is active VCA connection.
+        network_uuid - is a network uuid
+
+        Returns:
+            The return XML respond
+    """
+
+    if vca is None or network_uuid is None:
+        return None
+
+    url_list = [vca.host, '/api/admin/network/', network_uuid]
+    vm_list_rest_call = ''.join(url_list)
+
+    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        response = Http.get(url=vm_list_rest_call,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+        if response.status_code == requests.codes.ok:
+            print response.content
+            return response.content
+
+    return None
+
+
+def print_vapp(vapp_dict=None):
+    """ Method takes vapp_dict and print in tabular format
+
+    Args:
+        vapp_dict: container vapp object.
+
+        Returns:
+            The return nothing
+    """
+
+    # following key available to print
+    # {'status': 'POWERED_OFF', 'storageProfileName': '*', 'hardwareVersion': '7', 'vmToolsVersion': '0',
+    #  'memoryMB': '384',
+    #  'href': 'https://172.16.254.206/api/vAppTemplate/vm-129e22e8-08dc-4cb6-8358-25f635e65d3b',
+    #  'isBusy': 'false', 'isDeployed': 'false', 'isInMaintenanceMode': 'false', 'isVAppTemplate': 'true',
+    #  'networkName': 'nat', 'isDeleted': 'false', 'catalogName': 'Cirros',
+    #  'containerName': 'Cirros Template', #  'container':
+    #  'https://172.16.254.206/api/vAppTemplate/vappTemplate-b966453d-c361-4505-9e38-ccef45815e5d',
+    #  'name': 'Cirros', 'pvdcHighestSupportedHardwareVersion': '11', 'isPublished': 'false',
+    #  'numberOfCpus': '1', 'vdc': 'https://172.16.254.206/api/vdc/a5056f85-418c-4bfd-8041-adb0f48be9d9',
+    #  'guestOs': 'Other (32-bit)', 'isVdcEnabled': 'true'}
+
+    if vapp_dict is None:
+        return
+
+    vm_table = PrettyTable(['vm   uuid',
+                            'vapp name',
+                            'vapp uuid',
+                            'network name',
+                            'storage name',
+                            'vcpu', 'memory', 'hw ver','deployed','status'])
+    for k in vapp_dict:
+        entry = []
+        entry.append(k)
+        entry.append(vapp_dict[k]['containerName'])
+        # vm-b1f5cd4c-2239-4c89-8fdc-a41ff18e0d61
+        entry.append(vapp_dict[k]['container'].split('/')[-1:][0][5:])
+        entry.append(vapp_dict[k]['networkName'])
+        entry.append(vapp_dict[k]['storageProfileName'])
+        entry.append(vapp_dict[k]['numberOfCpus'])
+        entry.append(vapp_dict[k]['memoryMB'])
+        entry.append(vapp_dict[k]['pvdcHighestSupportedHardwareVersion'])
+        entry.append(vapp_dict[k]['isDeployed'])
+        entry.append(vapp_dict[k]['status'])
+
+        vm_table.add_row(entry)
+
+    print vm_table
+
+
+def print_org(org_dict=None):
+    """ Method takes vapp_dict and print in tabular format
+
+    Args:
+        org_dict:  dictionary of organization where key is org uuid.
+
+        Returns:
+            The return nothing
+    """
+
+    if org_dict is None:
+        return
+
+    org_table = PrettyTable(['org uuid', 'name'])
+    for k in org_dict:
+        entry = [k, org_dict[k]]
+        org_table.add_row(entry)
+
+    print org_table
+
+
+def print_vm_list(vm_dict=None):
+    """ Method takes vapp_dict and print in tabular format
+
+    Args:
+        vm_dict:  dictionary of organization where key is org uuid.
+
+        Returns:
+            The return nothing
+    """
+    if vm_dict is None:
+        return
+
+    vm_table = PrettyTable(
+        ['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])
+
+    try:
+        for k in vm_dict:
+            entry = []
+            entry.append(k)
+            entry.append(vm_dict[k]['name'])
+            entry.append(vm_dict[k]['container'].split('/')[-1:][0][5:])
+            entry.append(vm_dict[k]['vdc'].split('/')[-1:][0])
+            entry.append(vm_dict[k]['networkName'])
+            entry.append(vm_dict[k]['isDeployed'])
+            entry.append(vm_dict[k]['numberOfCpus'])
+            entry.append(vm_dict[k]['memoryMB'])
+            entry.append(vm_dict[k]['status'])
+            vm_table.add_row(entry)
+        print vm_table
+    except KeyError:
+        logger.error("wrong key {}".format(KeyError.message))
+        pass
+
+
+def print_vdc_list(org_dict=None):
+    """ Method takes vapp_dict and print in tabular format
+
+    Args:
+        org_dict:  dictionary of organization where key is org uuid.
+
+        Returns:
+            The return nothing
+    """
+    if org_dict is None:
+        return
+    try:
+        vdcs_dict = {}
+        if org_dict.has_key('vdcs'):
+            vdcs_dict = org_dict['vdcs']
+        vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
+        for k in vdcs_dict:
+            entry = [k, vdcs_dict[k]]
+            vdc_table.add_row(entry)
+
+        print vdc_table
+    except KeyError:
+        logger.error("wrong key {}".format(KeyError.message))
+        logger.debug(traceback.format_exc())
+
+
+def print_network_list(org_dict=None):
+    """ Method print network list.
+
+    Args:
+        org_dict:   dictionary of organization that contain key networks with a list of all
+                    network for for specific VDC
+
+        Returns:
+            The return nothing
+    """
+    if org_dict is None:
+        return
+    try:
+        network_dict = {}
+        if org_dict.has_key('networks'):
+            network_dict = org_dict['networks']
+        network_table = PrettyTable(['network uuid', 'network name'])
+        for k in network_dict:
+            entry = [k, network_dict[k]]
+            network_table.add_row(entry)
+
+        print network_table
+
+    except KeyError:
+        logger.error("wrong key {}".format(KeyError.message))
+        logger.debug(traceback.format_exc())
+
+
+def print_org_details(org_dict=None):
+    """ Method takes vapp_dict and print in tabular format
+
+    Args:
+        org_dict:  dictionary of organization where key is org uuid.
+
+        Returns:
+            The return nothing
+    """
+    if org_dict is None:
+        return
+    try:
+        catalogs_dict = {}
+
+        print_vdc_list(org_dict=org_dict)
+        print_network_list(org_dict=org_dict)
+
+        if org_dict.has_key('catalogs'):
+            catalogs_dict = org_dict['catalogs']
+
+        catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
+        for k in catalogs_dict:
+            entry = [k, catalogs_dict[k]]
+            catalog_table.add_row(entry)
+
+        print catalog_table
+
+    except KeyError:
+        logger.error("wrong key {}".format(KeyError.message))
+        logger.debug(traceback.format_exc())
+
+
+def delete_actions(vim=None, action=None, namespace=None):
+    if action == 'network' or namespace.action == 'network':
+        logger.debug("Requesting delete for network {}".format(namespace.network_name))
+        network_uuid = namespace.network_name
+        # if request name based we need find UUID
+        # TODO optimize it or move to external function
+        if not namespace.uuid:
+            org_dict = vim.get_org_list()
+            for org in org_dict:
+                org_net = vim.get_org(org)['networks']
+                for network in org_net:
+                    if org_net[network] == namespace.network_name:
+                        network_uuid = network
+
+        vim.delete_network_action(network_uuid=network_uuid)
+
+
+def list_actions(vim=None, action=None, namespace=None):
+    """ Method provide list object from VDC action
+
+       Args:
+           vim - is vcloud director vim connector.
+           action - is action for list ( vdc / org etc)
+           namespace -  must contain VDC / Org information.
+
+           Returns:
+               The return nothing
+       """
+
+    org_id = None
+    myorgs = vim.get_org_list()
+    for org in myorgs:
+        if myorgs[org] == namespace.vcdorg:
+            org_id = org
+            break
+    else:
+        print(" Invalid organization.")
+        return
+
+    if action == 'vms' or namespace.action == 'vms':
+        vm_dict = vim.get_vm_list(vdc_name=namespace.vcdvdc)
+        print_vm_list(vm_dict=vm_dict)
+    elif action == 'vapps' or namespace.action == 'vapps':
+        vapp_dict = vim.get_vapp_list(vdc_name=namespace.vcdvdc)
+        print_vapp(vapp_dict=vapp_dict)
+    elif action == 'networks' or namespace.action == 'networks':
+        if namespace.osm:
+            osm_print(vim.get_network_list(filter_dict={}))
+        else:
+            print_network_list(vim.get_org(org_uuid=org_id))
+    elif action == 'vdc' or namespace.action == 'vdc':
+        if namespace.osm:
+            osm_print(vim.get_tenant_list(filter_dict=None))
+        else:
+            print_vdc_list(vim.get_org(org_uuid=org_id))
+    elif action == 'org' or namespace.action == 'org':
+        print_org(org_dict=vim.get_org_list())
+    else:
+        return None
+
+
+def print_network_details(network_dict=None):
+    try:
+        network_table = PrettyTable(network_dict.keys())
+        entry = [network_dict.values()]
+        network_table.add_row(entry[0])
+        print network_table
+    except KeyError as key_error:
+        logger.error("wrong key {}".format(key_error))
+        logger.debug(traceback.format_exc())
+
+
+def osm_print(generic_dict=None):
+
+    try:
+        for element in generic_dict:
+            table = PrettyTable(element.keys())
+            entry = [element.values()]
+            table.add_row(entry[0])
+            print table
+    except KeyError as key_error:
+        logger.error("wrong key {}".format(key_error))
+        logger.debug(traceback.format_exc())
+
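+# osm_print expects an iterable of flat dicts, e.g. the list returned by
+# vim.get_network_list() or vim.get_tenant_list(); a minimal sketch of one
+# element (field names are illustrative only):
+#
+#   {'id': '<network uuid>', 'name': 'mgmt-net', 'status': 'ACTIVE'}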
+
+def view_actions(vim=None, action=None, namespace=None):
+    org_id = None
+    orgs = vim.get_org_list()
+    for org in orgs:
+        if orgs[org] == namespace.vcdorg:
+            org_id = org
+            break
+    else:
+        print(" Invalid organization.")
+        return
+
+    myorg = vim.get_org(org_uuid=org_id)
+
+    # view org
+    if action == 'org' or namespace.action == 'org':
+        org_id = None
+        orgs = vim.get_org_list()
+        if namespace.uuid:
+            if namespace.org_name in orgs:
+                org_id = namespace.org_name
+        else:
+            # we need to find the UUID based on the provided name
+            for org in orgs:
+                if orgs[org] == namespace.org_name:
+                    org_id = org
+                    break
+
+        logger.debug("Requesting view for orgs {}".format(org_id))
+        print_org_details(vim.get_org(org_uuid=org_id))
+
+    # view vapp action
+    if action == 'vapp' or namespace.action == 'vapp':
+        if namespace.vapp_name is not None:
+            logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
+            vapp_dict = {}
+            vapp_uuid = namespace.vapp_name
+            # if the request is name-based, resolve the vApp UUID first
+            if not namespace.uuid:
+                vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
+                if vapp_uuid is None:
+                    print("Can't find vapp by given name {}".format(namespace.vapp_name))
+                    return
+
+            print " namespace {}".format(namespace)
+            if namespace.osm:
+                vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
+                print vm_info_dict
+            else:
+                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
+                print_vapp(vapp_dict=vapp_dict)
+
+    # view network
+    if action == 'network' or namespace.action == 'network':
+        logger.debug("Requesting view for network {}".format(namespace.network_name))
+        network_uuid = namespace.network_name
+        # if the request is name-based, find the corresponding UUID
+        # TODO optimize it or move to an external function
+        if not namespace.uuid:
+            if 'networks' not in myorg:
+                print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
+                                                                                            vim.name,
+                                                                                            vim.tenant_name))
+                return
+
+            my_org_net = myorg['networks']
+            for network in my_org_net:
+                if my_org_net[network] == namespace.network_name:
+                    network_uuid = network
+                    break
+
+        print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid))
+
+
+def create_actions(vim=None, action=None, namespace=None):
+    """Method gets provider vdc view from vcloud director
+
+        Args:
+            vim - is Cloud director vim connector
+            action - action for create ( network / vdc etc)
+
+        Returns:
+            The return xml content of respond or None
+    """
+    if action == 'network' or namespace.action == 'network':
+        logger.debug("Creating a network in vcloud director".format(namespace.network_name))
+        network_uuid = vim.create_network(namespace.network_name)
+        if network_uuid is not None:
+            print ("Crated new network {} and uuid: {}".format(namespace.network_name, network_uuid))
+        else:
+            print ("Failed create a new network {}".format(namespace.network_name))
+    elif action == 'vdc' or namespace.action == 'vdc':
+        logger.debug("Creating a new vdc in vcloud director.".format(namespace.vdc_name))
+        vdc_uuid = vim.create_vdc(namespace.vdc_name)
+        if vdc_uuid is not None:
+            print ("Crated new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
+        else:
+            print ("Failed create a new vdc {}".format(namespace.vdc_name))
+    else:
+        return None
+
+
+def validate_uuid4(uuid_string):
+    """Function validate that string contain valid uuid4
+
+        Args:
+            uuid_string - valid UUID string
+
+        Returns:
+            The return true if string contain valid UUID format
+    """
+    try:
+        val = uuid.UUID(uuid_string, version=4)
+    except ValueError:
+        return False
+    return True
+
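+# A quick illustration of the helper above (values are arbitrary examples):
+#
+#   validate_uuid4("f47ac10b-58cc-4372-a567-0e02b2c3d479")  # -> True
+#   validate_uuid4("not-a-uuid")                            # -> False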
+
+def upload_image(vim=None, image_file=None):
+    """Function upload image to vcloud director
+
+        Args:
+            image_file - valid UUID string
+
+        Returns:
+            The return true if image uploaded correctly
+    """
+    try:
+        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
+        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
+            print("Image uploaded and uuid {}".format(catalog_uuid))
+            return True
+    except vimconn.vimconnException as upload_exception:
+        print("Failed uploaded {} image".format(image_file))
+        print("Error Reason: {}".format(upload_exception.message))
+    return False
+
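+# Example call (a sketch; the OVF path below is hypothetical):
+#
+#   upload_image(vim=vim, image_file="/tmp/ubuntu16.04.ovf")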
+
+def boot_image(vim=None, image_name=None, vm_name=None):
+    """ Function boot image that resided in vcloud director.
+        The image name can be UUID of name.
+
+        Args:
+            vim - vim connector
+            image_name - image identified by UUID or text string.
+            vm_name - vmname
+
+
+         Returns:
+             The return true if image uploaded correctly
+     """
+
+    vim_catalog = None
+    try:
+        catalogs = vim.vca.get_catalogs()
+        vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
+        if vim_catalog is None:
+            return None
+
+        print (" Booting {} image id {} ".format(vm_name, vim_catalog))
+        vm_uuid = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
+        if vm_uuid is not None and validate_uuid4(vm_uuid):
+            print("Image booted and vm uuid {}".format(vm_uuid))
+            vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vm_uuid, isuuid=True)
+            if vapp_dict is not None:
+                print_vapp(vapp_dict=vapp_dict)
+        return True
+    except vimconn.vimconnNotFoundException as notFound:
+        print("Failed to boot image {}".format(image_name))
+        print(notFound.message)
+    except vimconn.vimconnException as vimconError:
+        print("Failed to boot image {}".format(image_name))
+        print(vimconError.message)
+    except:
+        print("Failed to boot image {}".format(image_name))
+
+    return False
+
+
+def image_action(vim=None, action=None, namespace=None):
+    """ Function present set of action to manipulate with image.
+          - upload image
+          - boot image.
+          - delete image ( not yet done )
+
+        Args:
+             vim - vcloud director connector
+             action - string (upload/boot etc)
+             namespace - contain other attributes image name etc
+
+         Returns:
+             The return nothing
+     """
+
+    if action == 'upload' or namespace.action == 'upload':
+        upload_image(vim=vim, image_file=namespace.image)
+    elif action == 'boot' or namespace.action == 'boot':
+        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
+    else:
+        return None
+
+
+def vmwarecli(command=None, action=None, namespace=None):
+    logger.debug("Namespace {}".format(namespace))
+    urllib3.disable_warnings()
+
+    vcduser = None
+    vcdpassword = None
+    vcdhost = None
+    vcdorg = None
+
+    if hasattr(__builtins__, 'raw_input'):
+        input = raw_input
+
+    if namespace.vcduser is None:
+        while True:
+            vcduser = input("Enter vcd username: ")
+            if vcduser is not None and len(vcduser) > 0:
+                break
+    else:
+        vcduser = namespace.vcduser
+
+    if namespace.vcdpassword is None:
+        while True:
+            vcdpassword = input("Please enter vcd password: ")
+            if vcdpassword is not None and len(vcdpassword) > 0:
+                break
+    else:
+        vcdpassword = namespace.vcdpassword
+
+    if namespace.vcdhost is None:
+        while True:
+            vcdhost = input("Please enter vcd host name or ip: ")
+            if vcdhost is not None and len(vcdhost) > 0:
+                break
+    else:
+        vcdhost = namespace.vcdhost
+
+    if namespace.vcdorg is None:
+        while True:
+            vcdorg = input("Please enter vcd organization name: ")
+            if vcdorg is not None and len(vcdorg) > 0:
+                break
+    else:
+        vcdorg = namespace.vcdorg
+
+    try:
+        vim = vimconnector(uuid=None,
+                           name=vcdorg,
+                           tenant_id=None,
+                           tenant_name=namespace.vcdvdc,
+                           url=vcdhost,
+                           url_admin=vcdhost,
+                           user=vcduser,
+                           passwd=vcdpassword,
+                           log_level="DEBUG",
+                           config={'admin_username': namespace.vcdadmin, 'admin_password': namespace.vcdadminpassword})
+        vim.vca = vim.connect()
+
+    except vimconn.vimconnConnectionException:
+        print("Failed connect to vcloud director. Please check credential and hostname.")
+        return
+
+    # list
+    if command == 'list' or namespace.command == 'list':
+        logger.debug("Client requested list action")
+        # route request to list actions
+        list_actions(vim=vim, action=action, namespace=namespace)
+
+    # view action
+    if command == 'view' or namespace.command == 'view':
+        logger.debug("Client requested view action")
+        view_actions(vim=vim, action=action, namespace=namespace)
+
+    # delete action
+    if command == 'delete' or namespace.command == 'delete':
+        logger.debug("Client requested delete action")
+        delete_actions(vim=vim, action=action, namespace=namespace)
+
+    # create action
+    if command == 'create' or namespace.command == 'create':
+        logger.debug("Client requested create action")
+        create_actions(vim=vim, action=action, namespace=namespace)
+
+    # image action
+    if command == 'image' or namespace.command == 'image':
+        logger.debug("Client requested create action")
+        image_action(vim=vim, action=action, namespace=namespace)
+
+
+if __name__ == '__main__':
+    defaults = {'vcdvdc': 'default',
+                'vcduser': 'admin',
+                'vcdpassword': 'admin',
+                'vcdhost': 'https://localhost',
+                'vcdorg': 'default',
+                'debug': 'INFO'}
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
+    parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
+    parser.add_argument('-U', '--vcdadmin', help='vcloud director admin username', type=str)
+    parser.add_argument('-P', '--vcdadminpassword', help='vcloud director admin password', type=str)
+    parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
+    parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
+    parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
+    parser.add_argument('-d', '--debug', help='debug level (DEBUG, INFO, WARNING, ERROR)', type=str)
+
+    parser_subparsers = parser.add_subparsers(help='commands', dest='command')
+    sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
+    sub_subparsers = sub.add_subparsers(dest='action')
+
+    list_vms = sub_subparsers.add_parser('vms', help='list all VMs deployed in vCloud Director')
+    list_vapps = sub_subparsers.add_parser('vapps', help='list all vApps deployed in vCloud Director')
+    list_network = sub_subparsers.add_parser('networks', help='list all deployed networks')
+    list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
+
+    #list vdc
+    list_vdc = sub_subparsers.add_parser('vdc', help='list all VDCs of the organization accessible to you')
+    list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
+
+    list_org = sub_subparsers.add_parser('org', help='list organizations accessible to you')
+
+    create_sub = parser_subparsers.add_parser('create')
+    create_sub_subparsers = create_sub.add_subparsers(dest='action')
+    create_vms = create_sub_subparsers.add_parser('vms')
+    create_vapp = create_sub_subparsers.add_parser('vapp')
+    create_vapp.add_argument('uuid')
+
+    # add network
+    create_network = create_sub_subparsers.add_parser('network')
+    create_network.add_argument('network_name', action='store', help='create a network for a vdc')
+
+    # add VDC
+    create_vdc = create_sub_subparsers.add_parser('vdc')
+    create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')
+
+    delete_sub = parser_subparsers.add_parser('delete')
+    del_sub_subparsers = delete_sub.add_subparsers(dest='action')
+    del_vms = del_sub_subparsers.add_parser('vms')
+    del_vapp = del_sub_subparsers.add_parser('vapp')
+    del_vapp.add_argument('uuid', help='view vapp based on UUID')
+
+    # delete network
+    del_network = del_sub_subparsers.add_parser('network')
+    del_network.add_argument('network_name', action='store',
+                             help='- delete network for vcloud director by provided name')
+    del_network.add_argument('-u', '--uuid', default=False, action='store_true',
+                             help='delete network for vcloud director by provided uuid')
+
+    # delete vdc
+    del_vdc = del_sub_subparsers.add_parser('vdc')
+
+    view_sub = parser_subparsers.add_parser('view')
+    view_sub_subparsers = view_sub.add_subparsers(dest='action')
+
+    view_vms_parser = view_sub_subparsers.add_parser('vms')
+    view_vms_parser.add_argument('uuid', default=False, action='store_true',
+                                 help='- View VM for specific uuid in vcloud director')
+    view_vms_parser.add_argument('name', default=False, action='store_true',
+                                 help='- View VM for specific vapp name in vcloud director')
+
+    # view vapp
+    view_vapp_parser = view_sub_subparsers.add_parser('vapp')
+    view_vapp_parser.add_argument('vapp_name', action='store',
+                                  help='- view vapp for specific vapp name in vcloud director')
+    view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
+    view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true',  help='provide view in OSM format')
+
+    # view network
+    view_network = view_sub_subparsers.add_parser('network')
+    view_network.add_argument('network_name', action='store',
+                              help='- view network for specific network name in vcloud director')
+    view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')
+
+    # view VDC command and actions
+    view_vdc = view_sub_subparsers.add_parser('vdc')
+    view_vdc.add_argument('vdc_name', action='store',
+                          help='- view VDC based on provided vdc name or uuid')
+    view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')
+
+    # view organization command and actions
+    view_org = view_sub_subparsers.add_parser('org')
+    view_org.add_argument('org_name', action='store',
+                          help='- view organization based on provided org name or uuid')
+    view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
+
+    # upload image action
+    image_sub = parser_subparsers.add_parser('image')
+    image_subparsers = image_sub.add_subparsers(dest='action')
+    upload_parser = image_subparsers.add_parser('upload')
+    upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
+    upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')
+
+    # boot vm action
+    boot_parser = image_subparsers.add_parser('boot')
+    boot_parser.add_argument('image', default=False, action='store', help='- Image name')
+    boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
+    boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='boot image identified by uuid')
+
+    namespace = parser.parse_args()
+    # put command_line args to mapping
+    command_line_args = {k: v for k, v in vars(namespace).items() if v}
+
+    d = defaults.copy()
+    d.update(os.environ)
+    d.update(command_line_args)
+
+    logger = logging.getLogger('mano.vim.vmware')
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    ch = logging.StreamHandler()
+    ch.setLevel(str.upper(d['debug']))
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+    logger.setLevel(getattr(logging, str.upper(d['debug'])))
+    logger.info(
+        "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))
+
+    logger.debug("command: \"{}\" actio: \"{}\"".format(d['command'], d['action']))
+
+    # main entry point.
+    vmwarecli(namespace=namespace)
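+
+# A minimal usage sketch (hostname, credentials and object names below are
+# hypothetical; adjust them to your vCloud Director deployment):
+#
+#   python vmwarecli.py -u admin -p secret -c https://vcd.example.com \
+#       -o myorg -v myvdc list vapps
+#   python vmwarecli.py -u admin -p secret -c https://vcd.example.com \
+#       -o myorg -v myvdc view network mgmt-net
+#   python vmwarecli.py -u admin -p secret -c https://vcd.example.com \
+#       -o myorg -v myvdc image upload /tmp/ubuntu16.04.ovf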
diff --git a/vnfs/examples/vnf_additional_disk_based_image.yaml b/vnfs/examples/vnf_additional_disk_based_image.yaml
new file mode 100644 (file)
index 0000000..48f8ba9
--- /dev/null
@@ -0,0 +1,67 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_additional_disk_based_image
+    description: VNF with additional volume based on image
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              TEMPLATE-VM # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        TEMPLATE-VM       # name of Virtual Machine
+        description: TEMPLATE description
+#        VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+        image name: ubuntu16.04
+        image checksum: 7373edba82a31eedd182d29237b746cf
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+        devices:                       # Optional, order determines device letter assignment (hda, hdb, ...)
+        -   type:      disk            # "disk","cdrom","xml"
+            image name: TestVM
+            image checksum: 88d6c77b58fd40a7cb7f44b62bd5ad98
+            size: 1
+            # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+            # vpci:      "0000:00:03.0"   # Optional, not for disk or cdrom
+    # Additional Virtual Machines would be included here
+
diff --git a/vnfs/examples/vnf_additional_disk_empty_volume.yaml b/vnfs/examples/vnf_additional_disk_empty_volume.yaml
new file mode 100644 (file)
index 0000000..ef0990f
--- /dev/null
@@ -0,0 +1,65 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_additional_disk_empty_volume
+    description: VNF with an additional empty volume
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              TEMPLATE-VM # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        TEMPLATE-VM       # name of Virtual Machine
+        description: TEMPLATE description
+#        VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+        image name: ubuntu16.04
+        image checksum: 7373edba82a31eedd182d29237b746cf
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+        devices:                       # Optional, order determines device letter assignment (hda, hdb, ...)
+        -   type:      disk            # "disk","cdrom","xml"
+            size: 1
+            # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" }
+            # vpci:      "0000:00:03.0"   # Optional, not for disk or cdrom
+    # Additional Virtual Machines would be included here
+
diff --git a/vnfs/examples/vnf_no_additional_devices.yaml b/vnfs/examples/vnf_no_additional_devices.yaml
new file mode 100644 (file)
index 0000000..aad2f19
--- /dev/null
@@ -0,0 +1,60 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name: vnf_no_additional_devices
+    description: VNF with no additional devices
+    # class: parent      # Optional. Used to organize VNFs
+    external-connections:
+    -   name:              mgmt0
+        type:              mgmt        # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              TEMPLATE-VM # Virtual Machine this interface belongs to
+        local_iface_name:  mgmt0       # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface
+    VNFC:                              # Virtual machine array 
+    -   name:        TEMPLATE-VM       # name of Virtual Machine
+        description: TEMPLATE description
+#        VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
+        image name: ubuntu16.04
+        image checksum: 7373edba82a31eedd182d29237b746cf
+        # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional
+        # processor:                     #Optional
+        #     model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+        #     features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        # hypervisor:                    #Optional
+        #     type: QEMU-kvm
+        #     version: "10002|12001|2.6.32-358.el6.x86_64"
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1000         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 5          # disk size in GiB, by default 1
+        #numas: 
+        #-   paired-threads: 5          # "cores", "paired-threads", "threads"
+        #    paired-threads-id: [ [0,1], [2,3], [4,5], [6,7], [8,9] ] # By default follows incremental order
+        #    memory: 14                 # GBytes
+        #    interfaces: []
+        bridge-ifaces:
+        -   name:      mgmt0
+            vpci:      "0000:00:0a.0"    # Optional. Virtual PCI address
+            bandwidth: 1 Mbps            # Optional. Informative only
+            # mac_address: '20:33:45:56:77:46' #avoid this option if possible
+            # model:       'virtio'      # ("virtio","e1000","ne2k_pci","pcnet","rtl8139") By default, it is automatically filled by libvirt
+    # Additional Virtual Machines would be included here
+