Merge from master

Signed-off-by: Philip Joseph <philip.joseph@riftio.com>
diff --git a/BUILD.sh b/BUILD.sh
index 94c8e21..1c90a25 100755
--- a/BUILD.sh
+++ b/BUILD.sh
@@ -184,16 +184,16 @@
     # remove these packages since some files moved from one to the other, and one was obsoleted
     # ignore failures
 
-    DEL_PACKAGES="rw.toolchain-rwbase rw.toolchain-rwtoolchain rw.core.mgmt-mgmt rw.core.util-util \
-	          rw.core.rwvx-rwvx rw.core.rwvx-rwdts rw.automation.core-RWAUTO rw.core.rwvx-rwha-1.0"
-    for package in $DEL_PACKAGES; do
-        apt remove -y $package || true
+    PACKAGES="rw.toolchain-rwbase rw.toolchain-rwtoolchain rw.core.mgmt-mgmt rw.core.util-util \
+	            rw.core.rwvx-rwvx rw.core.rwvx-rwdts rw.automation.core-RWAUTO"
+    # this package is obsolete.
+    OLD_PACKAGES="rw.core.rwvx-rwha-1.0"
+    for package in $PACKAGES $OLD_PACKAGES; do
+        sudo apt remove -y $package || true
     done
 
-    INST_PACKAGES="rw.toolchain-rwbase rw.toolchain-rwtoolchain rw.core.mgmt-mgmt rw.core.util-util \
-	           rw.core.rwvx-rwvx rw.core.rwvx-rwdts rw.automation.core-RWAUTO"
     packages=""
-    for package in $INST_PACKAGES; do
+    for package in $PACKAGES; do
         packages="$packages $package=${PLATFORM_VERSION}"
     done
     sudo apt-get install -y --allow-downgrades $packages
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..463b1c6
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,22 @@
+FROM ubuntu:16.04
+
+RUN apt-get update && apt-get -y install python3 curl build-essential
+RUN curl http://repos.riftio.com/public/xenial-riftware-public-key | apt-key add - && \
+	curl -o /etc/apt/sources.list.d/OSM.list http://buildtracker.riftio.com/repo_file/ub16/OSM/ && \
+	apt-get update && \
+	apt-get -y install rw.toolchain-rwbase \
+		rw.toolchain-rwtoolchain \
+		rw.core.mgmt-mgmt \
+		rw.core.util-util \
+		rw.core.rwvx-rwvx \
+		rw.core.rwvx-rwdts \
+		rw.automation.core-RWAUTO \
+		rw.tools-container-tools \
+		rw.tools-scripts \
+		python-cinderclient \
+		libxml2-dev \
+		libxslt-dev
+
+RUN /usr/rift/container_tools/mkcontainer --modes build --modes ext --repo OSM
+
+RUN chmod 777 /usr/rift /usr/rift/usr/share
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..0c556a4
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,27 @@
+pipeline {
+	agent any
+	stages {
+		stage("Build") {
+			agent {
+				dockerfile true
+			}
+			steps {
+				sh 'make NOT_DEVELOPER_BUILD=TRUE -j16 package'
+				stash name: "deb-files", includes: ".build/*.deb"
+			}
+		}
+		stage("Repo Component") {
+			steps {
+				unstash "deb-files"
+				sh '''
+					mkdir -p pool/SO
+					mv .build/*.deb pool/SO/
+					mkdir -p dists/$RELEASE/SO/binary-amd64/
+					apt-ftparchive packages pool/SO > dists/$RELEASE/SO/binary-amd64/Packages
+					gzip -9fk dists/$RELEASE/SO/binary-amd64/Packages
+					'''
+				archiveArtifacts artifacts: "dists/**,pool/SO/*.deb"
+			}
+		}
+	}
+}
diff --git a/common/python/CMakeLists.txt b/common/python/CMakeLists.txt
index de83df3..285123b 100644
--- a/common/python/CMakeLists.txt
+++ b/common/python/CMakeLists.txt
@@ -113,6 +113,10 @@
     rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
     rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
     rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_placement_group.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_graph.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_path.py
     rift/mano/tosca_translator/common/__init__.py
     rift/mano/tosca_translator/common/utils.py
     rift/mano/tosca_translator/common/exception.py
@@ -131,6 +135,7 @@
     rift/mano/utils/compare_desc.py
     rift/mano/utils/juju_api.py
     rift/mano/utils/project.py
+    rift/mano/utils/short_name.py
   COMPONENT ${PKG_LONG_NAME}
   PYTHON3_ONLY
   )
@@ -169,6 +174,20 @@
   ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/yang_translator/yang-translator)
 
 install(
+  FILES rift/mano/yang_translator/riftiotypes.yaml
+    DESTINATION
+      usr/rift/mano/common
+    COMPONENT ${PKG_LONG_NAME}
+    )
+
+install(
+  FILES rift/mano/tosca_translator/dummy_vnf_node.yaml
+    DESTINATION
+      usr/rift/mano/common
+    COMPONENT ${PKG_LONG_NAME}
+    )
+
+install(
   FILES ${TRANSLATOR_SCRIPTS}
     DESTINATION
       usr/bin
diff --git a/common/python/rift/mano/config_agent/operdata.py b/common/python/rift/mano/config_agent/operdata.py
index 61ae5f8..5cbd351 100644
--- a/common/python/rift/mano/config_agent/operdata.py
+++ b/common/python/rift/mano/config_agent/operdata.py
@@ -447,7 +447,10 @@
                            format(process, rc, err))
 
             if len(err):
-                errs += "<error>{}</error>".format(err)
+                if rc == 0:
+                    errs += "<success>{}</success>".format(err)
+                else:
+                    errs += "<error>{}</error>".format(err)
             result |= rc
 
         if result == 0:
diff --git a/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml b/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml
new file mode 100644
index 0000000..6798e2a
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/dummy_vnf_node.yaml
@@ -0,0 +1,1496 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Toy NS
+data_types:
+  tosca.datatypes.nfv.riftio.dashboard_params:
+    properties:
+      path:
+        type: string
+        description: >-
+          The HTTP path for the dashboard
+      port:
+        type: tosca.datatypes.network.PortDef
+        description: >-
+          The HTTP port for the dashboard
+        default: 80
+      https:
+        type: boolean
+        description: >-
+          Pick HTTPS instead of HTTP. Default is false
+        default: false
+        required: false
+  tosca.datatypes.nfv.riftio.monitoring_param_ui:
+    properties:
+      description:
+        type: string
+        required: false
+      group_tag:
+        type: string
+        description: >-
+          A simple tag to group monitoring parameters
+        required: false
+      widget_type:
+        type: string
+        description: >-
+          Type of the widget
+        default: counter
+        constraints:
+          - valid_values:
+              - histogram
+              - bar
+              - gauge
+              - slider
+              - counter
+              - textbox
+      units:
+        type: string
+        required: false
+  tosca.datatypes.nfv.riftio.monitoring_param_value:
+    properties:
+      value_type:
+        type: string
+        default: integer
+        constraints:
+          - valid_values:
+              - integer
+              - float
+              - string
+      numeric_min:
+        type: integer
+        description: >-
+          Minimum value for the parameter
+        required: false
+      numeric_max:
+        type: integer
+        description: >-
+          Maximum value for the parameter
+        required: false
+      string_min:
+        type: integer
+        description: >-
+          Minimum string length for the parameter
+        required: false
+        constraints:
+          - greater_or_equal: 0
+      string_max:
+        type: integer
+        description: >-
+          Maximum string length for the parameter
+        required: false
+        constraints:
+          - greater_or_equal: 0
+  tosca.datatypes.compute.Container.Architecture.CPUAllocation:
+    derived_from: tosca.datatypes.Root
+    properties:
+      cpu_affinity:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [shared, dedicated, any]
+      thread_allocation:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [avoid, separate, isolate, prefer]
+      socket_count:
+        type: integer
+        required: false
+      core_count:
+        type: integer
+        required: false
+      thread_count:
+        type: integer
+        required: false
+
+  tosca.datatypes.compute.Container.Architecture.NUMA:
+    derived_from: tosca.datatypes.Root
+    properties:
+      id:
+        type: integer
+        constraints:
+          - greater_or_equal: 0
+      vcpus:
+        type: list
+        entry_schema:
+          type: integer
+          constraints:
+            -  greater_or_equal: 0
+      mem_size:
+        type: scalar-unit.size
+        constraints:
+          - greater_or_equal: 0 MB
+  tosca.datatypes.nfv.riftio.paired_thread_map:
+    properties:
+      thread_a:
+        type: integer
+        required: true
+        constraints:
+          - greater_or_equal: 0
+      thread_b:
+        type: integer
+        required: true
+        constraints:
+          - greater_or_equal: 0
+
+  tosca.datatypes.nfv.riftio.paired_threads:
+    properties:
+      num_paired_threads:
+        type: integer
+        constraints:
+          - greater_or_equal: 1
+      paired_thread_ids:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.paired_thread_map
+        constraints:
+          - max_length: 16
+        required: false
+
+  tosca.datatypes.compute.riftio.numa:
+    properties:
+      id:
+        type: integer
+        constraints:
+          - greater_or_equal: 0
+      vcpus:
+        type: list
+        entry_schema:
+          type: integer
+          constraints:
+            -  greater_or_equal: 0
+        required: false
+      mem_size:
+        type: scalar-unit.size
+        constraints:
+          - greater_or_equal: 0 MB
+        required: false
+      om_numa_type:
+        type: string
+        description: Openmano Numa type selection
+        constraints:
+          - valid_values: [cores, paired-threads, threads]
+        required: false
+      num_cores:
+        type: integer
+        description: Use when om_numa_type is cores
+        constraints:
+          - greater_or_equal: 1
+        required: false
+      paired_threads:
+        type: tosca.datatypes.nfv.riftio.paired_threads
+        description: Use when om_numa_type is paired-threads
+        required: false
+      num_threads:
+        type: integer
+        description: Use when om_numa_type is threads
+        constraints:
+          - greater_or_equal: 1
+        required: false
+  
+  tosca.nfv.datatypes.pathType:
+    properties:
+      forwarder:
+        type: string
+        required: true
+      capability:
+        type: string
+        required: true
+
+  tosca.nfv.datatypes.aclType:
+    properties:
+      eth_type:
+        type: string
+        required: false
+      eth_src:
+        type: string
+        required: false
+      eth_dst:
+        type: string
+        required: false
+      vlan_id:
+        type: integer
+        constraints:
+          - in_range: [ 1, 4094 ]
+        required: false
+      vlan_pcp:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7 ]
+        required: false
+      mpls_label:
+        type: integer
+        constraints:
+          - in_range: [ 16, 1048575]
+        required: false
+      mpls_tc:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7 ]
+        required: false
+      ip_dscp:
+        type: integer
+        constraints:
+          - in_range: [ 0, 63 ]
+        required: false
+      ip_ecn:
+        type: integer
+        constraints:
+          - in_range: [ 0, 3 ]
+        required: false
+      ip_src_prefix:
+        type: string
+        required: false
+      ip_dst_prefix:
+        type: string
+        required: false
+      ip_proto:
+        type: integer
+        constraints:
+          - in_range: [ 1, 254 ]
+        required: false
+      destination_port_range:
+        type: string
+        required: false
+      source_port_range:
+        type: string
+        required: false
+      network_src_port_id:
+        type: string
+        required: false
+      network_dst_port_id:
+        type: string
+        required: false
+      network_id:
+        type: string
+        required: false
+      network_name:
+        type: string
+        required: false
+      tenant_id:
+        type: string
+        required: false
+      icmpv4_type:
+        type: integer
+        constraints:
+          - in_range: [ 0, 254 ]
+        required: false
+      icmpv4_code:
+        type: integer
+        constraints:
+          - in_range: [ 0, 15 ]
+        required: false
+      arp_op:
+        type: integer
+        constraints:
+          - in_range: [ 1, 25 ]
+        required: false
+      arp_spa:
+        type: string
+        required: false
+      arp_tpa:
+        type: string
+        required: false
+      arp_sha:
+        type: string
+        required: false
+      arp_tha:
+        type: string
+        required: false
+      ipv6_src:
+        type: string
+        required: false
+      ipv6_dst:
+        type: string
+        required: false
+      ipv6_flabel:
+        type: integer
+        constraints:
+          - in_range: [ 0, 1048575]
+        required: false
+      icmpv6_type:
+        type: integer
+        constraints:
+          - in_range: [ 0, 255]
+        required: false
+      icmpv6_code:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7]
+        required: false
+      ipv6_nd_target:
+        type: string
+        required: false
+      ipv6_nd_sll:
+        type: string
+        required: false
+      ipv6_nd_tll:
+        type: string
+        required: false
+
+  
+  tosca.datatypes.nfv.riftio.vnf_configuration:
+    properties:
+      config_type:
+        type: string
+        description: >-
+          Type of the configuration agent to use
+        constraints:
+          - valid_values: [script, netconf, rest, juju]
+      config_details:
+        type: map
+        description: >-
+          Specify the details for the config agent, like
+          script type, juju charm to use, etc.
+      config_template:
+        required: false
+        type: string
+      config_delay:
+        type: integer
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: false
+      config_priority:
+        type: integer
+        constraints:
+        - greater_than: 0
+
+  tosca.datatypes.nfv.riftio.parameter_value:
+    properties:
+      name:
+        type: string
+        description: Name of the parameter
+      value:
+        type: string
+        description: Value of the parameter
+
+  tosca.datatypes.nfv.riftio.config_primitive:
+    properties:
+      name:
+        type: string
+      seq:
+        type: integer
+        description: >-
+          Order in which to apply, when multiple ones are defined
+        default: 0
+        constraints:
+          - greater_or_equal: 0
+      parameter:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.parameter_value
+      user_defined_script:
+        type: string
+  tosca.datatypes.nfv.riftio.primitive_parameter:
+    properties:
+      data_type:
+        type: string
+        description: >-
+          Data type associated with the name
+        constraints:
+          - valid_values: [string, integer, boolean]
+      mandatory:
+        type: boolean
+        description: >-
+          If this field is mandatory
+        default: false
+        required: false
+      default_value:
+        type: string
+        description: >-
+          The default value for this field
+        required: false
+      parameter_pool:
+        type: string
+        description: >-
+          Parameter pool name to use for this parameter
+        required: false
+      read_only:
+        type: boolean
+        description: >-
+          The value should be greyed out by the UI.
+          Only applies to parameters with default values.
+        required: false
+        default: false
+      hidden:
+        type: boolean
+        description: >-
+          The field should be hidden by the UI.
+          Only applies to parameters with default values.
+        required: false
+        default: false
+  tosca.datatypes.nfv.riftio.primitive_parameter_group:
+    properties:
+      name:
+        type: string
+        description: >-
+          Name of the parameter group
+      mandatory:
+        type: boolean
+        description: >-
+          If this group is mandatory
+        default: false
+        required: false
+      parameter:
+        type: map
+        description: >-
+          List of parameters for the service primitive
+        entry_schema: tosca.datatypes.nfv.riftio.primitive_parameter
+
+  tosca.datatypes.nfv.riftio.vnf_primitive_group:
+    properties:
+      vnf_name:
+        type: string
+        description: >-
+          Name of the VNF in the NS
+      primitive:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Index and name of the primitive
+
+
+capability_types:
+  tosca.capabilities.nfv.riftio.mgmt_interface:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      static_ip:
+        type: string
+        required: false
+        description: >-
+          Specifies the static IP address for managing the VNF
+      connection_point:
+        type: string
+        required: false
+        description: >-
+          Use the ip address associated with this connection point
+      dashboard_params:
+        type: tosca.datatypes.nfv.riftio.dashboard_params
+        required: false
+        description: >-
+          Parameters for the VNF dashboard
+  tosca.capabilities.nfv.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      name:
+        type: string
+        required: false
+      description:
+        type: string
+        required: false
+      protocol:
+        type: string
+        default: http
+        constraints:
+          - equal: http
+      polling_interval:
+        type: scalar-unit.time
+        description: >-
+          The HTTP polling interval in seconds
+        default: 2 s
+      username:
+        type: string
+        description: >-
+          The HTTP basic auth username
+        required: false
+      password:
+        type: string
+        description: >-
+          The HTTP basic auth password
+        required: false
+      method:
+        type: string
+        description: >-
+          This is the method to be performed at the uri.
+          GET by default for action
+        default: get
+        constraints:
+          - valid_values: [post, put, get, delete, options, patch]
+      headers:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Custom HTTP headers to put on HTTP request
+        required: false
+      json_query_method:
+        type: string
+        description: >-
+          The method to extract a value from a JSON response
+            namekey    - Use the name as the key for a non-nested value.
+            jsonpath   - Use jsonpath-rw implementation to extract a value.
+            objectpath - Use objectpath implementation to extract a value.
+        constraints:
+          - valid_values: [namekey, jsonpath, objectpath]
+        default: namekey
+      json_query_path:
+        type: string
+        description: >-
+          The json path to use to extract value from JSON structure
+        required: false
+      json_object_path:
+        type: string
+        description: >-
+          The object path to use to extract value from JSON structure
+        required: false
+      ui_data:
+        type: tosca.datatypes.nfv.riftio.monitoring_param_ui
+        required: false
+      constraints:
+        type: tosca.datatypes.nfv.riftio.monitoring_param_value
+        required: false
+  tosca.capabilities.nfv.riftio.numa_extension:
+    derived_from: tosca.capabilities.Root
+    properties:
+      node_cnt:
+        type: integer
+        description: >-
+          The number of numa nodes to expose to the VM
+        constraints:
+          - greater_or_equal: 0
+      mem_policy:
+        type: string
+        description: >-
+          This policy specifies how the memory should
+                   be allocated in a multi-node scenario.
+                   STRICT    - The memory must be allocated
+                               strictly from the memory attached
+                               to the NUMA node.
+                   PREFERRED - The memory should be allocated
+                               preferentially from the memory
+                               attached to the NUMA node
+        constraints:
+          - valid_values: [strict, preferred, STRICT, PREFERRED]
+      node:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.compute.riftio.numa
+  tosca.capabilities.nfv.riftio.vswitch_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      ovs_acceleration:
+        type: string
+        description: |-
+          Specifies Open vSwitch acceleration mode.
+             MANDATORY - OVS acceleration is required
+             PREFERRED - OVS acceleration is preferred
+        constraints:
+          - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+      ovs_offload:
+        type: string
+        description: |-
+          Specifies Open vSwitch hardware offload mode.
+             MANDATORY - OVS offload is required
+             PREFERRED - OVS offload is preferred
+        constraints:
+          - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+
+  tosca.capabilities.nfv.riftio.hypervisor_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      type:
+        type: string
+        description: |-
+          Specifies the type of hypervisor.
+        constraints:
+          - valid_values: [prefer_kvm, require_kvm, PREFER_KVM, REQUIRE_KVM]
+      version:
+        type: string
+
+  tosca.capabilities.nfv.riftio.host_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      cpu_model:
+        type: string
+        description: >-
+          Host CPU model. Examples include SandyBridge,
+          IvyBridge, etc.
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_westmere
+              - require_westmere
+              - prefer_sandbridge
+              - require_sandybridge
+              - prefer_ivybridge
+              - require_ivybridge
+              - prefer_haswell
+              - require_haswell
+              - prefer_broadwell
+              - require_broadwell
+              - prefer_nehalem
+              - require_nehalem
+              - prefer_penryn
+              - require_penryn
+              - prefer_conroe
+              - require_conroe
+              - prefer_core2duo
+              - require_core2duo
+              - PREFER_WESTMERE
+              - REQUIRE_WESTMERE
+              - PREFER_SANDBRIDGE
+              - REQUIRE_SANDYBRIDGE
+              - PREFER_IVYBRIDGE
+              - REQUIRE_IVYBRIDGE
+              - PREFER_HASWELL
+              - REQUIRE_HASWELL
+              - PREFER_BROADWELL
+              - REQUIRE_BROADWELL
+              - PREFER_NEHALEM
+              - REQUIRE_NEHALEM
+              - PREFER_PENRYN
+              - REQUIRE_PENRYN
+              - PREFER_CONROE
+              - REQUIRE_CONROE
+              - PREFER_CORE2DUO
+              - REQUIRE_CORE2DUO
+      cpu_arch:
+        type: string
+        description: >-
+          Host CPU architecture
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_x86
+              - require_x86
+              - prefer_x86_64
+              - require_x86_64
+              - prefer_i686
+              - require_i686
+              - prefer_ia64
+              - require_ia64
+              - prefer_armv7
+              - require_armv7
+              - prefer_armv8
+              - require_armv8
+              - PREFER_X86
+              - REQUIRE_X86
+              - PREFER_X86_64
+              - REQUIRE_X86_64
+              - PREFER_I686
+              - REQUIRE_I686
+              - PREFER_IA64
+              - REQUIRE_IA64
+              - PREFER_ARMV7
+              - REQUIRE_ARMV7
+              - PREFER_ARMV8
+              - REQUIRE_ARMV8
+      cpu_vendor:
+        type: string
+        description: >-
+          Host CPU vendor
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_intel
+              - require_intel
+              - prefer_amd
+              - require_amd
+              - PREFER_INTEL
+              - REQUIRE_INTEL
+              - PREFER_AMD
+              - REQUIRE_AMD
+      cpu_socket_count:
+        type: integer
+        description: >-
+          Number of sockets on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_core_count:
+        type: integer
+        description: >-
+          Number of cores on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_core_thread_count:
+        type: integer
+        description: >-
+          Number of threads per core on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_feature:
+        type: list
+        entry_schema:
+          type: string
+        description: |-
+          Enumeration for CPU features.
+
+          AES- CPU supports advanced instruction set for
+          AES (Advanced Encryption Standard).
+
+          CAT- Cache Allocation Technology (CAT) allows
+          an Operating System, Hypervisor, or similar
+          system management agent to specify the amount
+          of L3 cache (currently the last-level cache
+          in most server and client platforms) space an
+          application can fill (as a hint to hardware
+          functionality, certain features such as power
+          management may override CAT settings).
+
+          CMT- Cache Monitoring Technology (CMT) allows
+          an Operating System, Hypervisor, or similar
+          system management agent to determine the
+          usage of cache based on applications running
+          on the platform. The implementation is
+          directed at L3 cache monitoring (currently
+          the last-level cache in most server and
+          client platforms).
+
+          DDIO- Intel Data Direct I/O (DDIO) enables
+          Ethernet server NICs and controllers talk
+          directly to the processor cache without a
+          detour via system memory. This enumeration
+          specifies if the VM requires a DDIO
+          capable host.
+        required: false
+        constraints:
+          - valid_values:
+            - prefer_aes
+            - require_aes
+            - prefer_cat
+            - require_cat
+            - prefer_cmt
+            - require_cmt
+            - prefer_ddio
+            - require_ddio
+            - prefer_vme
+            - require_vme
+            - prefer_de
+            - require_de
+            - prefer_pse
+            - require_pse
+            - prefer_tsc
+            - require_tsc
+            - prefer_msr
+            - require_msr
+            - prefer_pae
+            - require_pae
+            - prefer_mce
+            - require_mce
+            - prefer_cx8
+            - require_cx8
+            - prefer_apic
+            - require_apic
+            - prefer_sep
+            - require_sep
+            - prefer_mtrr
+            - require_mtrr
+            - prefer_pge
+            - require_pge
+            - prefer_mca
+            - require_mca
+            - prefer_cmov
+            - require_cmov
+            - prefer_pat
+            - require_pat
+            - prefer_pse36
+            - require_pse36
+            - prefer_clflush
+            - require_clflush
+            - prefer_dts
+            - require_dts
+            - prefer_acpi
+            - require_acpi
+            - prefer_mmx
+            - require_mmx
+            - prefer_fxsr
+            - require_fxsr
+            - prefer_sse
+            - require_sse
+            - prefer_sse2
+            - require_sse2
+            - prefer_ss
+            - require_ss
+            - prefer_ht
+            - require_ht
+            - prefer_tm
+            - require_tm
+            - prefer_ia64
+            - require_ia64
+            - prefer_pbe
+            - require_pbe
+            - prefer_rdtscp
+            - require_rdtscp
+            - prefer_pni
+            - require_pni
+            - prefer_pclmulqdq
+            - require_pclmulqdq
+            - prefer_dtes64
+            - require_dtes64
+            - prefer_monitor
+            - require_monitor
+            - prefer_ds_cpl
+            - require_ds_cpl
+            - prefer_vmx
+            - require_vmx
+            - prefer_smx
+            - require_smx
+            - prefer_est
+            - require_est
+            - prefer_tm2
+            - require_tm2
+            - prefer_ssse3
+            - require_ssse3
+            - prefer_cid
+            - require_cid
+            - prefer_fma
+            - require_fma
+            - prefer_cx16
+            - require_cx16
+            - prefer_xtpr
+            - require_xtpr
+            - prefer_pdcm
+            - require_pdcm
+            - prefer_pcid
+            - require_pcid
+            - prefer_dca
+            - require_dca
+            - prefer_sse4_1
+            - require_sse4_1
+            - prefer_sse4_2
+            - require_sse4_2
+            - prefer_x2apic
+            - require_x2apic
+            - prefer_movbe
+            - require_movbe
+            - prefer_popcnt
+            - require_popcnt
+            - prefer_tsc_deadline_timer
+            - require_tsc_deadline_timer
+            - prefer_xsave
+            - require_xsave
+            - prefer_avx
+            - require_avx
+            - prefer_f16c
+            - require_f16c
+            - prefer_rdrand
+            - require_rdrand
+            - prefer_fsgsbase
+            - require_fsgsbase
+            - prefer_bmi1
+            - require_bmi1
+            - prefer_hle
+            - require_hle
+            - prefer_avx2
+            - require_avx2
+            - prefer_smep
+            - require_smep
+            - prefer_bmi2
+            - require_bmi2
+            - prefer_erms
+            - require_erms
+            - prefer_invpcid
+            - require_invpcid
+            - prefer_rtm
+            - require_rtm
+            - prefer_mpx
+            - require_mpx
+            - prefer_rdseed
+            - require_rdseed
+            - prefer_adx
+            - require_adx
+            - prefer_smap
+            - require_smap
+            - PREFER_AES
+            - REQUIRE_AES
+            - PREFER_CAT
+            - REQUIRE_CAT
+            - PREFER_CMT
+            - REQUIRE_CMT
+            - PREFER_DDIO
+            - REQUIRE_DDIO
+            - PREFER_VME
+            - REQUIRE_VME
+            - PREFER_DE
+            - REQUIRE_DE
+            - PREFER_PSE
+            - REQUIRE_PSE
+            - PREFER_TSC
+            - REQUIRE_TSC
+            - PREFER_MSR
+            - REQUIRE_MSR
+            - PREFER_PAE
+            - REQUIRE_PAE
+            - PREFER_MCE
+            - REQUIRE_MCE
+            - PREFER_CX8
+            - REQUIRE_CX8
+            - PREFER_APIC
+            - REQUIRE_APIC
+            - PREFER_SEP
+            - REQUIRE_SEP
+            - PREFER_MTRR
+            - REQUIRE_MTRR
+            - PREFER_PGE
+            - REQUIRE_PGE
+            - PREFER_MCA
+            - REQUIRE_MCA
+            - PREFER_CMOV
+            - REQUIRE_CMOV
+            - PREFER_PAT
+            - REQUIRE_PAT
+            - PREFER_PSE36
+            - REQUIRE_PSE36
+            - PREFER_CLFLUSH
+            - REQUIRE_CLFLUSH
+            - PREFER_DTS
+            - REQUIRE_DTS
+            - PREFER_ACPI
+            - REQUIRE_ACPI
+            - PREFER_MMX
+            - REQUIRE_MMX
+            - PREFER_FXSR
+            - REQUIRE_FXSR
+            - PREFER_SSE
+            - REQUIRE_SSE
+            - PREFER_SSE2
+            - REQUIRE_SSE2
+            - PREFER_SS
+            - REQUIRE_SS
+            - PREFER_HT
+            - REQUIRE_HT
+            - PREFER_TM
+            - REQUIRE_TM
+            - PREFER_IA64
+            - REQUIRE_IA64
+            - PREFER_PBE
+            - REQUIRE_PBE
+            - PREFER_RDTSCP
+            - REQUIRE_RDTSCP
+            - PREFER_PNI
+            - REQUIRE_PNI
+            - PREFER_PCLMULQDQ
+            - REQUIRE_PCLMULQDQ
+            - PREFER_DTES64
+            - REQUIRE_DTES64
+            - PREFER_MONITOR
+            - REQUIRE_MONITOR
+            - PREFER_DS_CPL
+            - REQUIRE_DS_CPL
+            - PREFER_VMX
+            - REQUIRE_VMX
+            - PREFER_SMX
+            - REQUIRE_SMX
+            - PREFER_EST
+            - REQUIRE_EST
+            - PREFER_TM2
+            - REQUIRE_TM2
+            - PREFER_SSSE3
+            - REQUIRE_SSSE3
+            - PREFER_CID
+            - REQUIRE_CID
+            - PREFER_FMA
+            - REQUIRE_FMA
+            - PREFER_CX16
+            - REQUIRE_CX16
+            - PREFER_XTPR
+            - REQUIRE_XTPR
+            - PREFER_PDCM
+            - REQUIRE_PDCM
+            - PREFER_PCID
+            - REQUIRE_PCID
+            - PREFER_DCA
+            - REQUIRE_DCA
+            - PREFER_SSE4_1
+            - REQUIRE_SSE4_1
+            - PREFER_SSE4_2
+            - REQUIRE_SSE4_2
+            - PREFER_X2APIC
+            - REQUIRE_X2APIC
+            - PREFER_MOVBE
+            - REQUIRE_MOVBE
+            - PREFER_POPCNT
+            - REQUIRE_POPCNT
+            - PREFER_TSC_DEADLINE_TIMER
+            - REQUIRE_TSC_DEADLINE_TIMER
+            - PREFER_XSAVE
+            - REQUIRE_XSAVE
+            - PREFER_AVX
+            - REQUIRE_AVX
+            - PREFER_F16C
+            - REQUIRE_F16C
+            - PREFER_RDRAND
+            - REQUIRE_RDRAND
+            - PREFER_FSGSBASE
+            - REQUIRE_FSGSBASE
+            - PREFER_BMI1
+            - REQUIRE_BMI1
+            - PREFER_HLE
+            - REQUIRE_HLE
+            - PREFER_AVX2
+            - REQUIRE_AVX2
+            - PREFER_SMEP
+            - REQUIRE_SMEP
+            - PREFER_BMI2
+            - REQUIRE_BMI2
+            - PREFER_ERMS
+            - REQUIRE_ERMS
+            - PREFER_INVPCID
+            - REQUIRE_INVPCID
+            - PREFER_RTM
+            - REQUIRE_RTM
+            - PREFER_MPX
+            - REQUIRE_MPX
+            - PREFER_RDSEED
+            - REQUIRE_RDSEED
+            - PREFER_ADX
+            - REQUIRE_ADX
+            - PREFER_SMAP
+            - REQUIRE_SMAP
+      om_cpu_model_string:
+        type: string
+        description: >-
+          Openmano CPU model string
+        required: false
+      om_cpu_feature:
+        type: list
+        entry_schema:
+          type: string
+        description: >-
+          List of openmano CPU features
+        required: false
+
+  tosca.capabilities.nfv.riftio.sfc:
+    derived_from: tosca.capabilities.Root
+    description: >-
+      Service Function Chaining support on this VDU
+    properties:
+      sfc_type:
+        type: string
+        description: >-
+          Type of node in Service Function Chaining Architecture
+        constraints:
+          - valid_values: [unaware, classifier, sf, sff, UNAWARE, CLASSIFIER, SF, SFF]
+        default: unaware
+      sf_type:
+        type: string
+        description: >-
+          Type of Service Function.
+             NOTE - This needs to map with Service Function Type in ODL to
+             support VNFFG. Service Function Type is a mandatory param in ODL
+             SFC.
+        required: false
+  tosca.capabilities.Compute.Container.Architecture:
+    derived_from: tosca.capabilities.Container
+    properties:
+      mem_page_size:
+        type: string
+        description: >-
+          Memory page allocation size. If a VM requires
+          hugepages, it should choose huge or size_2MB
+          or size_1GB. If the VM prefers hugepages, it
+          should choose prefer_huge.
+             huge         - Require hugepages (either 2MB or 1GB)
+             normal       - Does not require hugepages
+             size_2MB     - Requires 2MB hugepages
+             size_1GB     - Requires 1GB hugepages
+             prefer_huge  - Application prefers hugepages
+          NOTE - huge and normal are the only values defined in standards as of
+                 now.
+        required: false
+        constraints:
+          - valid_values: [normal, huge, size_2MB, size_1GB, prefer_huge, NORMAL, HUGE, SIZE_2MB, SIZE_1GB, PREFER_HUGE]
+      cpu_allocation:
+        type: tosca.datatypes.compute.Container.Architecture.CPUAllocation
+        required: false
+      numa_nodes:
+        type: map
+        required: false
+        entry_schema:
+          type: tosca.datatypes.compute.Container.Architecture.NUMA
+
+
+node_types:
+  tosca.nodes.nfv.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      description:
+        type: string
+        required: false
+      image:
+        description: >-
+          If an image is specified here, it is assumed that the image
+          is already present in the RO or VIM and not in the package.
+        type: string
+        required: false
+      image_checksum:
+        type: string
+        description: >-
+          Image checksum for the image in RO or VIM.
+        required: false
+      cloud_init:
+        description: >-
+          Inline cloud-init specification
+        required: false
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+      monitoring_param_1:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      mgmt_interface:
+        type: tosca.capabilities.nfv.riftio.mgmt_interface
+      monitoring_param:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      numa_extension:
+        type: tosca.capabilities.nfv.riftio.numa_extension
+      vswitch_epa:
+        type: tosca.capabilities.nfv.riftio.vswitch_epa
+      hypervisor_epa:
+        type: tosca.capabilities.nfv.riftio.hypervisor_epa
+      host_epa:
+        type: tosca.capabilities.nfv.riftio.host_epa
+  tosca.nodes.nfv.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+        default: VPORT
+        constraints:
+          - valid_values: [VPORT]
+      name:
+        description: Name of the connection point
+        type: string
+        required: false
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: >-
+          Specifies the type of virtual interface
+             between VM and host.
+             VIRTIO          - Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH - Use PCI-PASSTHROUGH interface.
+             SR-IOV          - Use SR-IOV interface.
+             E1000           - Emulate E1000 interface.
+             RTL8139         - Emulate RTL8139 interface.
+             PCNET           - Emulate PCNET interface.
+             OM-MGMT         - Used to specify openmano mgmt external-connection type
+        type: string
+        constraints:
+          - valid_values: [OM-MGMT, VIRTIO, E1000, SR-IOV]
+      bandwidth:
+        type: integer
+        description: Aggregate bandwidth of the NIC
+        constraints:
+          - greater_or_equal: 0
+        required: false
+      vpci:
+        type: string
+        description: >-
+          Specifies the virtual PCI address. Expressed in
+          the following format dddd:dd:dd.d. For example
+          0000:00:12.0. This information can be used to
+          pass as metadata during the VM creation.
+        required: false
+    capabilities:
+      sfc:
+        type: tosca.capabilities.nfv.riftio.sfc
+  tosca.nodes.nfv.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      member_index:
+        type: integer
+        constraints:
+          - greater_or_equal: 1
+        description: Index of the VNF in the NS
+        required: false
+      start_by_default:
+        type: boolean
+        default: true
+        description: Start this VNF on NS instantiate
+      logo:
+        type: string
+        description: >-
+          Logo to display with the VNF in the orchestrator
+        required: false
+    capabilities:
+      mgmt_interface:
+        type: tosca.capabilities.nfv.riftio.mgmt_interface
+      monitoring_param:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      sfc:
+        type: tosca.capabilities.nfv.riftio.sfc
+  tosca.nodes.nfv.riftio.ELAN:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+        required: false
+      network_name:
+        type: string
+        description: >-
+          Name of network in VIM account. This is used to indicate
+          pre-provisioned network name in cloud account.
+        required: false
+      root_bandwidth:
+        type: integer
+        description: >-
+          This is the aggregate bandwidth
+        constraints:
+          - greater_or_equal: 0
+        required: false
+      leaf_bandwidth:
+        type: integer
+        description: >-
+          This is the bandwidth of branches
+        constraints:
+          - greater_or_equal: 0
+        required: false
+  tosca.nodes.nfv.riftio.FP1:
+    derived_from: tosca.nodes.nfv.FP
+    properties:
+      id:
+        type: integer
+        required: false
+      policy:
+        type: tosca.nfv.datatypes.policyType
+        required: true
+        description: policy to use to match traffic for this FP
+      path:
+        type: list
+        required: true
+        entry_schema:
+          type: tosca.nfv.datatypes.pathType
+      cp:
+        type: tosca.nfv.datatypes.pathType
+        required: true
+
+
+
+artifact_types:
+  tosca.artifacts.Deployment.riftio.cloud_init_file:
+    derived_from: tosca.artifacts.Deployment
+    file:
+      type: string
+
+  tosca.artifacts.Deployment.Image.riftio.QCOW2:
+    derived_from: tosca.artifacts.Deployment.Image.VM.QCOW2
+    image_checksum:
+      required: false
+      type: string
+
+group_types:
+  tosca.groups.nfv.VNFFG:
+    derived_from: tosca.groups.Root
+    properties:
+      vendor:
+        type: string
+        required: true
+        description: name of the vendor who generated this VNFFG
+      version:
+        type: string
+        required: true
+        description: version of this VNFFG
+      number_of_endpoints:
+        type: integer
+        required: true
+        description: count of the external endpoints included in this VNFFG
+      dependent_virtual_link:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to a VLD used in this Forwarding Graph
+      connection_point:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to Connection Points forming the VNFFG
+      constituent_vnfs:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to a list of VNFD used in this VNF Forwarding Graph
+    members: [ tosca.nodes.nfv.FP ]
+
+  tosca.groups.nfv.riftio.scaling:
+    derived_from: tosca.groups.Root
+    properties:
+      name:
+        type: string
+      min_instances:
+        type: integer
+        description: >-
+          Minimum instances of the scaling group which are allowed.
+          These instances are created by default when the network service
+          is instantiated.
+      max_instances:
+        type: integer
+        description: >-
+          Maximum instances of this scaling group that are allowed
+          in a single network service. The network service scaling
+          will fail, when the number of service group instances
+          exceed the max-instance-count specified.
+      cooldown_time:
+        type: integer
+        description: >-
+          The duration after a scaling-in/scaling-out action has been
+          triggered, for which there will be no further optional scaling actions
+      ratio:
+        type: map
+        entry_schema:
+          type: integer
+        description: >-
+          Specify the number of instances of each VNF to instantiate
+          for a scaling action
+    members: [tosca.nodes.nfv.VNF]
+    interfaces:
+      action:
+        type: tosca.interfaces.nfv.riftio.scaling.action
+
+interface_types:
+  tosca.interfaces.nfv.riftio.scaling.action:
+    pre_scale_in:
+      description: Operation to execute before a scale in
+    post_scale_in:
+      description: Operation to execute after a scale in
+    pre_scale_out:
+      description: Operation to execute before a scale out
+    post_scale_out:
+      description: Operation to execute after a scale out
+
+policy_types:
+  tosca.policies.nfv.riftio.placement:
+    derived_from: tosca.policies.Placement
+    properties:
+      name:
+        type: string
+        description: >-
+          Place group construct to define the compute resource placement strategy
+          in cloud environment
+      requirement:
+        type: string
+        description: >-
+          This is free text space used to describe the intent/rationale
+          behind this placement group. This is for human consumption only
+      strategy:
+        type: string
+        description: >-
+          Strategy associated with this placement group
+             Following values are possible
+               COLOCATION - Colocation strategy imply intent to share the physical
+                            infrastructure (hypervisor/network) among all members
+                            of this group.
+               ISOLATION - Isolation strategy imply intent to not share the physical
+                           infrastructure (hypervisor/network) among the members
+                           of this group.
+        constraints:
+          valid_values:
+            - COLOCATION
+            - ISOLATION
+  tosca.policies.nfv.riftio.vnf_configuration:
+    derived_from: tosca.policies.Root
+    properties:
+      config:
+        type: tosca.datatypes.nfv.riftio.vnf_configuration
+      initial_config:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.config_primitive
+  tosca.policies.nfv.riftio.vnf_service_primitives:
+    derived_from: tosca.policies.Root
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: primitive_parameter
+  tosca.policies.nfv.riftio.ns_service_primitives:
+    derived_from: tosca.policies.Root
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: primitive_parameter
+      parameter_group:
+        type: tosca.datatypes.nfv.riftio.primitive_parameter_group
+        description: >-
+          Grouping of parameters which are logically grouped in UI
+        required: false
+      vnf_primitive_group:
+        type: tosca.datatypes.nfv.riftio.vnf_primitive_group
+        description: >-
+          List of service primitives grouped by VNF
+        required: false
+      user_defined_script:
+        type: string
+        description: >-
+          A user defined script
+        required: false
+  tosca.policies.nfv.riftio.initial_config_primitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      seq:
+        type: integer
+        description: >-
+          Order in which to apply, when multiple ones are defined
+        default: 0
+        constraints:
+          - greater_or_equal: 0
+      parameter:
+        type: map
+        entry_schema:
+          type: string
+      user_defined_script:
+        type: string
+  tosca.policies.nfv.riftio.users:
+    derived_from: tosca.policies.Root
+    description: >-
+      Specify list of public keys to be injected as
+      part of NS instantiation. Use default as entry,
+      to specify the key pairs for default user.
+    properties:
+      user_info:
+        type: string
+        description: >-
+          The user's real name
+        required: false
+      key_pairs:
+        type: map
+        description: >-
+          List of public keys for the user
+        entry_schema:
+          type: string
+        required: true
+  tosca.policies.nfv.riftio.dependency:
+    derived_from: tosca.policies.Root
+    description: >-
+      Map dependency between VDUs or VNFs
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Parameter and value for the config
+  tosca.nfv.datatypes.policyType:
+    properties:
+      type:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [ ACL ]
+      criteria:
+        type: list
+        required: true
+        entry_schema:
+          type: tosca.nfv.datatypes.aclType
+topology_template:
+  node_templates:
+    new_vnfd:
+      type: tosca.nodes.nfv.riftio.VNF1
+      properties:
+        id: 2
+        vendor: RIFT.io
+        version: 1.0
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
index 2b244d7..7938485 100755
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
@@ -39,7 +39,23 @@
         'cloud-init-file',]
     IGNORE_PROPS = []
 
-    toscatype = 'tosca.nodes.Compute'
+    toscatype = 'tosca.nodes.nfv.VDU'
+
+    VALUE_TYPE_CONVERSION_MAP =  {
+    'integer': 'INT',
+    'string':'STRING',
+    'float':'DECIMAL'
+    }
+
+
+    TOSCA_MEM_SIZE = {
+        'huge': 'LARGE',
+        'normal': 'SMALL',
+        'size_2MB': 'SIZE_2MB',
+        'size_1GB': 'SIZE_1GB',
+        'prefer_huge': 'PREFER_LARGE'
+
+    }
 
     def __init__(self, log, nodetemplate, metadata=None):
         super(ToscaCompute, self).__init__(log,
@@ -54,6 +70,9 @@
         self._vnf = None
         self._yang = None
         self._id = self.name
+        self._monitor_param = []
+        self._mgmt_interface = {}
+        self._http_endpoint = None
 
     @property
     def image(self):
@@ -104,6 +123,39 @@
 
     def handle_capabilities(self):
 
+        def get_mgmt_interface(specs):
+            mgmt_intfce = {}
+            mgmt_intfce['vdu-id'] = self.id
+            if 'dashboard_params' in specs:
+                mgmt_intfce['dashboard-params'] = {'path':specs['dashboard_params']['path'], 'port':specs['dashboard_params']['port']}
+            if 'port' in specs:
+                mgmt_intfce['port'] = specs['port']
+            return mgmt_intfce;
+
+        def get_monitor_param(specs, monitor_id):
+            monitor_param = {}
+            monitor_param['id'] = monitor_id
+            if 'name' in specs:
+                monitor_param['name'] = specs['name']
+            if 'json_query_method' in specs:
+                monitor_param['json_query_method'] = specs['json_query_method'].upper()
+            if 'description' in specs:
+                monitor_param['description'] = specs['description']
+            if 'url_path' in specs:
+                monitor_param['http-endpoint-ref'] = specs['url_path']
+            if 'ui_data' in specs:
+                if 'widget_type' in specs['ui_data']:
+                    monitor_param['widget-type'] = specs['ui_data']['widget_type'].upper()
+                if 'units' in specs['ui_data']:
+                    monitor_param['units'] = specs['ui_data']['units']
+                if 'group_tag' in specs['ui_data']:
+                    monitor_param['group_tag'] = specs['ui_data']['group_tag']
+            if 'constraints' in specs:
+                if 'value_type' in specs['constraints']:
+                    monitor_param['value-type'] = ToscaCompute.VALUE_TYPE_CONVERSION_MAP[specs['constraints']['value_type']]
+
+            return monitor_param
+
         def get_vm_flavor(specs):
             vm_flavor = {}
             if 'num_cpus' in specs:
@@ -125,14 +177,133 @@
 
             return vm_flavor
 
+        def get_host_epa(specs):
+            host_epa = {}
+            if 'cpu_model' in specs:
+                host_epa["cpu-model"] = specs['cpu_model'].upper()
+            if 'cpu_arch' in specs:
+                host_epa["cpu-arch"] = specs['cpu_arch'].upper()
+            if 'cpu_vendor' in specs:
+                host_epa["cpu-vendor"] = specs['cpu_vendor'].upper()
+            if 'cpu_socket_count' in specs:
+                host_epa["cpu-socket-count"] = specs['cpu_socket_count']
+            if 'cpu_core_count' in specs:
+                host_epa["cpu-core-count"] = specs['cpu_core_count']
+            if 'cpu_core_thread_count' in specs:
+                host_epa["cpu-core-thread-count"] = specs['cpu_core_thread_count']
+            if 'om_cpu_model_string' in specs:
+                host_epa["om-cpu-model-string"] = specs['om_cpu_model_string']
+            if 'cpu_feature' in specs:
+                cpu_feature_prop = []
+                for spec in specs['cpu_feature']:
+                    cpu_feature_prop.append({'feature':spec.upper()})
+                host_epa['cpu-feature'] = cpu_feature_prop
+            if 'om_cpu_feature' in specs:
+                cpu_feature_prop = []
+                for spec in specs['om_cpu_feature']:
+                    cpu_feature_prop.append({'feature':spec})
+                host_epa['om-cpu-feature'] = cpu_feature_prop
+            return host_epa;
+
+        def get_vswitch_epa(specs):
+            vswitch_epa = {}
+            if 'ovs_acceleration' in specs:
+                vswitch_epa['ovs-acceleration'] = specs['ovs_acceleration'].upper()
+            if 'ovs_offload' in specs:
+                vswitch_epa['ovs-offload'] = specs['ovs_offload'].upper()
+            return vswitch_epa
+
+        def get_hypervisor_epa(specs):
+            hypervisor_epa = {}
+            if 'type' in specs:
+                hypervisor_epa['type'] = specs['type'].upper()
+            if 'version' in specs:
+                hypervisor_epa['version'] = str(specs['version'])
+
+            return hypervisor_epa
+
+        def get_guest_epa(specs, nfv_comput_specs):
+            guest_epa = {}
+            guest_epa['numa-node-policy'] = {}
+            guest_epa['numa-node-policy']['node'] = []
+            if 'mem_policy' in specs:
+                guest_epa['numa-node-policy']['mem-policy'] = specs['mem_policy'].upper()
+            if 'node_cnt' in specs:
+                guest_epa['numa-node-policy']['node-cnt'] = specs['node_cnt']
+            if 'node' in specs:
+                for node in specs['node']:
+                    node_prop = {}
+                    if 'id' in node:
+                            node_prop['id'] = node['id']
+                    if 'mem_size' in node:
+                        if 'MiB' in node['mem_size'] or 'MB' in node['mem_size']:
+                            node_prop['memory-mb'] = int(node['mem_size'].replace('MB',''))
+                        else:
+                            err_msg = "Specify mem_size of NUMA extension should be in MB"
+                            raise ValidationError(message=err_msg)
+                    if 'vcpus' in node:
+                        vcpu_lis =[]
+                        for vcpu in node['vcpus']:
+                            vcpu_lis.append({'id': vcpu})
+                        node_prop['vcpu'] = vcpu_lis
+                    if 'om_numa_type' in node:
+                        numa_type = node['om_numa_type']
+                        if 'paired-threads' == numa_type:
+                            node_prop['paired_threads'] = {}
+                            node_prop['paired_threads']['num_paired_threads'] = node['paired_threads']['num_paired_threads']
+                        elif 'threads' == numa_type:
+                            if 'num_threads' in node:
+                                node_prop['num_threads'] = node['num_threads']
+                        elif 'cores' == numa_type:
+                            if 'num_cores' in node:
+                                node_prop['num_cores'] = node['num_cores']
+                        else:
+                            err_msg = "om_numa_type should be among cores, paired-threads or threads"
+                            raise ValidationError(message=err_msg)
+                    guest_epa['numa-node-policy']['node'].append(node_prop)
+
+            if 'mem_page_size' in nfv_comput_specs:
+                guest_epa['mempage-size'] = self.TOSCA_MEM_SIZE[nfv_comput_specs['mem_page_size']]
+            if 'cpu_allocation' in nfv_comput_specs:
+                if 'cpu_affinity' in nfv_comput_specs['cpu_allocation']:
+                     guest_epa['cpu-pinning-policy'] = nfv_comput_specs['cpu_allocation']['cpu_affinity'].upper()
+                     guest_epa['trusted-execution'] = False
+                if 'thread_allocation' in nfv_comput_specs['cpu_allocation']:
+                     guest_epa['cpu-thread-pinning-policy'] = nfv_comput_specs['cpu_allocation']['thread_allocation'].upper()
+
+            return guest_epa
+
         tosca_caps = self.get_tosca_caps()
         self.log.debug(_("VDU {0} tosca capabilites: {1}").
                        format(self.name, tosca_caps))
-
-        if 'host' in tosca_caps:
-            self.properties['vm-flavor'] = get_vm_flavor(tosca_caps['host'])
+        if 'nfv_compute' in tosca_caps:
+            self.properties['vm-flavor'] = get_vm_flavor(tosca_caps['nfv_compute'])
             self.log.debug(_("VDU {0} properties: {1}").
                            format(self.name, self.properties))
+        if 'host_epa' in tosca_caps:
+            self.properties['host-epa'] = get_host_epa(tosca_caps['host_epa'])
+        if 'hypervisor_epa' in tosca_caps:
+            self.properties['hypervisor-epa'] = get_hypervisor_epa(tosca_caps['hypervisor_epa'])
+        if 'vswitch_epa' in tosca_caps:
+            self.properties['vswitch-epa'] = get_vswitch_epa(tosca_caps['vswitch_epa'])
+        if 'numa_extension' in tosca_caps:
+            self.properties['guest-epa'] = get_guest_epa(tosca_caps['numa_extension'], tosca_caps['nfv_compute'])
+        if 'monitoring_param' in tosca_caps:
+            self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param'], '1'))
+        if 'monitoring_param_1' in tosca_caps:
+            self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param_1'], '2'))
+        if 'mgmt_interface' in tosca_caps:
+            self._mgmt_interface = get_mgmt_interface(tosca_caps['mgmt_interface'])
+        if len(self._mgmt_interface) > 0:
+            prop = {}
+            if 'dashboard-params' in self._mgmt_interface:
+                if 'path' in self._mgmt_interface['dashboard-params']:
+                    prop['path'] = self._mgmt_interface['dashboard-params']['path']
+                if 'port' in self._mgmt_interface['dashboard-params']:
+                    prop['port'] = self._mgmt_interface['dashboard-params']['port']
+                self._http_endpoint = prop
+
+
 
     def handle_artifacts(self):
         if self.artifacts is None:
@@ -145,20 +316,20 @@
             if isinstance(props, dict):
                 details = {}
                 for name, value in props.items():
-                    if name == 'type':
+                    if name == 'type' and value == 'tosca.artifacts.Deployment.Image.riftio.QCOW2':
                         prefix, type_ = value.rsplit('.', 1)
                         if type_ == 'QCOW2':
                             details['type'] = 'qcow2'
-                        else:
-                            err_msg = _("VDU {0}, Currently only QCOW2 images "
-                                        "are supported in artifacts ({1}:{2})"). \
-                                        format(self.name, key, value)
-                            self.log.error(err_msg)
-                            raise ValidationError(message=err_msg)
+                            self._image = props['file']
+                            self.properties['image'] = os.path.basename(props['file'])
+                    elif name == 'type' and value == 'tosca.artifacts.Deployment.riftio.cloud_init_file':
+                        details['cloud_init_file'] = os.path.basename(props['file'])
+                        self._cloud_init = props['file']
+                        self.properties['cloud_init_file'] = os.path.basename(props['file'])
                     elif name == 'file':
                         details['file'] = value
                     elif name == 'image_checksum':
-                        details['image_checksum'] = value
+                        self.properties['image_checksum'] = value
                     else:
                         self.log.warn(_("VDU {0}, unsuported attribute {1}").
                                       format(self.name, name))
@@ -192,6 +363,7 @@
         return None
 
     def update_image_checksum(self, in_file):
+
         # Create image checksum
         # in_file is the TOSCA yaml file location
         if self._image is None:
@@ -249,7 +421,15 @@
             return None
         self._update_properties_for_model()
         props = convert_keys_to_python(self.properties)
+
+        for monitor_param in self._monitor_param:
+            monitor_props = convert_keys_to_python(monitor_param)
+            vnfd.monitoring_param.add().from_dict(monitor_props)
         try:
+            if len(self._mgmt_interface) > 0:
+                vnfd.mgmt_interface.from_dict(convert_keys_to_python(self._mgmt_interface))
+            if self._http_endpoint:
+                vnfd.http_endpoint.add().from_dict(convert_keys_to_python(self._http_endpoint))
             vnfd.vdu.add().from_dict(props)
         except Exception as e:
             err_msg = _("{0} Exception vdu from dict {1}: {2}"). \
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_graph.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_graph.py
new file mode 100644
index 0000000..7b8657d
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_graph.py
@@ -0,0 +1,62 @@
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
+
+TARGET_CLASS_NAME = 'ToscaForwardingGraph'
+class ToscaForwardingGraph(ManoResource):
+	'''Translate TOSCA group type tosca.groups.nfv.VNFFG'''
+	toscatype = 'tosca.groups.nfv.VNFFG'
+
+	def __init__(self, log, group, metadata=None):
+		#super(ToscaForwardingGraph, self).__init__(log, nodetemplate, type_='forwardgraph', metadata=metadata)
+		super(ToscaForwardingGraph, self).__init__(log,
+                                          group,
+                                          type_="vnfgd",
+                                          metadata=metadata)
+		self.name = group.name
+		self.type_ = 'vnfgd'
+		self.metadata = metadata
+		self.group = group
+		self.properties = {}
+		self.classifiers = []
+		self.rsp = []
+		self.log = log
+
+	def get_tosca_group_props(self):
+	        tosca_props = {}
+	        for prop in self.group.get_properties_objects():
+	            if isinstance(prop.value, GetInput):
+	                tosca_props[prop.name] = {'get_param': prop.value.input_name}
+	            else:
+	                tosca_props[prop.name] = prop.value
+	        return tosca_props
+
+	def handle_properties(self, nodes, groups):
+		self.properties['name'] =  self.name
+		self.properties['vendor'] =  self.metadata['vendor']
+		self.properties['id'] =  self.id
+		self.properties['classifier'] = []
+		self.properties['rsp'] = []
+
+		tosca_props =   self.get_tosca_group_props()
+		forwarding_paths = []
+		for member in self.group.members:
+			forwarding_paths.append(member)
+
+		for forwarding_path in forwarding_paths:
+			node = self.get_node_with_name(forwarding_path, nodes)
+			if node.classifier is not None:
+				self.properties['classifier'].append(node.classifier)
+			if node.rsp is not None:
+				self.properties['rsp'].append(node.rsp)
+
+	def generate_yang_model_gi(self, nsd, vnfds):
+		try:
+			nsd.vnffgd.add().from_dict(self.properties)
+		except Exception as e:
+			err_msg = "Error updating VNNFG to nsd"
+			self.log.error(err_msg)
+			raise e
+
+	def generate_yang_model(self, nsd, vnfds, use_gi=False):
+		if use_gi:
+			return self.generate_yang_model_gi(nsd, vnfds)
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_path.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_path.py
new file mode 100644
index 0000000..12b7062
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_path.py
@@ -0,0 +1,91 @@
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+import uuid
+
+
+TARGET_CLASS_NAME = 'ToscaForwardingPath'
+class ToscaForwardingPath(ManoResource):
+	'''Translate TOSCA node type tosca.nodes.nfv.FP'''
+
+	toscatype = 'tosca.nodes.nfv.FP'
+
+
+	def __init__(self, log, node, metadata=None):
+		super(ToscaForwardingPath, self).__init__(log, node, type_='forwarding_path', metadata=metadata)
+		self.metadata = metadata
+		self.classifier = None
+		self.rsp = None
+		self.cp = None
+		self.properties = {}
+
+	def handle_forwarding_path_dependencies(self, nodes, vnf_type_to_capability_substitution_mapping):
+
+		def get_classifier(specs):
+			classifier_prop = {}
+			classifier_prop['name'] = 'VNFFG -' + str(self.name)
+			classifier_prop['id'] = self.id
+			if 'policy' in specs:
+				classifier_prop['match_attributes'] = []
+				policy = specs['policy']
+				if 'criteria' in policy:
+					match_prop = {}
+					match_prop['id'] = str(uuid.uuid1())
+					for criteria in policy['criteria']:
+						if 'ip_dst_prefix' in criteria:
+							match_prop['destination_ip_address'] = criteria['ip_dst_prefix']
+						if 'ip_proto' in criteria:
+							match_prop['ip_proto'] = criteria['ip_proto']
+						if 'source_port_range' in criteria:
+							match_prop['source_port'] =  int(criteria['source_port_range'])
+						if 'destination_port_range' in criteria:
+							match_prop['destination_port'] =  int(criteria['destination_port_range'])
+					classifier_prop['match_attributes'].append(match_prop)
+			if 'cp' in specs:
+				cp_node_name = specs['cp']['capability']
+				cp_node = self.get_node_with_name(cp_node_name, nodes)
+				if cp_node:
+					classifier_prop['vnfd_connection_point_ref'] = cp_node.cp_name
+			if 'cp' in specs:
+				vnf_node_name = specs['cp']['forwarder']
+				vnf_node  = self.get_node_with_name(vnf_node_name, nodes)
+				if vnf_node:
+					classifier_prop['vnfd_id_ref'] = vnf_node.id
+					classifier_prop['member_vnf_index_ref'] = vnf_node.get_member_vnf_index()
+			return classifier_prop
+
+		def get_rsp(specs):
+			rsp = {}
+			rsp['id'] = str(uuid.uuid1())
+			rsp['name'] = 'VNFFG-RSP-' + str(self.name)
+			rsp['vnfd_connection_point_ref'] =  []
+			if 'path' in specs:
+				fp_connection_point = []
+				vnf_index   = 1
+				order_index = 1
+				visited_cps = []
+				for rsp_item in specs['path']:
+					vnf_node_name       = rsp_item['forwarder']
+					conn_forwarder      = rsp_item['capability']
+					vnf_node            = self.get_node_with_name(vnf_node_name, nodes)					
+
+					for subs_mapping in vnf_type_to_capability_substitution_mapping[vnf_node.vnf_type]:
+						prop = {}
+						if conn_forwarder in subs_mapping:
+							fp_connection_point.append(subs_mapping[conn_forwarder])
+							cp_node_name = subs_mapping[conn_forwarder]
+							cp_node = self.get_node_with_name(cp_node_name, nodes)
+							if cp_node.cp_name not in visited_cps:
+								prop['vnfd_connection_point_ref'] = cp_node.cp_name
+								prop['vnfd_id_ref'] = vnf_node.id
+								prop['member_vnf_index_ref'] = vnf_node.get_member_vnf_index()
+								prop['order'] = order_index
+								rsp['vnfd_connection_point_ref'].append(prop)
+								vnf_index = vnf_index + 1
+								order_index = order_index + 1
+								visited_cps.append(cp_node.cp_name)
+				return rsp
+
+		tosca_props = self.get_tosca_props()
+		self.classifier = get_classifier(tosca_props)
+		self.rsp = get_rsp(tosca_props)
+		if self.classifier and self.rsp:
+			self.classifier['rsp_id_ref'] = self.rsp['id']
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
index 7c03d56..9b7cd03 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
@@ -18,6 +18,7 @@
 from rift.mano.tosca_translator.common.utils import _
 from rift.mano.tosca_translator.common.utils import convert_keys_to_python
 from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
 
 from toscaparser.common.exception import ValidationError
 
@@ -29,18 +30,16 @@
 class ToscaInitialConfig(ManoResource):
     '''Translate TOSCA node type tosca.policies.InitialConfigPrimitive.'''
 
-    toscatype = 'tosca.policies.riftio.InitialConfigPrimitive'
+    toscatype = 'tosca.policies.nfv.riftio.initial_config_primitive'
 
     IGNORE_PROPS = []
 
-    def __init__(self, log, primitive, metadata=None):
+    def __init__(self, log, policy, metadata=None):
         # TODO(Philip):Not inheriting for ManoResource, as there is no
         # instance from parser
         self.log = log
-        for name, details in primitive.items():
-            self.name = name
-            self.details = details
-            break
+        self.name = policy.name
+        self.policy = policy
         self.type_ = 'initial-cfg'
         self.metadata = metadata
         self.properties = {}
@@ -50,7 +49,7 @@
         return "%s(%s)" % (self.name, self.type)
 
     def handle_properties(self, nodes, groups):
-        tosca_props = self.details
+        tosca_props = self.get_policy_props()
         self.log.debug(_("{0} with tosca properties: {1}").
                        format(self, tosca_props))
         self.properties['name'] = tosca_props['name']
@@ -71,6 +70,14 @@
 
         self.log.debug(_("{0} properties: {1}").format(self, self.properties))
 
+    def get_policy_props(self):
+            tosca_props = {}
+            for prop in self.policy.get_properties_objects():
+                if isinstance(prop.value, GetInput):
+                    tosca_props[prop.name] = {'get_param': prop.value.input_name}
+                else:
+                    tosca_props[prop.name] = prop.value
+            return tosca_props
     def get_yang_model_gi(self, nsd, vnfds):
         props = convert_keys_to_python(self.properties)
         try:
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
index b446e51..a9f9c77 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
@@ -39,48 +39,64 @@
                                            nodetemplate,
                                            type_='vld',
                                            metadata=metadata)
+        self._vld = {}
+        self._ip_profile = {}
 
-    def handle_properties(self):
+    def handle_vld_properties(self, nodes, vnf_type_substitution_mapping):
+        def get_vld_props(specs):
+            vld_prop = {}
+            vld_prop['id'] = self.id
+            vld_prop['name'] = self.name
+            vld_prop['short-name'] = self.name
+            vld_prop['type'] = self.get_type()
+            vld_prop['ip_profile_ref'] = "{0}_{1}".format(self.nodetemplate.name, "ip")
+            if 'description' in specs:
+                vld_prop['description'] = specs['description']
+            if 'vendor' in specs:
+                 vld_prop['vendor'] = specs['vendor']
+
+            index_count = 1
+            vld_connection_point_list = []
+            for node in nodes:
+                if node.type == "vnfd":
+                    substitution_mapping_list = vnf_type_substitution_mapping[node.vnf_type];
+                    for req_key, req_value in node._reqs.items():
+                        for mapping in substitution_mapping_list:
+                            if req_key in mapping:
+                                # link the VLD to the connection point
+                                node_vld = self.get_node_with_name(mapping[req_key][0], nodes)
+                                if node:
+                                    #print()
+                                    prop = {}
+                                    prop['member-vnf-index-ref'] = node.get_member_vnf_index()
+                                    prop['vnfd-connection-point-ref'] = node_vld.cp_name
+                                    prop['vnfd-id-ref'] = node_vld.vnf._id
+                                    vld_connection_point_list.append(prop)
+                                    index_count += 1
+                if len(vld_connection_point_list) > 1:
+                    vld_prop['vnfd-connection-point-ref'] = vld_connection_point_list
+            return vld_prop
+
+        def get_ip_profile_props(specs):
+            ip_profile_prop = {}
+            ip_profile_param = {}
+            if 'ip_profile_ref' in self._vld:
+                ip_profile_prop['name'] = self._vld['ip_profile_ref']
+
+            if 'description' in specs:
+                ip_profile_prop['description'] = specs['description']
+            if 'gateway_ip' in specs:
+                ip_profile_param['gateway-address'] = specs['gateway_ip']
+            if 'ip_version' in specs:
+                ip_profile_param['ip-version'] = 'ipv' + str(specs['ip_version'])
+            if 'cidr' in specs:
+                ip_profile_param['subnet-address'] = specs['cidr']
+
+            ip_profile_prop['ip-profile-params'] = ip_profile_param
+            return ip_profile_prop
         tosca_props = self.get_tosca_props()
-
-        if 'cidr' in tosca_props.keys():
-            self.log.warn(_("Support for subnet not yet "
-                            "available. Ignoring it"))
-        net_props = {}
-        for key, value in tosca_props.items():
-            if key in self.NETWORK_PROPS:
-                if key == 'network_name':
-                    net_props['name'] = value
-                elif key == 'network_id':
-                    net_props['id'] = value
-            else:
-                net_props[key] = value
-
-        net_props['type'] = self.get_type()
-
-        if 'name' not in net_props:
-            # Use the node name as network name
-            net_props['name'] = self.name
-
-        if 'short_name' not in net_props:
-            # Use the node name as network name
-            net_props['short-name'] = self.name
-
-        if 'id' not in net_props:
-            net_props['id'] = self.id
-
-        if 'description' not in net_props:
-            net_props['description'] = self.description
-
-        if 'vendor' not in net_props:
-            net_props['vendor'] = self.vendor
-
-        if 'version' not in net_props:
-            net_props['version'] = self.version
-
-        self.log.debug(_("Network {0} properties: {1}").
-                       format(self.name, net_props))
-        self.properties = net_props
+        self._vld = get_vld_props(tosca_props)
+        self._ip_profile = get_ip_profile_props(tosca_props)
 
     def get_type(self):
         """Get the network type based on propery or type derived from"""
@@ -107,9 +123,12 @@
         return "ELAN"
 
     def generate_yang_model_gi(self, nsd, vnfds):
-        props = convert_keys_to_python(self.properties)
+        props            = convert_keys_to_python(self.properties)
+        vld_props        = convert_keys_to_python(self._vld)
+        ip_profile_props = convert_keys_to_python(self._ip_profile)
         try:
-            nsd.vld.add().from_dict(props)
+            nsd.vld.add().from_dict(vld_props)
+            nsd.ip_profiles.add().from_dict(ip_profile_props)
         except Exception as e:
             err_msg = _("{0} Exception vld from dict {1}: {2}"). \
                       format(self, props, e)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
index 04e3a59..3574355 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
@@ -40,6 +40,8 @@
                                                metadata=metadata)
         # Default order
         self.order = 0
+        self.vnf = None
+        self.cp_name = None
         pass
 
     def handle_properties(self):
@@ -68,28 +70,32 @@
                 self.log.warn(err_msg)
                 raise ValidationError(message=err_msg)
 
+        self.cp_name = port_props['name']
         self.properties = port_props
 
     def handle_requirements(self, nodes):
         tosca_reqs = self.get_tosca_reqs()
+        tosca_caps = self.get_tosca_caps()
         self.log.debug("VNF {0} requirements: {1}".
                        format(self.name, tosca_reqs))
 
         vnf = None  # Need vnf ref to generate cp refs in vld
         vld = None
+        '''
         if len(tosca_reqs) != 2:
             err_msg = _("Invalid configuration as incorrect number of "
                         "requirements for CP {0} are specified"). \
                         format(self)
             self.log.error(err_msg)
             raise ValidationError(message=err_msg)
-
+        '''
         for req in tosca_reqs:
             if 'virtualBinding' in req:
                 target = req['virtualBinding']['target']
                 node = self.get_node_with_name(target, nodes)
                 if node:
                     vnf = node.vnf
+                    self.vnf = node._vnf
                     if not vnf:
                         err_msg = _("No vnfs linked to a VDU {0}"). \
                                     format(node)
@@ -129,17 +135,20 @@
                     self.log.error(err_msg)
                     raise ValidationError(message=err_msg)
 
-        if vnf and vld:
+        if 'sfc' in tosca_caps and vnf:
+            if 'sfc_type' in tosca_caps['sfc']:
+                vnf.properties['service-function-chain'] = tosca_caps['sfc']['sfc_type'].upper()
+            if 'sf_type' in tosca_caps['sfc']:
+                vnf.properties['service-function-type'] = tosca_caps['sfc']['sf_type']
+
+        if vnf:
             cp_ref = {}
             cp_ref['vnfd-connection-point-ref'] = self.properties['name']
             cp_ref['vnfd-id-ref'] = vnf.properties['id']
             cp_ref['member-vnf-index-ref'] = \
                             vnf._const_vnfd['member-vnf-index']
-            if 'vnfd-connection-point-ref' not in vld.properties:
-                vld.properties['vnfd-connection-point-ref'] = []
-            vld.properties['vnfd-connection-point-ref'].append(cp_ref)
         else:
-            err_msg = _("CP {0}, VNF {1} or VL {2} not found"). \
+            err_msg = _("CP {0}, VNF {1} not found"). \
                       format(self, vnf, vld)
             self.log.error(err_msg)
             raise ValidationError(message=err_msg)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
index 29beca1..3c662dd 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
@@ -18,6 +18,7 @@
 from rift.mano.tosca_translator.common.utils import _
 from rift.mano.tosca_translator.common.utils import convert_keys_to_python
 from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
 
 from toscaparser.common.exception import ValidationError
 
@@ -45,7 +46,7 @@
                       'mgmt-interface']
     OPTIONAL_PROPS = ['version', 'vendor', 'http-endpoint', 'monitoring-param',
                       'connection-point']
-    IGNORE_PROPS = ['port']
+    IGNORE_PROPS = ['port', 'monitoring_param']
     TOSCA_CAPS = ['mgmt_interface', 'http_endpoint', 'monitoring_param_0',
                   'monitoring_param_1', 'connection_point']
 
@@ -57,6 +58,10 @@
         self._const_vnfd = {}
         self._vnf_config = {}
         self._vdus = []
+        self._policies = []
+        self._cps = []
+        self.vnf_type = nodetemplate.type
+        self._reqs = {}
 
     def map_tosca_name_to_mano(self, name):
         new_name = super().map_tosca_name_to_mano(name)
@@ -138,6 +143,8 @@
         if 'start_by_default' in vnf_props:
             self._const_vnfd['start-by-default'] = \
                                         vnf_props.pop('start_by_default')
+        if 'logo' in self.metadata:
+            vnf_props['logo'] = self.metadata['logo']
 
         self.log.debug(_("VNF {0} with constituent vnf: {1}").
                        format(self.name, self._const_vnfd))
@@ -186,45 +193,44 @@
         self.log.debug(_("VDU {0} properties: {1}").
                        format(self.name, self.properties))
 
-    def handle_requirements(self, nodes):
+    def handle_requirements(self, nodes, policies, vnf_type_to_vdus_map):
         tosca_reqs = self.get_tosca_reqs()
-        self.log.debug("VNF {0} requirements: {1}".
-                       format(self.name, tosca_reqs))
+        for req in tosca_reqs:
+            for key, value in req.items():
+                if 'target' in value:
+                    self._reqs[key] = value['target']
 
-        try:
-            for req in tosca_reqs:
-                if 'vdus' in req:
-                    target = req['vdus']['target']
-                    node = self.get_node_with_name(target, nodes)
-                    if node:
-                        self._vdus.append(node)
-                        node._vnf = self
-                        # Add the VDU id to mgmt-intf
-                        if 'mgmt-interface' in self.properties:
-                            self.properties['mgmt-interface']['vdu-id'] = \
-                                            node.id
-                            if 'vdu' in self.properties['mgmt-interface']:
-                                # Older yang
-                                self.properties['mgmt-interface'].pop('vdu')
-                    else:
-                        err_msg = _("VNF {0}, VDU {1} specified not found"). \
-                                  format(self.name, target)
-                        self.log.error(err_msg)
-                        raise ValidationError(message=err_msg)
+        for policy in policies:
+            if hasattr(policy, '_vnf_name') and policy._vnf_name == self.name:
+                self._policies.append(policy)
 
-        except Exception as e:
-            err_msg = _("Exception getting VDUs for VNF {0}: {1}"). \
-                      format(self.name, e)
-            self.log.error(err_msg)
-            raise e
 
-        self.log.debug(_("VNF {0} properties: {1}").
-                       format(self.name, self.properties))
+        if self.vnf_type in vnf_type_to_vdus_map:
+            for vdu_node_name in vnf_type_to_vdus_map[self.vnf_type]:
+                node = self.get_node_with_name(vdu_node_name, nodes)
+                if node:
+                    self._vdus.append(node)
+                    node._vnf = self
+                    # Add the VDU id to mgmt-intf
+                    if 'mgmt-interface' in self.properties:
+                        self.properties['mgmt-interface']['vdu-id'] = \
+                                        node.id
+                        if 'vdu' in self.properties['mgmt-interface']:
+                            # Older yang
+                            self.properties['mgmt-interface'].pop('vdu')
+                else:
+                    err_msg = _("VNF {0}, VDU {1} specified not found"). \
+                              format(self.name, vdu_node_name)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
 
     def generate_yang_model_gi(self, nsd, vnfds):
         vnfd_cat = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog()
         vnfd = vnfd_cat.vnfd.add()
         props = convert_keys_to_python(self.properties)
+        for key in ToscaNfvVnf.IGNORE_PROPS:
+            if key in props:
+                props.pop(key)
         try:
             vnfd.from_dict(props)
         except Exception as e:
@@ -237,6 +243,8 @@
         # Update the VDU properties
         for vdu in self._vdus:
             vdu.generate_yang_submodel_gi(vnfd)
+        for policy in self._policies:
+            policy.generate_yang_submodel_gi(vnfd)
 
         # Update constituent vnfd in nsd
         try:
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_placement_group.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_placement_group.py
new file mode 100644
index 0000000..8b8771b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_placement_group.py
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaPlacementGroup'
+
+
+class ToscaPlacementGroup(ManoResource):
+    '''Translate TOSCA node type tosca.policies.Scaling.'''
+
+    toscatype = 'tosca.policies.nfv.riftio.placement'
+
+    IGNORE_PROPS = []
+
+    def __init__(self, log, policy, metadata=None, vnf_name=None):
+        self.log = log
+        self.name = policy.name
+        self.type_ = 'place-grp'
+        self.metadata = metadata
+        self.policy = policy
+        self.properties = {}
+        self._vnf_name = vnf_name
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        tosca_props = self.get_policy_props()
+        self.properties['name'] = tosca_props['name']
+        self.properties['strategy'] = tosca_props['strategy']
+        self.properties['requirement'] = tosca_props['requirement']
+        if self._vnf_name is None:
+            self.properties['member-vnfd'] = []
+            index_count = 1
+            for node in self.policy.get_targets_list():
+                vnf_node = self.get_node_with_name(node.name, nodes)
+                prop = {}
+                prop['member-vnf-index-ref'] = index_count
+                prop['vnfd-id-ref'] = vnf_node.id
+                self.properties['member-vnfd'].append(prop)
+                index_count = index_count + 1
+        else:
+            self.properties['member-vdus'] = []
+            for node in self.policy.get_targets_list():
+                vdu_node = self.get_node_with_name(node.name, nodes)
+                prop = {}
+                prop['member-vdu-ref'] = vdu_node.name 
+                self.properties['member-vdus'].append(prop)
+
+    def get_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            if self._vnf_name is None:
+                nsd.placement_groups.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception nsd placement-groups from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        if use_gi:
+            return self.get_yang_model_gi(nsd, vnfds)
+        if 'placement-groups' not in nsd:
+            nsd['placement-groups'] = []
+
+        prim = {}
+        prim.update(self.properties)
+        nsd['placement-groups'].append(prim)
+
+    def generate_yang_submodel_gi(self, vnfd):
+        if vnfd is None:
+            return None
+        try:
+            props = convert_keys_to_python(self.properties)
+            vnfd.placement_groups.add().from_dict(props)   
+        except Exception as e:
+            err_msg = _("{0} Exception policy from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def get_policy_props(self):
+        tosca_props = {}
+
+        for prop in self.policy.get_properties_objects():
+            if isinstance(prop.value, GetInput):
+                tosca_props[prop.name] = {'get_param': prop.value.input_name}
+            else:
+                tosca_props[prop.name] = prop.value
+        return tosca_props
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
index 25246af..7f427f3 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
@@ -18,6 +18,7 @@
 from rift.mano.tosca_translator.common.utils import _
 from rift.mano.tosca_translator.common.utils import convert_keys_to_python
 from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
 
 from toscaparser.common.exception import ValidationError
 
@@ -29,54 +30,62 @@
 class ToscaScalingGroup(ManoResource):
     '''Translate TOSCA node type tosca.policies.Scaling.'''
 
-    toscatype = 'tosca.policies.riftio.ScalingGroup'
+    toscatype = 'tosca.groups.nfv.riftio.scaling'
 
     IGNORE_PROPS = []
 
-    def __init__(self, log, policy, metadata=None):
+    def __init__(self, log, group, metadata=None):
         # TODO(Philip):Not inheriting for ManoResource, as there is no
         # instance from parser
         self.log = log
-        for name, details in policy.items():
-            self.name = name
-            self.details = details
-            break
+        self.name = group.name
+        #self.details = details
+        self.group = group
         self.type_ = 'scale-grp'
         self.metadata = metadata
         self.properties = {}
 
     def __str__(self):
         return "%s(%s)" % (self.name, self.type)
+    def get_tosca_group_props(self):
+        tosca_props = {}
+        for prop in self.group.get_properties_objects():
+            if isinstance(prop.value, GetInput):
+                tosca_props[prop.name] = {'get_param': prop.value.input_name}
+            else:
+                tosca_props[prop.name] = prop.value
+        return tosca_props
 
     def handle_properties(self, nodes, groups):
-        tosca_props = self.details
+        tosca_props = self.get_tosca_group_props()
         self.log.debug(_("{0} with tosca properties: {1}").
                        format(self, tosca_props))
-        self.properties['name'] = tosca_props['name']
-        self.properties['max-instance-count'] = \
-                                tosca_props['max_instance_count']
-        self.properties['min-instance-count'] = \
-                                tosca_props['min_instance_count']
+        if 'name' in tosca_props:
+            self.properties['name'] = tosca_props['name']
+        if 'max_instance_count' in tosca_props:
+            self.properties['max-instance-count'] = tosca_props['max_instance_count']
+        if 'min_instance_count' in tosca_props:
+            self.properties['min-instance-count'] = tosca_props['min_instance_count']
         self.properties['vnfd-member'] = []
 
         def _get_node(name):
             for node in nodes:
                 if node.name == name:
                     return node
-
-        for member, count in tosca_props['vnfd_members'].items():
-            node = _get_node(member)
-            if node:
-                memb = {}
-                memb['member-vnf-index-ref'] = node.get_member_vnf_index()
-                memb['count'] = count
-                self.properties['vnfd-member'].append(memb)
-            else:
-                err_msg = _("{0}: Did not find the member node {1} in "
-                            "resources list"). \
-                          format(self, member)
-                self.log.error(err_msg)
-                raise ValidationError(message=err_msg)
+        if 'vnfd_members' in tosca_props:
+            for member, count in tosca_props['vnfd_members'].items():
+                node = _get_node(member)
+                if node:
+                    memb = {}
+                    memb['member-vnf-index-ref'] = node.get_member_vnf_index()
+                    memb['count'] = count
+                    self.properties['vnfd-member'].append(memb)
+                else:
+                    err_msg = _("{0}: Did not find the member node {1} in "
+                                "resources list"). \
+                              format(self, member)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
 
         def _validate_action(action):
             for group in groups:
@@ -85,25 +94,27 @@
             return False
 
         self.properties['scaling-config-action'] = []
-        for action, value in tosca_props['config_actions'].items():
-            conf = {}
-            if _validate_action(value):
-                conf['trigger'] = action
-                conf['ns-config-primitive-name-ref'] = value
-                self.properties['scaling-config-action'].append(conf)
-            else:
-                err_msg = _("{0}: Did not find the action {1} in "
-                            "config primitives"). \
-                          format(self, action)
-                self.log.error(err_msg)
-                raise ValidationError(message=err_msg)
+        if 'config_actions' in tosca_props:
+            for action, value in tosca_props['config_actions'].items():
+                conf = {}
+                if _validate_action(value):
+                    conf['trigger'] = action
+                    conf['ns-config-primitive-name-ref'] = value
+                    self.properties['scaling-config-action'].append(conf)
+                else:
+                    err_msg = _("{0}: Did not find the action {1} in "
+                                "config primitives"). \
+                              format(self, action)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
 
         self.log.debug(_("{0} properties: {1}").format(self, self.properties))
 
     def get_yang_model_gi(self, nsd, vnfds):
         props = convert_keys_to_python(self.properties)
         try:
-            nsd.scaling_group_descriptor.add().from_dict(props)
+            if len(self.properties['vnfd-member']) > 0:
+                nsd.scaling_group_descriptor.add().from_dict(props)
         except Exception as e:
             err_msg = _("{0} Exception nsd scaling group from dict {1}: {2}"). \
                       format(self, props, e)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
new file mode 100644
index 0000000..f90c187
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
@@ -0,0 +1,120 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaVnfConfiguration'
+
+
+class ToscaVnfConfiguration(ManoResource):
+    '''Translate TOSCA node type tosca.policies.Scaling.'''
+
+    toscatype = 'tosca.policies.nfv.riftio.vnf_configuration'
+
+    IGNORE_PROPS = []
+
+    def __init__(self, log, policy, metadata=None, vnf_name = None):
+        self.log = log
+        self.name = policy.name
+        self.type_ = 'place-grp'
+        self.metadata = metadata
+        self.policy = policy
+        self.properties = {}
+        self.linked_to_vnf = True
+        self._vnf_name = vnf_name
+        self._vnf_id = None
+        self.scripts = []
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        tosca_props = self.get_policy_props()
+        if self._vnf_name:
+            vnf_node = self.get_node_with_name(self._vnf_name, nodes)
+            self._vnf_id = vnf_node.id
+        self.properties["vnf-configuration"] = {}
+        prop = {}
+        prop["config-attributes"] = {}
+        prop["script"] = {}
+        if 'config' in tosca_props:
+            if 'config_delay' in tosca_props['config']:
+                prop["config-attributes"]['config-delay'] = tosca_props['config']['config_delay']
+            if 'config_priority' in tosca_props['config']:
+                prop["config-attributes"]['config-priority'] = tosca_props['config']['config_priority']
+            if 'config_template' in tosca_props['config']:
+                prop["config-template"] = tosca_props['config']['config_template']
+            if 'config_details' in tosca_props['config']:
+                if 'script_type' in tosca_props['config']['config_details']:
+                    prop["script"]["script-type"] = tosca_props['config']['config_details']['script_type']
+            if 'initial_config' in tosca_props:
+                prop['initial-config-primitive'] = []
+                # Convert each initial_config entry's parameter map into a name/value list
+                for init_config in tosca_props['initial_config']:
+                    if 'parameter' in init_config:
+                        parameters = init_config.pop('parameter')
+                        init_config['parameter'] = []
+                        for key, value in parameters.items():
+                            init_config['parameter'].append({'name': key, 'value': str(value)})
+                            if 'user_defined_script' in init_config:
+                                self.scripts.append('../scripts/{}'. \
+                                format(init_config['user_defined_script']))
+                    prop['initial-config-primitive'].append(init_config)
+
+        self.properties = prop
+
+    def generate_yang_submodel_gi(self, vnfd):
+        if vnfd is None:
+            return None
+        try:
+            props = convert_keys_to_python(self.properties)
+            vnfd.vnf_configuration.from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vdu from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def get_policy_props(self):
+        tosca_props = {}
+
+        for prop in self.policy.get_properties_objects():
+            if isinstance(prop.value, GetInput):
+                tosca_props[prop.name] = {'get_param': prop.value.input_name}
+            else:
+                tosca_props[prop.name] = prop.value
+        return tosca_props
+    def get_supporting_files(self, files, desc_id=None):
+        if not len(self.scripts):
+            return
+
+        if self._vnf_id not in files:
+            files[self._vnf_id] = []
+
+        for script in self.scripts:
+            files[self._vnf_id].append({
+                'type': 'script',
+                'name': script,
+            },)
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
index dbfaa62..2d6c3e1 100644
--- a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
+++ b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
@@ -23,6 +23,7 @@
 from rift.mano.tosca_translator.common.exception import ToscaModImportError
 from rift.mano.tosca_translator.conf.config import ConfigProvider as translatorConfig
 from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.tosca_template import ToscaTemplate
 
 
 class TranslateNodeTemplates(object):
@@ -161,6 +162,8 @@
             for key in FIELDS_MAP:
                 if key in tosca_meta.keys():
                     metadata[FIELDS_MAP[key]] = str(tosca_meta[key])
+            if 'logo' in tosca_meta:
+                metadata['logo'] = os.path.basename(tosca_meta['logo'])
         self.log.debug(_("Metadata {0}").format(metadata))
         self.metadata = metadata
 
@@ -188,8 +191,51 @@
 
         self.log.debug(_('Translating the node templates.'))
         # Copy the TOSCA graph: nodetemplate
+        all_node_templates                          = []
+        node_to_artifact_map                        = {}
+        vnf_type_to_vnf_node                        = {}
+        vnf_type_to_vdus_map                        = {}
+        vnf_type_substitution_mapping               = {}
+        vnf_type_to_capability_substitution_mapping = {}
         tpl = self.tosca.tpl['topology_template']['node_templates']
+        associated_vnfd_flag = False
+
         for node in self.nodetemplates:
+            all_node_templates.append(node)
+            if node.parent_type.type == 'tosca.nodes.nfv.riftio.VNF1':
+                vnf_type_to_vnf_node[node.type] = node.name
+        for node_key in tpl:
+            if 'artifacts' in tpl[node_key]:
+                node_to_artifact_map[node_key] = tpl[node_key]['artifacts']
+        for template in self.tosca.nested_tosca_templates_with_topology:
+            tpl_node = template.tpl['node_templates']
+            vnf_type = template.substitution_mappings.node_type
+
+            vnf_type_to_vdus_map[vnf_type]                        = []
+            vnf_type_substitution_mapping[vnf_type]               = []
+            vnf_type_to_capability_substitution_mapping[vnf_type] = []
+            vnf_type_to_capability_substitution_mapping[vnf_type] = []
+            policies                                              = []
+
+            for node in template.nodetemplates:
+                all_node_templates.append(node)
+            for node_key in tpl_node:
+                if 'artifacts' in tpl_node[node_key]:
+                    node_to_artifact_map[node_key] = tpl_node[node_key]['artifacts']
+            for node in template.nodetemplates:
+                if 'VDU' in node.type:
+                    vnf_type_to_vdus_map[vnf_type].append(node.name)
+            for policy in template.policies:
+                policies.append(policy.name)
+            for req in template.substitution_mappings.requirements:
+                vnf_type_substitution_mapping[template.substitution_mappings.node_type].append(req)
+            if template.substitution_mappings.capabilities:
+                for capability in template.substitution_mappings.capabilities:
+                    sub_list = template.substitution_mappings.capabilities[capability]
+                    if len(sub_list) > 0:
+                        vnf_type_to_capability_substitution_mapping[vnf_type].append({capability: sub_list[0]})
+
+        for node in all_node_templates:
             base_type = ManoResource.get_base_type(node.type_definition)
             self.log.debug(_("Translate node %(name)s of type %(type)s with "
                              "base %(base)s") %
@@ -203,46 +249,73 @@
                             metadata=self.metadata)
             # Currently tosca-parser does not add the artifacts
             # to the node
-            if mano_node.name in tpl:
-                tpl_node = tpl[mano_node.name]
-                self.log.debug("Check artifacts for {}".format(tpl_node))
-                if 'artifacts' in tpl_node:
-                    mano_node.artifacts = tpl_node['artifacts']
+            if mano_node.type == 'vnfd':
+                associated_vnfd_flag = True
+            if mano_node.name in node_to_artifact_map:
+                mano_node.artifacts = node_to_artifact_map[mano_node.name]
             self.mano_resources.append(mano_node)
             self.mano_lookup[node] = mano_node
 
+        if not associated_vnfd_flag:
+            dummy_file = "{0}{1}".format(os.getenv('RIFT_INSTALL'), "/usr/rift/mano/common/dummy_vnf_node.yaml")
+            tosca_vnf = ToscaTemplate(dummy_file, {}, True)
+            vnf_type = self.tosca.topology_template.substitution_mappings.node_type
+            vnf_type_to_vdus_map[vnf_type] = []
+
+            for node in tosca_vnf.nodetemplates:
+                all_node_templates.append(node)
+                base_type = ManoResource.get_base_type(node.type_definition)
+                vnf_type_to_vnf_node[vnf_type] = node.name
+                mano_node = TranslateNodeTemplates. \
+                        TOSCA_TO_MANO_TYPE[base_type.type](
+                            self.log,
+                            node,
+                            metadata=self.metadata)
+                mano_node.vnf_type = vnf_type
+                self.mano_resources.append(mano_node)
+                self.log.debug(_("Adding a new node"))
+
+            for node in self.tosca.nodetemplates:
+                if 'VDU' in node.type:
+                    vnf_type_to_vdus_map[vnf_type].append(node.name)
+
         # The parser currently do not generate the objects for groups
-        if 'groups' in self.tosca.tpl['topology_template']:
-            tpl = self.tosca.tpl['topology_template']['groups']
-            self.log.debug("Groups: {}".format(tpl))
-            for group, details in tpl.items():
-                self.log.debug(_("Translate group {}: {}").
-                               format(group, details))
-                group_type = details['type']
-                if group_type:
-                    group_node = TranslateNodeTemplates. \
-                                 TOSCA_TO_MANO_TYPE[group_type](
-                                     self.log,
-                                     group,
-                                     details,
-                                     metadata=self.metadata)
-                    self.mano_groups.append(group_node)
+        for group in self.tosca.topology_template.groups:
+            group_type = group.type
+            if group_type:
+                group_node = TranslateNodeTemplates. \
+                             TOSCA_TO_MANO_TYPE[group_type](
+                                 self.log,
+                                 group,
+                                 metadata=self.metadata)
+                self.mano_groups.append(group_node)
 
         # The parser currently do not generate the objects for policies
-        if 'policies' in self.tosca.tpl['topology_template']:
-            tpl = self.tosca.tpl['topology_template']['policies']
-            # for policy in self.policies:
-            for policy in tpl:
-                self.log.debug(_("Translate policy {}").
-                               format(policy))
-                policy_type = self._get_policy_type(policy)
-                if policy_type:
-                    policy_node = TranslateNodeTemplates. \
-                                  TOSCA_TO_MANO_TYPE[policy_type](
-                                      self.log,
-                                      policy,
-                                      metadata=self.metadata)
-                    self.mano_policies.append(policy_node)
+
+        for policy in self.tosca.topology_template.policies:
+            policy_type = policy.type
+            if policy_type:
+                policy_node = TranslateNodeTemplates. \
+                             TOSCA_TO_MANO_TYPE[policy_type](
+                                 self.log,
+                                 policy,
+                                 metadata=self.metadata)
+                self.mano_policies.append(policy_node)
+        for template in self.tosca.nested_tosca_templates_with_topology:
+            vnf_type = template.substitution_mappings.node_type
+            if vnf_type in vnf_type_to_vnf_node:
+                vnf_node = vnf_type_to_vnf_node[vnf_type]
+
+                for policy in template.policies:
+                    policy_type = policy.type
+                    if policy_type:
+                        policy_node = TranslateNodeTemplates. \
+                                     TOSCA_TO_MANO_TYPE[policy_type](
+                                         self.log,
+                                         policy,
+                                         metadata=self.metadata,
+                                         vnf_name=vnf_node)
+                        self.mano_policies.append(policy_node)
 
         for node in self.mano_resources:
             self.log.debug(_("Handle properties for {0} of type {1}").
@@ -272,7 +345,8 @@
                     self.log.debug(_("Handle requirements for {0} of "
                                      "type {1}").
                                    format(node.name, node.type_))
-                    node.handle_requirements(self.mano_resources)
+                    node.handle_requirements(self.mano_resources, self.mano_policies, vnf_type_to_vdus_map)
+
                 except Exception as e:
                     self.log.error(_("Exception for {0} in requirements {1}").
                                    format(node.name, node.type_))
@@ -290,11 +364,17 @@
                                    format(node.name, node.type_))
                     self.log.exception(e)
 
+        for node in self.mano_resources:
+            if node.type == "vld":
+                node.handle_vld_properties(self.mano_resources, vnf_type_substitution_mapping)
+            elif node.type == 'forwarding_path':
+                node.handle_forwarding_path_dependencies(self.mano_resources, vnf_type_to_capability_substitution_mapping)
+
         return self.mano_resources
 
     def translate_groups(self):
         for group in self.mano_groups:
-            group.handle_properties(self.mano_resources)
+            group.handle_properties(self.mano_resources, self.mano_groups)
         return self.mano_groups
 
     def translate_policies(self):
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar_tosca_new_spec.zip b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar_tosca_new_spec.zip
new file mode 100644
index 0000000..a755040
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar_tosca_new_spec.zip
Binary files differ
diff --git a/common/python/rift/mano/utils/short_name.py b/common/python/rift/mano/utils/short_name.py
new file mode 100644
index 0000000..e4dd8a8
--- /dev/null
+++ b/common/python/rift/mano/utils/short_name.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author: Aniruddha Atale
+
+import hashlib
+import basehash
+
+
+class StringShortner(object):
+    FOLDS = 3
+    STRING_LEN=9
+    def __init__(self, string = None):
+        self._string = string
+
+    @property
+    def string(self):
+        return self._string
+
+    @string.setter
+    def string(self, string):
+        self._string = string
+
+    @property
+    def short_string(self):
+        if self._string:
+            return StringShortner._get_short_string(self._string)
+        else:
+            return str()
+
+    @staticmethod
+    def _fold_hex_series(series):
+        length = len(series)
+        result = list()
+        for i in range(int(length/2)):
+            result.append(series[i] ^ series[(length - 1) - i])
+
+        if length % 2:
+            result.append(series[int(length/2)])
+
+        return result
+
+    @staticmethod
+    def _num_from_hex_series(series):
+        result = 0
+        for i in range(len(series)):
+            result = result * 256
+            result += series[i]
+        return result
+    
+    @staticmethod
+    def _get_short_string(string):
+        sha = hashlib.sha384(string.encode())
+        digest = sha.digest()
+        for i in range(StringShortner.FOLDS):
+            digest = StringShortner._fold_hex_series(digest)
+
+        number = StringShortner._num_from_hex_series(digest)
+        base62 = basehash.base62(length=StringShortner.STRING_LEN)
+        return base62.hash(number)
diff --git a/common/python/rift/mano/yang_translator/riftiotypes.yaml b/common/python/rift/mano/yang_translator/riftiotypes.yaml
new file mode 100644
index 0000000..18a0728
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/riftiotypes.yaml
@@ -0,0 +1,1493 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Extended types
+
+
+data_types:
+  tosca.datatypes.nfv.riftio.dashboard_params:
+    properties:
+      path:
+        type: string
+        description: >-
+          The HTTP path for the dashboard
+      port:
+        type: tosca.datatypes.network.PortDef
+        description: >-
+          The HTTP port for the dashboard
+        default: 80
+      https:
+        type: boolean
+        description: >-
+          Pick HTTPS instead of HTTP , Default is false
+        default: false
+        required: false
+  tosca.datatypes.nfv.riftio.monitoring_param_ui:
+    properties:
+      description:
+        type: string
+        required: false
+      group_tag:
+        type: string
+        description: >-
+          A simple tag to group monitoring parameters
+        required: false
+      widget_type:
+        type: string
+        description: >-
+          Type of the widget
+        default: counter
+        constraints:
+          - valid_values:
+              - histogram
+              - bar
+              - gauge
+              - slider
+              - counter
+              - textbox
+      units:
+        type: string
+        required: false
+  tosca.datatypes.nfv.riftio.monitoring_param_value:
+    properties:
+      value_type:
+        type: string
+        default: integer
+        constraints:
+          - valid_values:
+              - integer
+              - float
+              - string
+      numeric_min:
+        type: integer
+        description: >-
+          Minimum value for the parameter
+        required: false
+      numeric_max:
+        type: integer
+        description: >-
+          Maxium value for the parameter
+        required: false
+      string_min:
+        type: integer
+        description: >-
+          Minimum string length for the parameter
+        required: false
+        constraints:
+          - greater_or_equal: 0
+      string_max:
+        type: integer
+        description: >-
+          Maximum string length for the parameter
+        required: false
+        constraints:
+          - greater_or_equal: 0
+  tosca.datatypes.compute.Container.Architecture.CPUAllocation:
+    derived_from: tosca.datatypes.Root
+    properties:
+      cpu_affinity:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [shared, dedicated, any]
+      thread_allocation:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [avoid, separate, isolate, prefer]
+      socket_count:
+        type: integer
+        required: false
+      core_count:
+        type: integer
+        required: false
+      thread_count:
+        type: integer
+        required: false
+
+  tosca.datatypes.compute.Container.Architecture.NUMA:
+    derived_from: tosca.datatypes.Root
+    properties:
+      id:
+        type: integer
+        constraints:
+          - greater_or_equal: 0
+      vcpus:
+        type: list
+        entry_schema:
+          type: integer
+          constraints:
+            -  greater_or_equal: 0
+      mem_size:
+        type: scalar-unit.size
+        constraints:
+          - greater_or_equal: 0 MB
+  tosca.datatypes.nfv.riftio.paired_thread_map:
+    properties:
+      thread_a:
+        type: integer
+        required: true
+        constraints:
+          - greater_or_equal: 0
+      thread_b:
+        type: integer
+        required: true
+        constraints:
+          - greater_or_equal: 0
+
+  tosca.datatypes.nfv.riftio.paired_threads:
+    properties:
+      num_paired_threads:
+        type: integer
+        constraints:
+          - greater_or_equal: 1
+      paired_thread_ids:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.paired_thread_map
+        constraints:
+          - max_length: 16
+        required: false
+
+  tosca.datatypes.compute.riftio.numa:
+    properties:
+      id:
+        type: integer
+        constraints:
+          - greater_or_equal: 0
+      vcpus:
+        type: list
+        entry_schema:
+          type: integer
+          constraints:
+            -  greater_or_equal: 0
+        required: false
+      mem_size:
+        type: scalar-unit.size
+        constraints:
+          - greater_or_equal: 0 MB
+        required: false
+      om_numa_type:
+        type: string
+        description: Openmano Numa type selection
+        constraints:
+          - valid_values: [cores, paired-threads, threads]
+        required: false
+      num_cores:
+        type: integer
+        description: Use when om_numa_type is cores
+        constraints:
+          - greater_or_equal: 1
+        required: false
+      paired_threads:
+        type: tosca.datatypes.nfv.riftio.paired_threads
+        description: Use when om_numa_type is paired-threads
+        required: false
+      num_threads:
+        type: integer
+        description: Use when om_numa_type is threads
+        constraints:
+          - greater_or_equal: 1
+        required: false
+  
+  tosca.nfv.datatypes.pathType:
+    properties:
+      forwarder:
+        type: string
+        required: true
+      capability:
+        type: string
+        required: true
+
+  tosca.nfv.datatypes.aclType:
+    properties:
+      eth_type:
+        type: string
+        required: false
+      eth_src:
+        type: string
+        required: false
+      eth_dst:
+        type: string
+        required: false
+      vlan_id:
+        type: integer
+        constraints:
+          - in_range: [ 1, 4094 ]
+        required: false
+      vlan_pcp:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7 ]
+        required: false
+      mpls_label:
+        type: integer
+        constraints:
+          - in_range: [ 16, 1048575]
+        required: false
+      mpls_tc:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7 ]
+        required: false
+      ip_dscp:
+        type: integer
+        constraints:
+          - in_range: [ 0, 63 ]
+        required: false
+      ip_ecn:
+        type: integer
+        constraints:
+          - in_range: [ 0, 3 ]
+        required: false
+      ip_src_prefix:
+        type: string
+        required: false
+      ip_dst_prefix:
+        type: string
+        required: false
+      ip_proto:
+        type: integer
+        constraints:
+          - in_range: [ 1, 254 ]
+        required: false
+      destination_port_range:
+        type: integer
+        required: false
+      source_port_range:
+        type: integer
+        required: false
+      network_src_port_id:
+        type: string
+        required: false
+      network_dst_port_id:
+        type: string
+        required: false
+      network_id:
+        type: string
+        required: false
+      network_name:
+        type: string
+        required: false
+      tenant_id:
+        type: string
+        required: false
+      icmpv4_type:
+        type: integer
+        constraints:
+          - in_range: [ 0, 254 ]
+        required: false
+      icmpv4_code:
+        type: integer
+        constraints:
+          - in_range: [ 0, 15 ]
+        required: false
+      arp_op:
+        type: integer
+        constraints:
+          - in_range: [ 1, 25 ]
+        required: false
+      arp_spa:
+        type: string
+        required: false
+      arp_tpa:
+        type: string
+        required: false
+      arp_sha:
+        type: string
+        required: false
+      arp_tha:
+        type: string
+        required: false
+      ipv6_src:
+        type: string
+        required: false
+      ipv6_dst:
+        type: string
+        required: false
+      ipv6_flabel:
+        type: integer
+        constraints:
+          - in_range: [ 0, 1048575]
+        required: false
+      icmpv6_type:
+        type: integer
+        constraints:
+          - in_range: [ 0, 255]
+        required: false
+      icmpv6_code:
+        type: integer
+        constraints:
+          - in_range: [ 0, 7]
+        required: false
+      ipv6_nd_target:
+        type: string
+        required: false
+      ipv6_nd_sll:
+        type: string
+        required: false
+      ipv6_nd_tll:
+        type: string
+        required: false
+
+  
+  tosca.datatypes.nfv.riftio.vnf_configuration:
+    properties:
+      config_type:
+        type: string
+        description: >-
+          Type of the configuration agent to use
+        constraints:
+          - valid_values: [script, netconf, rest, juju]
+      config_details:
+        type: map
+        description: >-
+          Specify the details for the config agent, like
+          script type, juju charm to use, etc.
+      config_template:
+        required: false
+        type: string
+      config_delay:
+        type: integer
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: false
+      config_priority:
+        type: integer
+        constraints:
+        - greater_than: 0
+
+  tosca.datatypes.nfv.riftio.parameter_value:
+    properties:
+      name:
+        type: string
+        description: Name of the parameter
+      value:
+        type: string
+        description: Value of the parameter
+
+  tosca.datatypes.nfv.riftio.config_primitive:
+    properties:
+      name:
+        type: string
+      seq:
+        type: integer
+        description: >-
+          Order in which to apply, when multiple ones are defined
+        default: 0
+        constraints:
+          - greater_or_equal: 0
+      parameter:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.parameter_value
+      user_defined_script:
+        type: string
+  tosca.datatypes.nfv.riftio.primitive_parameter:
+    properties:
+      data_type:
+        type: string
+        description: >-
+          Data type associated with the name
+        constraints:
+          - valid_values: [string, integer, boolean]
+      mandatory:
+        type: boolean
+        description: >-
+          If this field is mandatory
+        default: false
+        required: false
+      default_value:
+        type: string
+        description: >-
+          The default value for this field
+        required: false
+      parameter_pool:
+        type: string
+        description: >-
+          Parameter pool name to use for this parameter
+        required: false
+      read_only:
+        type: boolean
+        description: >-
+          The value should be greyed out by the UI.
+          Only applies to parameters with default values.
+        required: false
+        default: false
+      hidden:
+        type: boolean
+        description: >-
+          The field should be hidden by the UI.
+          Only applies to parameters with default values.
+        required: false
+        default: false
+  tosca.datatypes.nfv.riftio.primitive_parameter_group:
+    properties:
+      name:
+        type: string
+        description: >-
+          Name of the parameter group
+      mandatory:
+        type: boolean
+        description: >-
+          If this group is mandatory
+        default: false
+        required: false
+      parameter:
+        type: map
+        description: >-
+          List of parameters for the service primitive
+        entry_schema: tosca.datatypes.nfv.riftio.primitive_parameter
+
+  tosca.datatypes.nfv.riftio.vnf_primitive_group:
+    properties:
+      vnf_name:
+        type: string
+        description: >-
+          Name of the VNF in the NS
+      primitive:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Index and name of the primitive
+
+
+capability_types:
+  tosca.capabilities.nfv.riftio.mgmt_interface:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      static_ip:
+        type: string
+        required: false
+        description: >-
+          Specifies the static IP address for managing the VNF
+      connection_point:
+        type: string
+        required: false
+        description: >-
+          Use the ip address associated with this connection point
+      dashboard_params:
+        type: tosca.datatypes.nfv.riftio.dashboard_params
+        required: false
+        description: >-
+          Parameters for the VNF dashboard
+  tosca.capabilities.nfv.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      name:
+        type: string
+        required: false
+      description:
+        type: string
+        required: false
+      protocol:
+        type: string
+        default: http
+        constraints:
+          - equal: http
+      polling_interval:
+        type: scalar-unit.time
+        description: >-
+          The HTTP polling interval in seconds
+        default: 2 s
+      username:
+        type: string
+        description: >-
+          The HTTP basic auth username
+        required: false
+      password:
+        type: string
+        description: >-
+          The HTTP basic auth password
+        required: false
+      method:
+        type: string
+        description: >-
+          This is the method to be performed at the uri.
+          GET by default for action
+        default: get
+        constraints:
+          - valid_values: [post, put, get, delete, options, patch]
+      headers:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Custom HTTP headers to put on HTTP request
+        required: false
+      json_query_method:
+        type: string
+        description: >-
+          The method to extract a value from a JSON response
+            namekey    - Use the name as the key for a non-nested value.
+            jsonpath   - Use jsonpath-rw implementation to extract a value.
+            objectpath - Use objectpath implementation to extract a value.
+        constraints:
+          - valid_values: [namekey, jsonpath, objectpath]
+        default: namekey
+      json_query_path:
+        type: string
+        description: >-
+          The json path to use to extract value from JSON structure
+        required: false
+      json_object_path:
+        type: string
+        description: >-
+          The object path to use to extract value from JSON structure
+        required: false
+      ui_data:
+        type: tosca.datatypes.nfv.riftio.monitoring_param_ui
+        required: false
+      constraints:
+        type: tosca.datatypes.nfv.riftio.monitoring_param_value
+        required: false
+  tosca.capabilities.nfv.riftio.numa_extension:
+    derived_from: tosca.capabilities.Root
+    properties:
+      node_cnt:
+        type: integer
+        description: >-
+          The number of numa nodes to expose to the VM
+        constraints:
+          - greater_or_equal: 0
+      mem_policy:
+        type: string
+        description: >-
+          This policy specifies how the memory should
+                   be allocated in a multi-node scenario.
+                   STRICT    - The memory must be allocated
+                               strictly from the memory attached
+                               to the NUMA node.
+                   PREFERRED - The memory should be allocated
+                               preferentially from the memory
+                               attached to the NUMA node
+        constraints:
+          - valid_values: [strict, preferred, STRICT, PREFERRED]
+      node:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.compute.riftio.numa
+  tosca.capabilities.nfv.riftio.vswitch_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      ovs_acceleration:
+        type: string
+        description: |-
+          Specifies Open vSwitch acceleration mode.
+             MANDATORY - OVS acceleration is required
+             PREFERRED - OVS acceleration is preferred
+        constraints:
+          - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+      ovs_offload:
+        type: string
+        description: |-
+          Specifies Open vSwitch hardware offload mode.
+             MANDATORY - OVS offload is required
+             PREFERRED - OVS offload is preferred
+        constraints:
+          - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+
+  tosca.capabilities.nfv.riftio.hypervisor_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      type:
+        type: string
+        description: |-
+          Specifies the type of hypervisor.
+        constraints:
+          - valid_values: [prefer_kvm, require_kvm, PREFER_KVM, REQUIRE_KVM]
+      version:
+        type: string
+
+  tosca.capabilities.nfv.riftio.host_epa:
+    derived_from: tosca.capabilities.Root
+    properties:
+      cpu_model:
+        type: string
+        description: >-
+          Host CPU model. Examples include SandyBridge,
+          IvyBridge, etc.
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_westmere
+              - require_westmere
+              - prefer_sandybridge
+              - require_sandybridge
+              - prefer_ivybridge
+              - require_ivybridge
+              - prefer_haswell
+              - require_haswell
+              - prefer_broadwell
+              - require_broadwell
+              - prefer_nehalem
+              - require_nehalem
+              - prefer_penryn
+              - require_penryn
+              - prefer_conroe
+              - require_conroe
+              - prefer_core2duo
+              - require_core2duo
+              - PREFER_WESTMERE
+              - REQUIRE_WESTMERE
+              - PREFER_SANDYBRIDGE
+              - REQUIRE_SANDYBRIDGE
+              - PREFER_IVYBRIDGE
+              - REQUIRE_IVYBRIDGE
+              - PREFER_HASWELL
+              - REQUIRE_HASWELL
+              - PREFER_BROADWELL
+              - REQUIRE_BROADWELL
+              - PREFER_NEHALEM
+              - REQUIRE_NEHALEM
+              - PREFER_PENRYN
+              - REQUIRE_PENRYN
+              - PREFER_CONROE
+              - REQUIRE_CONROE
+              - PREFER_CORE2DUO
+              - REQUIRE_CORE2DUO
+      cpu_arch:
+        type: string
+        description: >-
+          Host CPU architecture
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_x86
+              - require_x86
+              - prefer_x86_64
+              - require_x86_64
+              - prefer_i686
+              - require_i686
+              - prefer_ia64
+              - require_ia64
+              - prefer_armv7
+              - require_armv7
+              - prefer_armv8
+              - require_armv8
+              - PREFER_X86
+              - REQUIRE_X86
+              - PREFER_X86_64
+              - REQUIRE_X86_64
+              - PREFER_I686
+              - REQUIRE_I686
+              - PREFER_IA64
+              - REQUIRE_IA64
+              - PREFER_ARMV7
+              - REQUIRE_ARMV7
+              - PREFER_ARMV8
+              - REQUIRE_ARMV8
+      cpu_vendor:
+        type: string
+        description: >-
+          Host CPU vendor
+        required: false
+        constraints:
+          - valid_values:
+              - prefer_intel
+              - require_intel
+              - prefer_amd
+              - require_amd
+              - PREFER_INTEL
+              - REQUIRE_INTEL
+              - PREFER_AMD
+              - REQUIRE_AMD
+      cpu_socket_count:
+        type: integer
+        description: >-
+          Number of sockets on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_core_count:
+        type: integer
+        description: >-
+          Number of cores on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_core_thread_count:
+        type: integer
+        description: >-
+          Number of threads per core on the host
+        required: false
+        constraints:
+          - greater_than : 0
+      cpu_feature:
+        type: list
+        entry_schema:
+          type: string
+        description: |-
+          Enumeration for CPU features.
+
+          AES- CPU supports advanced instruction set for
+          AES (Advanced Encryption Standard).
+
+          CAT- Cache Allocation Technology (CAT) allows
+          an Operating System, Hypervisor, or similar
+          system management agent to specify the amount
+          of L3 cache (currently the last-level cache
+          in most server and client platforms) space an
+          application can fill (as a hint to hardware
+          functionality, certain features such as power
+          management may override CAT settings).
+
+          CMT- Cache Monitoring Technology (CMT) allows
+          an Operating System, Hypervisor, or similar
+          system management agent to determine the
+          usage of cache based on applications running
+          on the platform. The implementation is
+          directed at L3 cache monitoring (currently
+          the last-level cache in most server and
+          client platforms).
+
+          DDIO- Intel Data Direct I/O (DDIO) enables
+          Ethernet server NICs and controllers talk
+          directly to the processor cache without a
+          detour via system memory. This enumeration
+          specifies if the VM requires a DDIO
+          capable host.
+        required: false
+        constraints:
+          - valid_values:
+            - prefer_aes
+            - require_aes
+            - prefer_cat
+            - require_cat
+            - prefer_cmt
+            - require_cmt
+            - prefer_ddio
+            - require_ddio
+            - prefer_vme
+            - require_vme
+            - prefer_de
+            - require_de
+            - prefer_pse
+            - require_pse
+            - prefer_tsc
+            - require_tsc
+            - prefer_msr
+            - require_msr
+            - prefer_pae
+            - require_pae
+            - prefer_mce
+            - require_mce
+            - prefer_cx8
+            - require_cx8
+            - prefer_apic
+            - require_apic
+            - prefer_sep
+            - require_sep
+            - prefer_mtrr
+            - require_mtrr
+            - prefer_pge
+            - require_pge
+            - prefer_mca
+            - require_mca
+            - prefer_cmov
+            - require_cmov
+            - prefer_pat
+            - require_pat
+            - prefer_pse36
+            - require_pse36
+            - prefer_clflush
+            - require_clflush
+            - prefer_dts
+            - require_dts
+            - prefer_acpi
+            - require_acpi
+            - prefer_mmx
+            - require_mmx
+            - prefer_fxsr
+            - require_fxsr
+            - prefer_sse
+            - require_sse
+            - prefer_sse2
+            - require_sse2
+            - prefer_ss
+            - require_ss
+            - prefer_ht
+            - require_ht
+            - prefer_tm
+            - require_tm
+            - prefer_ia64
+            - require_ia64
+            - prefer_pbe
+            - require_pbe
+            - prefer_rdtscp
+            - require_rdtscp
+            - prefer_pni
+            - require_pni
+            - prefer_pclmulqdq
+            - require_pclmulqdq
+            - prefer_dtes64
+            - require_dtes64
+            - prefer_monitor
+            - require_monitor
+            - prefer_ds_cpl
+            - require_ds_cpl
+            - prefer_vmx
+            - require_vmx
+            - prefer_smx
+            - require_smx
+            - prefer_est
+            - require_est
+            - prefer_tm2
+            - require_tm2
+            - prefer_ssse3
+            - require_ssse3
+            - prefer_cid
+            - require_cid
+            - prefer_fma
+            - require_fma
+            - prefer_cx16
+            - require_cx16
+            - prefer_xtpr
+            - require_xtpr
+            - prefer_pdcm
+            - require_pdcm
+            - prefer_pcid
+            - require_pcid
+            - prefer_dca
+            - require_dca
+            - prefer_sse4_1
+            - require_sse4_1
+            - prefer_sse4_2
+            - require_sse4_2
+            - prefer_x2apic
+            - require_x2apic
+            - prefer_movbe
+            - require_movbe
+            - prefer_popcnt
+            - require_popcnt
+            - prefer_tsc_deadline_timer
+            - require_tsc_deadline_timer
+            - prefer_xsave
+            - require_xsave
+            - prefer_avx
+            - require_avx
+            - prefer_f16c
+            - require_f16c
+            - prefer_rdrand
+            - require_rdrand
+            - prefer_fsgsbase
+            - require_fsgsbase
+            - prefer_bmi1
+            - require_bmi1
+            - prefer_hle
+            - require_hle
+            - prefer_avx2
+            - require_avx2
+            - prefer_smep
+            - require_smep
+            - prefer_bmi2
+            - require_bmi2
+            - prefer_erms
+            - require_erms
+            - prefer_invpcid
+            - require_invpcid
+            - prefer_rtm
+            - require_rtm
+            - prefer_mpx
+            - require_mpx
+            - prefer_rdseed
+            - require_rdseed
+            - prefer_adx
+            - require_adx
+            - prefer_smap
+            - require_smap
+            - PREFER_AES
+            - REQUIRE_AES
+            - PREFER_CAT
+            - REQUIRE_CAT
+            - PREFER_CMT
+            - REQUIRE_CMT
+            - PREFER_DDIO
+            - REQUIRE_DDIO
+            - PREFER_VME
+            - REQUIRE_VME
+            - PREFER_DE
+            - REQUIRE_DE
+            - PREFER_PSE
+            - REQUIRE_PSE
+            - PREFER_TSC
+            - REQUIRE_TSC
+            - PREFER_MSR
+            - REQUIRE_MSR
+            - PREFER_PAE
+            - REQUIRE_PAE
+            - PREFER_MCE
+            - REQUIRE_MCE
+            - PREFER_CX8
+            - REQUIRE_CX8
+            - PREFER_APIC
+            - REQUIRE_APIC
+            - PREFER_SEP
+            - REQUIRE_SEP
+            - PREFER_MTRR
+            - REQUIRE_MTRR
+            - PREFER_PGE
+            - REQUIRE_PGE
+            - PREFER_MCA
+            - REQUIRE_MCA
+            - PREFER_CMOV
+            - REQUIRE_CMOV
+            - PREFER_PAT
+            - REQUIRE_PAT
+            - PREFER_PSE36
+            - REQUIRE_PSE36
+            - PREFER_CLFLUSH
+            - REQUIRE_CLFLUSH
+            - PREFER_DTS
+            - REQUIRE_DTS
+            - PREFER_ACPI
+            - REQUIRE_ACPI
+            - PREFER_MMX
+            - REQUIRE_MMX
+            - PREFER_FXSR
+            - REQUIRE_FXSR
+            - PREFER_SSE
+            - REQUIRE_SSE
+            - PREFER_SSE2
+            - REQUIRE_SSE2
+            - PREFER_SS
+            - REQUIRE_SS
+            - PREFER_HT
+            - REQUIRE_HT
+            - PREFER_TM
+            - REQUIRE_TM
+            - PREFER_IA64
+            - REQUIRE_IA64
+            - PREFER_PBE
+            - REQUIRE_PBE
+            - PREFER_RDTSCP
+            - REQUIRE_RDTSCP
+            - PREFER_PNI
+            - REQUIRE_PNI
+            - PREFER_PCLMULQDQ
+            - REQUIRE_PCLMULQDQ
+            - PREFER_DTES64
+            - REQUIRE_DTES64
+            - PREFER_MONITOR
+            - REQUIRE_MONITOR
+            - PREFER_DS_CPL
+            - REQUIRE_DS_CPL
+            - PREFER_VMX
+            - REQUIRE_VMX
+            - PREFER_SMX
+            - REQUIRE_SMX
+            - PREFER_EST
+            - REQUIRE_EST
+            - PREFER_TM2
+            - REQUIRE_TM2
+            - PREFER_SSSE3
+            - REQUIRE_SSSE3
+            - PREFER_CID
+            - REQUIRE_CID
+            - PREFER_FMA
+            - REQUIRE_FMA
+            - PREFER_CX16
+            - REQUIRE_CX16
+            - PREFER_XTPR
+            - REQUIRE_XTPR
+            - PREFER_PDCM
+            - REQUIRE_PDCM
+            - PREFER_PCID
+            - REQUIRE_PCID
+            - PREFER_DCA
+            - REQUIRE_DCA
+            - PREFER_SSE4_1
+            - REQUIRE_SSE4_1
+            - PREFER_SSE4_2
+            - REQUIRE_SSE4_2
+            - PREFER_X2APIC
+            - REQUIRE_X2APIC
+            - PREFER_MOVBE
+            - REQUIRE_MOVBE
+            - PREFER_POPCNT
+            - REQUIRE_POPCNT
+            - PREFER_TSC_DEADLINE_TIMER
+            - REQUIRE_TSC_DEADLINE_TIMER
+            - PREFER_XSAVE
+            - REQUIRE_XSAVE
+            - PREFER_AVX
+            - REQUIRE_AVX
+            - PREFER_F16C
+            - REQUIRE_F16C
+            - PREFER_RDRAND
+            - REQUIRE_RDRAND
+            - PREFER_FSGSBASE
+            - REQUIRE_FSGSBASE
+            - PREFER_BMI1
+            - REQUIRE_BMI1
+            - PREFER_HLE
+            - REQUIRE_HLE
+            - PREFER_AVX2
+            - REQUIRE_AVX2
+            - PREFER_SMEP
+            - REQUIRE_SMEP
+            - PREFER_BMI2
+            - REQUIRE_BMI2
+            - PREFER_ERMS
+            - REQUIRE_ERMS
+            - PREFER_INVPCID
+            - REQUIRE_INVPCID
+            - PREFER_RTM
+            - REQUIRE_RTM
+            - PREFER_MPX
+            - REQUIRE_MPX
+            - PREFER_RDSEED
+            - REQUIRE_RDSEED
+            - PREFER_ADX
+            - REQUIRE_ADX
+            - PREFER_SMAP
+            - REQUIRE_SMAP
+      om_cpu_model_string:
+        type: string
+        description: >-
+          Openmano CPU model string
+        required: false
+      om_cpu_feature:
+        type: list
+        entry_schema:
+          type: string
+        description: >-
+          List of openmano CPU features
+        required: false
+
+  tosca.capabilities.nfv.riftio.sfc:
+    derived_from: tosca.capabilities.Root
+    description: >-
+      Service Function Chaining support on this VDU
+    properties:
+      sfc_type:
+        type: string
+        description: >-
+          Type of node in Service Function Chaining Architecture
+        constraints:
+          - valid_values: [unaware, classifier, sf, sff, UNAWARE, CLASSIFIER, SF, SFF]
+        default: unaware
+      sf_type:
+        type: string
+        description: >-
+          Type of Service Function.
+             NOTE- This needs to map with Service Function Type in ODL to
+             support VNFFG. Service Function Type is mandatory param in ODL
+             SFC.
+        required: false
+  tosca.capabilities.Compute.Container.Architecture:
+    derived_from: tosca.capabilities.Container
+    properties:
+      mem_page_size:
+        type: string
+        description: >-
+          Memory page allocation size. If a VM requires
+          hugepages, it should choose huge or size_2MB
+          or size_1GB. If the VM prefers hugepages, it
+          should chose prefer_huge.
+             huge/large         - Require hugepages (either 2MB or 1GB)
+             normal       - Does not require hugepages
+             size_2MB     - Requires 2MB hugepages
+             size_1GB     - Requires 1GB hugepages
+             prefer_huge  - Application prefers hugepages
+          NOTE - huge and normal is only defined in standards as of
+                 now.
+        required: false
+        constraints:
+          - valid_values: [normal, large, huge, size_2MB, size_1GB, prefer_huge, NORMAL,LARGE, HUGE, SIZE_2MB, SIZE_1GB, PREFER_HUGE]
+      cpu_allocation:
+        type: tosca.datatypes.compute.Container.Architecture.CPUAllocation
+        required: false
+      numa_nodes:
+        type: map
+        required: false
+        entry_schema:
+          type: tosca.datatypes.compute.Container.Architecture.NUMA
+
+
+node_types:
+  tosca.nodes.nfv.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      description:
+        type: string
+        required: false
+      image:
+        description: >-
+          If an image is specified here, it is assumed that the image
+          is already present in the RO or VIM and not in the package.
+        type: string
+        required: false
+      image_checksum:
+        type: string
+        description: >-
+          Image checksum for the image in RO or VIM.
+        required: false
+      cloud_init:
+        description: >-
+          Inline cloud-init specification
+        required: false
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+      monitoring_param_1:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      mgmt_interface:
+        type: tosca.capabilities.nfv.riftio.mgmt_interface
+      monitoring_param:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      numa_extension:
+        type: tosca.capabilities.nfv.riftio.numa_extension
+      vswitch_epa:
+        type: tosca.capabilities.nfv.riftio.vswitch_epa
+      hypervisor_epa:
+        type: tosca.capabilities.nfv.riftio.hypervisor_epa
+      host_epa:
+        type: tosca.capabilities.nfv.riftio.host_epa
+  tosca.nodes.nfv.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+        default: VPORT
+        constraints:
+          - valid_values: [VPORT]
+      name:
+        description: Name of the connection point
+        type: string
+        required: false
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: >-
+          Specifies the type of virtual interface
+             between VM and host.
+             VIRTIO          - Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH - Use PCI-PASSTHROUGH interface.
+             SR-IOV          - Use SR-IOV interface.
+             E1000           - Emulate E1000 interface.
+             RTL8139         - Emulate RTL8139 interface.
+             PCNET           - Emulate PCNET interface.
+             OM-MGMT         - Used to specify openmano mgmt external-connection type
+        type: string
+        constraints:
+          - valid_values: [OM-MGMT, VIRTIO, E1000, SR-IOV]
+      bandwidth:
+        type: integer
+        description: Aggregate bandwidth of the NIC
+        constraints:
+          - greater_or_equal: 0
+        required: false
+      vpci:
+        type: string
+        description: >-
+          Specifies the virtual PCI address. Expressed in
+          the following format dddd:dd:dd.d. For example
+          0000:00:12.0. This information can be used to
+          pass as metadata during the VM creation.
+        required: false
+    capabilities:
+      sfc:
+        type: tosca.capabilities.nfv.riftio.sfc
+  tosca.nodes.nfv.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      member_index:
+        type: integer
+        constraints:
+          - greater_or_equal: 1
+        description: Index of the VNF in the NS
+        required: false
+      start_by_default:
+        type: boolean
+        default: true
+        description: Start this VNF on NS instantiate
+      logo:
+        type: string
+        description: >-
+          Logo to display with the VNF in the orchestrator
+        required: false
+    capabilities:      
+      mgmt_interface:
+        type: tosca.capabilities.nfv.riftio.mgmt_interface
+      monitoring_param:
+        type: tosca.capabilities.nfv.riftio.monitoring_param
+      sfc:
+        type: tosca.capabilities.nfv.riftio.sfc
+  tosca.nodes.nfv.riftio.ELAN:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+        required: false
+      network_name:
+        type: string
+        description: >-
+          Name of network in VIM account. This is used to indicate
+          pre-provisioned network name in cloud account.
+        required: false
+      root_bandwidth:
+        type: integer
+        description: >-
+          This is the aggregate bandwidth
+        constraints:
+          - greater_or_equal: 0
+        required: false
+      leaf_bandwidth:
+        type: integer
+        description: >-
+          This is the bandwidth of branches
+        constraints:
+          - greater_or_equal: 0
+        required: false
+  tosca.nodes.nfv.riftio.FP1:
+    derived_from: tosca.nodes.nfv.FP
+    properties:
+      id:
+        type: integer
+        required: false
+      policy:
+        type: tosca.nfv.datatypes.policyType
+        required: true
+        description: policy to use to match traffic for this FP
+      path:
+        type: list
+        required: true
+        entry_schema:
+          type: tosca.nfv.datatypes.pathType
+      cp:
+        type: tosca.nfv.datatypes.pathType
+        required: true
+
+
+
+artifact_types:
+  tosca.artifacts.Deployment.riftio.cloud_init_file:
+    derived_from: tosca.artifacts.Deployment
+    file:
+      type: string
+
+  tosca.artifacts.Deployment.Image.riftio.QCOW2:
+    derived_from: tosca.artifacts.Deployment.Image.VM.QCOW2
+    image_checksum:
+      required: false
+      type: string
+
+group_types:
+  tosca.groups.nfv.VNFFG:
+    derived_from: tosca.groups.Root
+    properties:
+      vendor:
+        type: string
+        required: true
+        description: name of the vendor who generate this VNFFG
+      version:
+        type: string
+        required: true
+        description: version of this VNFFG
+      number_of_endpoints:
+        type: integer
+        required: true
+        description: count of the external endpoints included in this VNFFG
+      dependent_virtual_link:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to a VLD used in this Forwarding Graph
+      connection_point:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to Connection Points forming the VNFFG
+      constituent_vnfs:
+        type: list
+        entry_schema:
+          type: string
+        required: true
+        description: Reference to a list of VNFD used in this VNF Forwarding Graph
+    members: [ tosca.nodes.nfv.FP ]
+
+  tosca.groups.nfv.riftio.scaling:
+    derived_from: tosca.groups.Root
+    properties:
+      name:
+        type: string
+      min_instances:
+        type: integer
+        description: >-
+          Minimum instances of the scaling group which are allowed.
+          These instances are created by default when the network service
+          is instantiated.
+      max_instances:
+        type: integer
+        description: >-
+          Maximum instances of this scaling group that are allowed
+          in a single network service. The network service scaling
+          will fail, when the number of service group instances
+          exceed the max-instance-count specified.
+      cooldown_time:
+        type: integer
+        description: >-
+          The duration after a scaling-in/scaling-out action has been
+          triggered, for which there will be no further optional
+      ratio:
+        type: map
+        entry_schema:
+          type: integer
+        description: >-
+          Specify the number of instances of each VNF to instantiate
+          for a scaling action
+    members: [tosca.nodes.nfv.VNF]
+    interfaces:
+      action:
+        type: tosca.interfaces.nfv.riftio.scaling.action
+
+interface_types:
+  tosca.interfaces.nfv.riftio.scaling.action:
+    pre_scale_in:
+      description: Operation to execute before a scale in
+    post_scale_in:
+      description: Operation to execute after a scale in
+    pre_scale_out:
+      description: Operation to execute before a scale out
+    post_scale_out:
+      description: Operation to execute after a scale out
+
+policy_types:
+  tosca.policies.nfv.riftio.placement:
+    derived_from: tosca.policies.Placement
+    properties:
+      name:
+        type: string
+        description: >-
+          Place group construct to define the compute resource placement strategy
+          in cloud environment
+      requirement:
+        type: string
+        description: >-
+          This is free text space used to describe the intent/rationale
+          behind this placement group. This is for human consumption only
+      strategy:
+        type: string
+        description: >-
+          Strategy associated with this placement group
+             Following values are possible
+               COLOCATION - Colocation strategy imply intent to share the physical
+                            infrastructure (hypervisor/network) among all members
+                            of this group.
+               ISOLATION - Isolation strategy imply intent to not share the physical
+                           infrastructure (hypervisor/network) among the members
+                           of this group.
+        constraints:
+          - valid_values:
+            - COLOCATION
+            - ISOLATION
+  tosca.policies.nfv.riftio.vnf_configuration:
+    derived_from: tosca.policies.Root
+    properties:
+      config:
+        type: tosca.datatypes.nfv.riftio.vnf_configuration
+      initial_config:
+        type: list
+        entry_schema:
+          type: tosca.datatypes.nfv.riftio.config_primitive
+  tosca.policies.nfv.riftio.vnf_service_primitives:
+    derived_from: tosca.policies.Root
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: primitive_parameter
+  tosca.policies.nfv.riftio.ns_service_primitives:
+    derived_from: tosca.policies.Root
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: primitive_parameter
+      parameter_group:
+        type: tosca.datatypes.nfv.riftio.primitive_parameter_group
+        description: >-
+          Grouping of parameters which are logically grouped in UI
+        required: false
+      vnf_primitive_group:
+        type: tosca.datatypes.nfv.riftio.vnf_primitive_group
+        description: >-
+          List of service primitives grouped by VNF
+        required: false
+      user_defined_script:
+        type: string
+        description: >-
+          A user defined script
+        required: false
+  tosca.policies.nfv.riftio.initial_config_primitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      seq:
+        type: integer
+        description: >-
+          Order in which to apply, when multiple ones are defined
+        default: 0
+        constraints:
+          - greater_or_equal: 0
+      parameter:
+        type: map
+        entry_schema:
+          type: string
+      user_defined_script:
+        type: string
+  tosca.policies.nfv.riftio.users:
+    derived_from: tosca.policies.Root
+    description: >-
+      Specify list of public keys to be injected as
+      part of NS instantitation. Use default as entry,
+      to specify the key pairs for default user.
+    properties:
+      user_info:
+        type: string
+        description: >-
+          The user's real name
+        required: false
+      key_pairs:
+        type: map
+        description: >-
+          List of public keys for the user
+        entry_schema:
+          type: string
+        required: true
+  tosca.policies.nfv.riftio.dependency:
+    derived_from: tosca.policies.Root
+    description: >-
+      Map dependency between VDUs or VNFs
+    properties:
+      parameter:
+        type: map
+        entry_schema:
+          type: string
+        description: >-
+          Parameter and value for the config
+  tosca.nfv.datatypes.policyType:
+    properties:
+      type:
+        type: string
+        required: false
+        constraints:
+          - valid_values: [ ACL ]
+      criteria:
+        type: list
+        required: true
+        entry_schema:
+          type: tosca.nfv.datatypes.aclType
+
+  
+
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
index f05933b..57b0a31 100644
--- a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
@@ -17,7 +17,7 @@
 
 
 class ToscaResource(object):
-    '''Base class for YANG node type translation to RIFT.io TOSCA type.'''
+    '''Base class for YANG node type translation to RIFT.io TOSCA type.'''
 
     # Used when creating the resource, so keeping separate
     # from REQUIRED_FIELDS below
@@ -26,8 +26,10 @@
     REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID) = \
                       ('description', 'version', 'vendor', 'id')
 
-    COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE) = \
-                    ('path', 'port', 'host', 'xpath', 'type', 'count', 'file')
+    COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE, 
+                    NFV_COMPUTE, HOST_EPA, VSWITCH_EPA, HYPERVISOR_EPA, GUEST_EPA) = \
+                    ('path', 'port', 'host', 'xpath', 'type', 'count', 'file', 'nfv_compute', 
+                     'host_epa', 'vswitch_epa', 'hypervisor_epa', 'guest_epa')
 
     IGNORE_FIELDS = ['short_name']
 
@@ -41,7 +43,7 @@
                  MEM_VNF_INDEX_REF, VNFD_ID_REF,
                  MEM_VNF_INDEX, VNF_CONFIG, TYPE_Y,
                  USER_DEF_SCRIPT, SEQ, PARAM,
-                 VALUE, START_BY_DFLT,) = \
+                 VALUE, START_BY_DFLT, VNFFGD, ) = \
                 ('vld', 'nsd', 'vnfd', 'vdu', 'dashboard_params',
                  'config_attributes', 'config_template',
                  'config_type', 'config_details', 'external_interface',
@@ -49,7 +51,7 @@
                  'member_vnf_index_ref', 'vnfd_id_ref',
                  'member_vnf_index', 'vnf_configuration', 'type_yang',
                  'user_defined_script', 'seq', 'parameter',
-                 'value', 'start_by_default',)
+                 'value', 'start_by_default', 'vnffgd',)
 
     TOSCA_FIELDS = (DERIVED_FROM, PROPERTIES, DEFAULT, REQUIRED,
                     NO, CONSTRAINTS, REALTIONSHIPS,
@@ -66,17 +68,17 @@
                  GROUP_TYPES, POLICY_TYPES, REQUIREMENTS,
                  ARTIFACTS, PROPERTIES, INTERFACES,
                  CAPABILITIES, RELATIONSHIP,
-                 ARTIFACT_TYPES) = \
+                 ARTIFACT_TYPES, TARGETS) = \
                 ('data_types', 'capability_types', 'node_types',
                  'group_types', 'policy_types', 'requirements',
                  'artifacts', 'properties', 'interfaces',
                  'capabilities', 'relationship',
-                 'artifact_types')
+                 'artifact_types', 'targets')
 
     TOSCA_TMPL = (INPUTS, NODE_TMPL, GROUPS, POLICIES,
-                  METADATA, TOPOLOGY_TMPL, OUTPUTS) = \
+                  METADATA, TOPOLOGY_TMPL, OUTPUTS, SUBSTITUTION_MAPPING, IMPORT) = \
                  ('inputs', 'node_templates', 'groups', 'policies',
-                  'metadata', 'topology_template', 'outputs')
+                  'metadata', 'topology_template', 'outputs', 'substitution_mappings', 'imports')
 
     TOSCA_DERIVED = (
         T_VNF_CONFIG,
@@ -91,26 +93,36 @@
         T_SCALE_GRP,
         T_ARTF_QCOW2,
         T_INITIAL_CFG,
+        T_ARTF_CLOUD_INIT,
+        T_PLACEMENT,
+        T_ELAN,
+        T_VNFFG,
+        T_FP,
     ) = \
-        ('tosca.datatypes.network.riftio.vnf_configuration',
+        ('tosca.policies.nfv.riftio.vnf_configuration',
          'tosca.capabilities.riftio.http_endpoint_type',
          'tosca.capabilities.riftio.mgmt_interface_type',
          'tosca.capabilities.riftio.monitoring_param',
-         'tosca.nodes.riftio.VNF1',
-         'tosca.nodes.riftio.VDU1',
-         'tosca.nodes.riftio.CP1',
+         'tosca.nodes.nfv.riftio.VNF1',
+         'tosca.nodes.nfv.riftio.VDU1',
+         'tosca.nodes.nfv.riftio.CP1',
          'tosca.nodes.riftio.VL1',
          'tosca.groups.riftio.ConfigPrimitives',
          'tosca.policies.riftio.ScalingGroup',
          'tosca.artifacts.Deployment.Image.riftio.QCOW2',
-         'tosca.policies.riftio.InitialConfigPrimitive'
+         'tosca.policies.nfv.riftio.initial_config_primitive',
+         'tosca.artifacts.Deployment.riftio.cloud_init_file',
+         'tosca.policies.nfv.riftio.placement',
+         'tosca.nodes.nfv.riftio.ELAN',
+         'tosca.groups.nfv.VNFFG',
+         'tosca.nodes.nfv.riftio.FP1',
         )
 
     SUPPORT_FILES = ( SRC, DEST, EXISTING) = \
                     ('source', 'destination', 'existing')
 
-    SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR,) = \
-                   ('images', 'scripts',)
+    SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR, CLOUD_INIT_DIR) = \
+                   ('images', 'scripts','cloud_init')
 
     def __init__(self,
                  log,
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
index 7c31df5..95f2cb2 100644
--- a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
@@ -36,6 +36,15 @@
         self.log.debug(_('Converting translated output to tosca template.'))
 
         templates = {}
+        vnfd_templates = {}
+
+        for resource in self.resources:
+            if resource.type == 'vnfd':
+                tmpl = resource.generate_tosca()
+                tmpl = resource.generate_tosca_template(tmpl)
+                self.log.debug(_("TOSCA template generated for {0}:\n{1}").
+                               format(resource.name, tmpl))
+                vnfd_templates[resource.name] = tmpl
 
         for resource in self.resources:
             # Each NSD should generate separate templates
@@ -49,6 +58,14 @@
                 if len(files):
                     templates[resource.name][self.FILES] = files
 
+        for resource in self.resources:
+            if resource.type == 'vnfd':
+                tmpl = vnfd_templates[resource.name]
+                templates[resource.name] = {self.TOSCA: self.output_to_yaml(tmpl)}
+                files = resource.get_supporting_files()
+                if len(files):
+                    templates[resource.name][self.FILES] = files
+
         return templates
 
     def represent_ordereddict(self, dumper, data):
@@ -66,6 +83,7 @@
                  ToscaResource.REQUIREMENTS,ToscaResource.ARTIFACTS,
                  ToscaResource.INTERFACES]
         new_node = OrderedDict()
+        self.log.debug("Node to order: {}".format(node))
         for ent in order:
             if ent in node:
                 new_node.update({ent: node.pop(ent)})
@@ -87,10 +105,18 @@
         else:
             return nodes
 
+    def ordered_nodes_sub_mapping(self, nodes):
+        new_nodes = OrderedDict()
+        if isinstance(nodes, dict):
+            for name, node in nodes.items():
+                new_nodes.update({name: node})
+            return new_nodes
+        else:
+            return nodes
+
     def output_to_yaml(self, tosca):
         self.log.debug(_('Converting translated output to yaml format.'))
         dict_output = OrderedDict()
-
         dict_output.update({'tosca_definitions_version':
                             tosca['tosca_definitions_version']})
         # Description
@@ -106,6 +132,9 @@
         if ToscaResource.METADATA in tosca:
             dict_output.update({ToscaResource.METADATA:
                                tosca[ToscaResource.METADATA]})
+        if ToscaResource.IMPORT in tosca:
+            dict_output.update({ToscaResource.IMPORT:
+                               tosca[ToscaResource.IMPORT]})
 
         # Add all types
         types_list = [ToscaResource.DATA_TYPES, ToscaResource.CAPABILITY_TYPES,
@@ -122,9 +151,14 @@
         if ToscaResource.TOPOLOGY_TMPL in tosca:
             tmpl = OrderedDict()
             for typ in tosca[ToscaResource.TOPOLOGY_TMPL]:
-                tmpl.update({typ:
-                             self.ordered_nodes(
-                                 tosca[ToscaResource.TOPOLOGY_TMPL][typ])})
+                if typ != ToscaResource.SUBSTITUTION_MAPPING:
+                    tmpl.update({typ:
+                                 self.ordered_nodes(
+                                     tosca[ToscaResource.TOPOLOGY_TMPL][typ])})
+                else:
+                    tmpl.update({typ:
+                                 self.ordered_nodes_sub_mapping(
+                                     tosca[ToscaResource.TOPOLOGY_TMPL][typ])})
             dict_output.update({ToscaResource.TOPOLOGY_TMPL: tmpl})
 
         yaml.add_representer(OrderedDict, self.represent_ordereddict)
diff --git a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
index f0a6866..707ab7f 100644
--- a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
+++ b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
@@ -104,10 +104,11 @@
 
         return types_map
 
-    def __init__(self, log, yangs, tosca_template):
+    def __init__(self, log, yangs, tosca_template, vnfd_files=None):
         self.log = log
         self.yangs = yangs
         self.tosca_template = tosca_template
+        self.vnfd_files = vnfd_files
         # list of all TOSCA resources generated
         self.tosca_resources = []
         self.metadata = {}
@@ -143,27 +144,30 @@
 
     def _translate_yang(self):
         self.log.debug(_('Translating the descriptors.'))
-        for nsd in self.yangs[self.NSD]:
-            self.log.debug(_("Translate descriptor of type nsd: {}").
-                           format(nsd))
-            tosca_node = TranslateDescriptors. \
-                         YANG_TO_TOSCA_TYPE[self.NSD](
-                             self.log,
-                             nsd.pop(ToscaResource.NAME),
-                             self.NSD,
-                             nsd)
-            self.tosca_resources.append(tosca_node)
+        if self.NSD in self.yangs:
+            for nsd in self.yangs[self.NSD]:
+                self.log.debug(_("Translate descriptor of type nsd: {}").
+                               format(nsd))
+                tosca_node = TranslateDescriptors. \
+                             YANG_TO_TOSCA_TYPE[self.NSD](
+                                 self.log,
+                                 nsd.pop(ToscaResource.NAME),
+                                 self.NSD,
+                                 nsd,
+                                 self.vnfd_files)
+                self.tosca_resources.append(tosca_node)
 
-        for vnfd in self.yangs[self.VNFD]:
-            self.log.debug(_("Translate descriptor of type vnfd: {}").
-                           format(vnfd))
-            tosca_node = TranslateDescriptors. \
-                         YANG_TO_TOSCA_TYPE[self.VNFD](
-                             self.log,
-                             vnfd.pop(ToscaResource.NAME),
-                             self.VNFD,
-                             vnfd)
-            self.tosca_resources.append(tosca_node)
+        if self.VNFD in self.yangs:
+            for vnfd in self.yangs[self.VNFD]:
+                self.log.debug(_("Translate descriptor of type vnfd: {}").
+                               format(vnfd))
+                tosca_node = TranslateDescriptors. \
+                             YANG_TO_TOSCA_TYPE[self.VNFD](
+                                 self.log,
+                                 vnfd.pop(ToscaResource.NAME),
+                                 self.VNFD,
+                                 vnfd)
+                self.tosca_resources.append(tosca_node)
 
         # First translate VNFDs
         for node in self.tosca_resources:
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
index 8ed4daa..9353454 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
@@ -21,6 +21,7 @@
 from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
     import ToscaResource
 from rift.mano.yang_translator.rwmano.yang.yang_vld import YangVld
+from collections import OrderedDict
 
 TARGET_CLASS_NAME = 'YangNsd'
 
@@ -49,7 +50,8 @@
                  log,
                  name,
                  type_,
-                 yang):
+                 yang,
+                 vnfd_files):
         super(YangNsd, self).__init__(log,
                                       name,
                                       type_,
@@ -57,10 +59,20 @@
         self.props = {}
         self.inputs = []
         self.vnfds = {}
-        self.vlds = []
+        self.vlds = {}
         self.conf_prims = []
         self.scale_grps = []
         self.initial_cfg = []
+        self.placement_groups = []
+        self.vnf_id_to_vnf_map = {}
+        self.vnfd_files = vnfd_files
+        self.vld_to_vnf_map = {}
+        self.vnf_to_vld_map = {}
+        self._vnf_vld_conn_point_map = {}
+        self.vnffgds = {}
+        self.forwarding_paths = {}
+        self.substitution_mapping_forwarder = []
+        self.vnfd_sfc_map = None
 
     def handle_yang(self, vnfds):
         self.log.debug(_("Process NSD desc {0}: {1}").
@@ -85,6 +97,7 @@
             vnfd_id = cvnfd.pop(self.VNFD_ID_REF)
             for vnfd in vnfds:
                 if vnfd.type == self.VNFD and vnfd.id == vnfd_id:
+                    self.vnf_id_to_vnf_map[vnfd_id] = vnfd.name
                     self.vnfds[cvnfd.pop(self.MEM_VNF_INDEX)] = vnfd
                     if self.START_BY_DFLT in cvnfd:
                         vnfd.props[self.START_BY_DFLT] = \
@@ -160,7 +173,244 @@
                 self.log.warn(_("{0}, Did not process all fields for {1}").
                               format(self, dic))
             self.log.debug(_("{0}, Initial config {1}").format(self, icp))
-            self.initial_cfg.append(icp)
+            self.initial_cfg.append({self.PROPERTIES : icp})
+
+        def process_vld(vld, dic):
+            vld_conf = {}
+            vld_prop = {}
+            ip_profile_vld = None
+            vld_name = None
+            if 'ip_profile_ref' in vld:
+                ip_profile_name  = vld['ip_profile_ref']
+                if 'ip_profiles' in dic:
+                    for ip_prof in dic['ip_profiles']:
+                        if ip_profile_name == ip_prof['name']:
+                            ip_profile_vld = ip_prof
+            if 'name' in vld:
+                vld_name = vld['name'].replace('-','_').replace(' ','')
+            if 'description' in vld:
+                vld_conf['description'] = vld['description']
+            if 'vendor' in vld:
+                vld_conf['vendor'] = vld['vendor']
+            if ip_profile_vld:
+                if 'ip_profile_params' in ip_profile_vld:
+                    ip_param = ip_profile_vld['ip_profile_params']
+                    if 'gateway_address' in ip_param:
+                        vld_conf['gateway_ip'] = ip_param['gateway_address']
+                    if 'subnet_address' in ip_param:
+                        vld_conf['cidr'] = ip_param['subnet_address']
+                    if 'ip_version' in ip_param:
+                        vld_conf['ip_version'] = ip_param['ip_version'].replace('ipv','')
+
+            if vld_name:
+                vld_prop = {vld_name :
+                {
+                 'type': self.T_ELAN,
+                 self.PROPERTIES : vld_conf
+                }}
+                self.vlds[vld_name] = { 'type': self.T_ELAN,
+                                         self.PROPERTIES : vld_conf
+                                        }
+
+                self.vld_to_vnf_map[vld_name] = []
+                if 'vnfd_connection_point_ref' in vld:
+                    for vnfd_ref in vld['vnfd_connection_point_ref']:
+                        vnf_name = self.vnf_id_to_vnf_map[vnfd_ref['vnfd_id_ref']]
+                        if vnf_name in self.vnf_to_vld_map:
+                            self.vnf_to_vld_map[vnf_name].append(vld_name)
+                            self._vnf_vld_conn_point_map[vnf_name].\
+                            append((vld_name ,vnfd_ref['vnfd_connection_point_ref']))
+                        else:
+                            self.vnf_to_vld_map[vnf_name] = []
+                            self._vnf_vld_conn_point_map[vnf_name] = []
+                            self.vnf_to_vld_map[vnf_name].append(vld_name)
+                            self._vnf_vld_conn_point_map[vnf_name].\
+                            append((vld_name ,vnfd_ref['vnfd_connection_point_ref']))
+
+        def process_placement_group(placement_groups):
+            for i in range(0, len(placement_groups)):
+                placement_group = placement_groups[i]
+                pg_name = "placement_{0}".format(i)
+                pg_config = {}
+                targets = []
+                if 'name' in placement_group:
+                    pg_config['name'] = placement_group['name']
+                if 'requirement' in placement_group:
+                    pg_config['requirement'] = placement_group['requirement']
+                if 'strategy' in placement_group:
+                    pg_config['strategy'] = placement_group['strategy']
+                if 'member_vnfd' in placement_group:
+                    for member_vnfd in placement_group['member_vnfd']:
+                        targets.append(self.vnf_id_to_vnf_map[member_vnfd['vnfd_id_ref']])
+                placement = { pg_name : {
+                                'type': self.T_PLACEMENT,
+                                self.PROPERTIES: pg_config,
+                                self.TARGETS   :  str(targets)
+                                }
+                            }
+                self.placement_groups.append(placement)
+
+        def process_vnffgd(vnffgs, dic):
+            associated_cp_names = []
+            all_cp_names        = []
+            vnfd_sfc_map        = {}
+            
+            conn_point_to_conection_node = {}
+            conn_point_to_vnf_name_map = {}
+
+            unigue_id_forwarder_path_map = OrderedDict()
+            forwarder_name_to_constitent_vnf_map = OrderedDict()
+            unique_id_classifier_map = OrderedDict()
+            fp_path_count = 1
+            forwarder_count = 1
+
+            vnffg_to_unique_id_rsp_map = OrderedDict()
+            vnffg_to_unique_id_classifier_map = OrderedDict()
+            vnffg_to_associated_cp_names = OrderedDict()
+            rsp_associated_cp_names = OrderedDict()
+            vnffg_to_forwarder_map  = OrderedDict()
+            for vnffg in vnffgs:
+                unique_id_rsp_map = {}
+                for rs in vnffg['rsp']:
+                    unique_id_rsp_map[str(rs['id'])] = rs
+                for class_identifier in vnffg['classifier']:
+                    unique_id_classifier_map[str(class_identifier['rsp_id_ref'])] = class_identifier
+                    associated_cp_names.append(class_identifier['vnfd_connection_point_ref'])
+                    all_cp_names.append(class_identifier['vnfd_connection_point_ref'])
+                    conn_point_to_vnf_name_map[class_identifier['vnfd_connection_point_ref']] = self.vnf_id_to_vnf_map[class_identifier['vnfd_id_ref']]
+                    vnfd_sfc_map[self.vnf_id_to_vnf_map[class_identifier['vnfd_id_ref']]] = class_identifier['vnfd_connection_point_ref']
+
+                    rsp_associated_cp_names[str(class_identifier['rsp_id_ref'])] = class_identifier['vnfd_connection_point_ref']
+
+                vnffg_to_unique_id_rsp_map[vnffg['name']] = unique_id_rsp_map
+                vnffg_to_forwarder_map[vnffg['name']] = []
+    
+            for vnffg in vnffgs:
+                prop = {}
+                fp_members = []
+
+                
+                prop['type'] = self.T_VNFFG
+                prop[self.DESC] = "Test"
+                prop[self.PROPERTIES] = {}
+                if 'vendor' in vnffg:
+                    prop[self.PROPERTIES]['vendor'] = vnffg['vendor']
+                if 'name' in vnffg:
+                    self.vnffgds[vnffg['name']] = prop
+                
+                for rs_id, rs in vnffg_to_unique_id_rsp_map[vnffg['name']].items():
+                    associated_cp_node_names = []
+                    associated_vnf_names = []
+                    number_of_endpoints = 0
+                    if 'vnfd_connection_point_ref' in rs:
+                       number_of_endpoints = number_of_endpoints + len(rs['vnfd_connection_point_ref'])
+                       for vnf in rs['vnfd_connection_point_ref']:
+                            associated_vnf_names.append(str(self.vnf_id_to_vnf_map[vnf['vnfd_id_ref']]))
+                            associated_cp_names.append(vnf['vnfd_connection_point_ref'])
+                            all_cp_names.append(vnf['vnfd_connection_point_ref'])
+                            conn_point_to_vnf_name_map[vnf['vnfd_connection_point_ref']] = self.vnf_id_to_vnf_map[vnf['vnfd_id_ref']]
+                       if "forwarder{}".format(fp_path_count) not in  forwarder_name_to_constitent_vnf_map:
+                            forwarder_name_to_constitent_vnf_map["forwarder{}".format(fp_path_count)] = associated_vnf_names
+                            vnffg_to_forwarder_map[vnffg['name']].append("forwarder{}".format(fp_path_count))
+                    fp_path_count = fp_path_count + 1
+                    
+                    associated_cp_names = list(set(associated_cp_names))
+                    for cp_name in associated_cp_names:
+                            for idx, vnfd in self.vnfds.items():
+                                for vdu in vnfd.vdus:
+                                    if cp_name == rsp_associated_cp_names[rs_id]:
+                                        if cp_name in vdu.conn_point_to_conection_node:
+                                            associated_cp_node_names.append(vdu.conn_point_to_conection_node[cp_name])
+                                            #conn_point_to_conection_node[cp_name] = vdu.conn_point_to_conection_node[cp_name]
+
+                    for cp_name in all_cp_names:
+                        for idx, vnfd in self.vnfds.items():
+                            for vdu in vnfd.vdus:
+                                if cp_name in vdu.conn_point_to_conection_node:
+                                    conn_point_to_conection_node[cp_name] = vdu.conn_point_to_conection_node[cp_name]
+
+                    if len(associated_vnf_names) > 0:
+                        associated_vnf_names = list(set(associated_vnf_names))
+                        vnf_str = ", ".join(associated_vnf_names)
+                        prop[self.PROPERTIES]['constituent_vnfs'] = "[{}]".format(vnf_str)
+                    if len(associated_cp_node_names) > 0:
+                        associated_cp_node_names = list(set(associated_cp_node_names))
+                        connection_point_str = ", ".join(associated_cp_node_names)
+                        prop[self.PROPERTIES]['connection_point'] = "[{}]".format(", ".join(associated_cp_node_names))
+
+                    prop[self.PROPERTIES]['number_of_endpoints'] = number_of_endpoints
+                    fp_name = "Forwarding_path{}".format(forwarder_count)
+                    unigue_id_forwarder_path_map[fp_name] = rs_id
+                    fp_members.append(fp_name)
+                    forwarder_count = forwarder_count + 1
+
+                    if len(fp_members) > 0:
+                        prop['members'] = []
+                        for fp in fp_members:
+                            prop['members'].append(fp)
+
+            fp_count = 1
+            for fp, idx in unigue_id_forwarder_path_map.items():
+                for vnffg_name, unique_id_rsp_map in vnffg_to_unique_id_rsp_map.items():
+                    if idx in unique_id_rsp_map:
+                        prop = {}
+                        prop['type'] = self.T_FP
+                        prop[self.PROPERTIES] = {}
+                        prop[self.PROPERTIES][self.DESC] = "Forwarder"
+                        prop[self.PROPERTIES]['policy'] = {}
+                        prop[self.PROPERTIES]['policy']['type'] = 'ACL'
+                        prop[self.PROPERTIES]['policy']['criteria'] = []
+
+                        prop[self.PROPERTIES]['path'] = []
+
+                        rsp =  unique_id_rsp_map[idx]
+                        classifier = unique_id_classifier_map[idx]
+
+                        for match in classifier['match_attributes']:
+                            match_prop = {}
+                            if 'source_port' in match:
+                                port = "'{}'".format((match['source_port']))
+                                prop[self.PROPERTIES]['policy']['criteria'].append({'source_port_range': port})
+                            if 'destination_port' in match:
+                                port = "'{}'".format((match['destination_port']))
+                                prop[self.PROPERTIES]['policy']['criteria'].append({'destination_port_range': port})
+                            if 'ip_proto' in match:
+                                port = match['ip_proto']
+                                prop[self.PROPERTIES]['policy']['criteria'].append({'ip_proto': port})
+                            if 'destination_ip_address' in match:
+                                port = "'{}'".format((match['destination_ip_address']))
+                                prop[self.PROPERTIES]['policy']['criteria'].append({'ip_dst_prefix': port})
+
+                        if 'vnfd_connection_point_ref' in classifier:
+                            if classifier['vnfd_connection_point_ref'] in conn_point_to_vnf_name_map:
+                                if 'cp' not in prop[self.PROPERTIES]:
+                                    prop[self.PROPERTIES]['cp'] = {}
+                                prop[self.PROPERTIES]['cp']['forwarder'] = conn_point_to_vnf_name_map[classifier['vnfd_connection_point_ref']]
+                                prop[self.PROPERTIES]['cp']['capability'] = conn_point_to_conection_node[classifier['vnfd_connection_point_ref']]
+
+                        for fp, vnf_list in forwarder_name_to_constitent_vnf_map.items():
+                            for vnf in vnf_list:
+                                for cp, vnf_name in conn_point_to_vnf_name_map.items():
+                                    if vnf == vnf_name:
+                                        self.substitution_mapping_forwarder.append((vnf, fp, conn_point_to_conection_node[cp]))
+
+                        visited_forwarder = []
+                        visited_path = None
+                        for path, vnfs in forwarder_name_to_constitent_vnf_map.items():
+                            for vnf in vnfs:
+                                if (vnf not in visited_forwarder) and (path in vnffg_to_forwarder_map[vnffg_name]):
+                                    path_prop = {}
+                                    path_prop['forwarder']  = vnf
+                                    path_prop['capability'] = path
+                                    prop[self.PROPERTIES]['path'].append(path_prop)
+                                    visited_forwarder.append(vnf)
+                                    visited_path = path
+                        forwarder_name_to_constitent_vnf_map.pop(visited_path)
+
+                        self.forwarding_paths["Forwarding_path{}".format(fp_count)] = prop
+                        fp_count = fp_count +1
+
+            self.vnfd_sfc_map = vnfd_sfc_map
 
         dic = deepcopy(self.yang)
         try:
@@ -177,15 +427,20 @@
             # Process VLDs
             if self.VLD in dic:
                 for vld_dic in dic.pop(self.VLD):
-                    vld = YangVld(self.log, vld_dic.pop(self.NAME),
-                                  self.VLD, vld_dic)
-                    vld.process_vld(self.vnfds)
-                    self.vlds.append(vld)
+                    process_vld(vld_dic, dic)
+                    #self.vlds.append(vld)
+
+            #Process VNFFG
+            if self.VNFFGD in dic:
+                process_vnffgd(dic[self.VNFFGD], dic)
+
+
+            #if self.
 
             # Process config primitives
             if self.CONF_PRIM in dic:
                 for cprim in dic.pop(self.CONF_PRIM):
-                    conf_prim = {self.NAME: cprim.pop(self.NAME)}
+                    conf_prim = {self.NAME: cprim.pop(self.NAME), self.DESC : 'TestDescription'}
                     if self.USER_DEF_SCRIPT in cprim:
                         conf_prim[self.USER_DEF_SCRIPT] = \
                                         cprim.pop(self.USER_DEF_SCRIPT)
@@ -212,6 +467,10 @@
                 for param in dic.pop(self.INPUT_PARAM_XPATH):
                     process_input_param(param)
 
+            if 'placement_groups' in dic:
+                process_placement_group(dic['placement_groups'])
+
+
             self.remove_ignored_fields(dic)
             if len(dic):
                 self.log.warn(_("{0}, Did not process the following for "
@@ -226,13 +485,14 @@
             raise ValidationError(message=err_msg)
 
     def generate_tosca_type(self):
+
         self.log.debug(_("{0} Generate tosa types").
                        format(self))
 
         tosca = {}
-        tosca[self.DATA_TYPES] = {}
-        tosca[self.NODE_TYPES] = {}
-
+        #tosca[self.DATA_TYPES] = {}
+        #tosca[self.NODE_TYPES] = {}
+        return tosca
         for idx, vnfd in self.vnfds.items():
             tosca = vnfd.generate_tosca_type(tosca)
 
@@ -287,20 +547,25 @@
     def generate_tosca_template(self, tosca):
         self.log.debug(_("{0}, Generate tosca template").
                        format(self, tosca))
-
         # Add the standard entries
         tosca['tosca_definitions_version'] = \
-                                    'tosca_simple_profile_for_nfv_1_0_0'
+                                    'tosca_simple_profile_for_nfv_1_0'
         tosca[self.DESC] = self.props[self.DESC]
         tosca[self.METADATA] = {
             'ID': self.name,
             self.VENDOR: self.props[self.VENDOR],
             self.VERSION: self.props[self.VERSION],
         }
+        if len(self.vnfd_files) > 0:
+            tosca[self.IMPORT] = []
+            imports = []
+            for vnfd_file in self.vnfd_files:
+                tosca[self.IMPORT].append('"{0}.yaml"'.format(vnfd_file))
 
         tosca[self.TOPOLOGY_TMPL] = {}
 
         # Add input params
+        '''
         if len(self.inputs):
             if self.INPUTS not in tosca[self.TOPOLOGY_TMPL]:
                 tosca[self.TOPOLOGY_TMPL][self.INPUTS] = {}
@@ -309,15 +574,50 @@
                                           self.DESC:
                                           'Translated from YANG'}}
                 tosca[self.TOPOLOGY_TMPL][self.INPUTS] = entry
-
+        '''
         tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}
 
         # Add the VNFDs and VLDs
         for idx, vnfd in self.vnfds.items():
-            vnfd.generate_vnf_template(tosca, idx)
+            #vnfd.generate_vnf_template(tosca, idx)
+            node = {
+              'type' : vnfd.vnf_type,
+              self.PROPERTIES : {
+                self.ID : idx,
+                self.VENDOR : self.props[self.VENDOR],
+                self.VERSION : self.props[self.VERSION]
+              }
+            }
+            if vnfd.name in self.vnf_to_vld_map:
+                vld_list = self.vnf_to_vld_map[vnfd.name]
+                node[self.REQUIREMENTS] = []
+                for vld_idx in range(0, len(vld_list)):
+                    vld_link_name = "{0}{1}".format("virtualLink", vld_idx + 1)
+                    vld_prop = {}
+                    vld_prop[vld_link_name] = vld_list[vld_idx]
+                    node[self.REQUIREMENTS].append(vld_prop)
+                    if vnfd.name in self._vnf_vld_conn_point_map:
+                        vnf_vld_list = self._vnf_vld_conn_point_map[vnfd.name]
+                        for vnf_vld in vnf_vld_list:
+                            vnfd.generate_vld_link(vld_link_name, vnf_vld[1])
 
-        for vld in self.vlds:
-            vld.generate_tosca_template(tosca)
+            for sub_mapping in self.substitution_mapping_forwarder:
+                if sub_mapping[0] == vnfd.name:
+                    vnfd.generate_forwarder_sub_mapping(sub_mapping)
+
+            for vnfd_name, cp_name in self.vnfd_sfc_map.items():
+                if vnfd.name == vnfd_name:
+                    vnfd.generate_sfc_link(cp_name)
+
+
+
+            tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vnfd.name] = node
+
+        for vld_node_name in self.vlds:
+            tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vld_node_name] = self.vlds[vld_node_name]
+
+        for fp_name, fp in self.forwarding_paths.items():
+            tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][fp_name] = fp
 
         # add the config primitives
         if len(self.conf_prims):
@@ -337,8 +637,8 @@
             conf_prims[self.PROPERTIES] = {
                 self.PRIMITIVES: prims
             }
-
-            tosca[self.TOPOLOGY_TMPL][self.GROUPS][self.CONF_PRIM] = conf_prims
+            conf_prims[self.DESC] = 'Test'
+            #tosca[self.TOPOLOGY_TMPL][self.GROUPS][self.CONF_PRIM] = conf_prims
 
 
         # Add the scale group
@@ -361,33 +661,45 @@
                 tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
 
             for icp in self.initial_cfg:
-                icpt = {
-                    self.TYPE: self.T_INITIAL_CFG,
-                }
-                icpt.update(icp)
-                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
-                    self.INITIAL_CFG: icpt
-                })
+                if len(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]) > 0:
+                    node_name = list(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL].keys())[0]
+                    icpt = {
+                        self.TYPE: self.T_INITIAL_CFG,
+                        self.TARGETS : "[{0}]".format(node_name)
+                    }
+                    icpt.update(icp)
+                    tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
+                        self.INITIAL_CFG: icpt
+                    })
+
+        if len(self.placement_groups) > 0:
+            if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+
+            for placment_group in self.placement_groups:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append(placment_group)
+
+        if len(self.vnffgds) > 0:
+            if self.GROUPS not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.GROUPS] = {}
+            for vnffgd_name in self.vnffgds:
+                tosca[self.TOPOLOGY_TMPL][self.GROUPS][vnffgd_name] = self.vnffgds[vnffgd_name]
+
 
         return tosca
 
     def get_supporting_files(self):
         files = []
-
-        for vnfd in self.vnfds.values():
-            f = vnfd.get_supporting_files()
-            if f and len(f):
-                files.extend(f)
-
         # Get the config files for initial config
         for icp in self.initial_cfg:
-            if self.USER_DEF_SCRIPT in icp:
-                script = os.path.basename(icp[self.USER_DEF_SCRIPT])
-                files.append({
-                    self.TYPE: 'script',
-                    self.NAME: script,
-                    self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
-                })
+            if 'properties' in icp:
+                if 'user_defined_script' in icp['properties']:
+                    script = os.path.basename(icp['properties']['user_defined_script'])
+                    files.append({
+                        self.TYPE: 'script',
+                        self.NAME: script,
+                        self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
+                    })
 
         # TODO (pjoseph): Add support for config scripts,
         # charms, etc
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
index 7d095c1..2d82872 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
@@ -56,6 +56,15 @@
         'storage_gb': ' GB',
     }
 
+    TOSCA_MEM_SIZE = {
+        'LARGE': 'huge',
+        'SMALL': 'normal',
+        'SIZE_2MB': 'size_2MB',
+        'SIZE_1GB': 'size_1GB',
+        'PREFER_LARGE': 'prefer_huge'
+
+    }
+
     def __init__(self,
                  log,
                  name,
@@ -69,8 +78,16 @@
         self.props = {}
         self.ext_cp = []
         self.int_cp = []
-        self.image = None
+        self.image           = None
         self.cloud_init_file = None
+        self.host_epa        = None
+        self.vswitch_epa     = None
+        self.hypervisor_epa  = None
+        self.guest_epa       = None
+        self.cp_name_to_cp_node = {}
+        self.pinning_epa_prop   = {}
+        self.mem_page_guest_epa = None
+        self.conn_point_to_conection_node = {}
 
     def process_vdu(self):
         self.log.debug(_("Process VDU desc {0}: {1}").format(self.name,
@@ -88,9 +105,9 @@
         self.id = vdu[self.ID]
 
         if self.VM_FLAVOR in vdu_dic:
-            vdu[self.HOST] = {}
+            vdu[self.NFV_COMPUTE] = {}
             for key, value in vdu_dic.pop(self.VM_FLAVOR).items():
-                vdu[self.HOST][self.VM_FLAVOR_MAP[key]] = "{}{}". \
+                vdu[self.NFV_COMPUTE][self.VM_FLAVOR_MAP[key]] = "{}{}". \
                             format(value, self.VM_SIZE_UNITS_MAP[key])
 
         if self.EXT_INTF in vdu_dic:
@@ -103,7 +120,115 @@
                                format(self, cp, ext_intf))
                 self.ext_cp.append(cp)
 
+        if self.HOST_EPA in vdu_dic:
+            host_epa = vdu_dic.pop(self.HOST_EPA)
+            host_epa_prop = {}
+            self.host_epa = host_epa
+            '''
+            if 'cpu_model' in host_epa:
+                host_epa_prop['cpu_model'] = host_epa['cpu_model'].lower()
+            if 'cpu_arch' in host_epa:
+                host_epa_prop['cpu_arch'] = host_epa['cpu_arch'].lower()
+            if 'cpu_vendor' in host_epa:
+                host_epa_prop['cpu_vendor'] = host_epa['cpu_vendor'].lower()
+            if 'cpu_socket_count' in host_epa:
+                host_epa_prop['cpu_socket_count'] = host_epa['cpu_socket_count']
+            if 'cpu_core_count' in host_epa:
+                host_epa_prop['cpu_core_count'] = host_epa['cpu_core_count']
+            if 'cpu_core_thread_count' in host_epa:
+                host_epa_prop['cpu_core_thread_count'] = host_epa['cpu_core_thread_count']
+            if 'om_cpu_model_string' in host_epa:
+                host_epa_prop['om_cpu_model_string'] = host_epa['om_cpu_model_string']
+            if 'cpu_feature' in host_epa:
+                host_epa_prop['cpu_feature'] = []
+                for cpu_feature in host_epa['cpu_feature']:
+                    cpu_feature_prop = {}
+                    cpu_feature_prop['feature'] = cpu_feature['feature'].lower()
+                    host_epa_prop['cpu_feature'] .append(cpu_feature_prop)
+
+            if 'om_cpu_feature' in host_epa:
+                host_epa_prop['om_cpu_feature'] = []
+                for cpu_feature in host_epa['om_cpu_feature']:
+                    om_cpu_feature_prop = {}
+                    om_cpu_feature_prop['feature'] = cpu_feature
+                    host_epa_prop['om_cpu_feature'].append(om_cpu_feature_prop)
+            self.host_epa = host_epa
+            '''
+        # We might have to rewrite this piece of code; there are mismatches in
+        # enum names. They are all capitals in RIFT YANG and TOSCA.
+        if self.VSWITCH_EPA in vdu_dic:
+            vswitch_epa = vdu_dic.pop(self.VSWITCH_EPA)
+            self.vswitch_epa = vswitch_epa
+        if self.HYPERVISOR_EPA in vdu_dic:
+            hypervisor_epa = vdu_dic.pop(self.HYPERVISOR_EPA)
+            hypervisor_epa_prop = {}
+
+            if 'type_yang' in hypervisor_epa:
+                hypervisor_epa_prop['type'] = hypervisor_epa['type_yang']
+            if 'version' in hypervisor_epa:
+                hypervisor_epa_prop['version'] = str(hypervisor_epa['version'])
+            else:
+                hypervisor_epa_prop['version'] = '1'
+            self.hypervisor_epa = hypervisor_epa_prop
+
+        if self.GUEST_EPA in vdu_dic:
+            guest_epa = vdu_dic[self.GUEST_EPA]
+            guest_epa_prop = {}
+
+            # This is a hack that needs a rewrite; it was done quickly to get things working.
+            # The 'ANY' check should be added in the riftio common file. It's not working for some reason; will fix.
+
+            if 'cpu_pinning_policy' in guest_epa and guest_epa['cpu_pinning_policy'] != 'ANY':
+                self.pinning_epa_prop['cpu_affinity'] = guest_epa['cpu_pinning_policy'].lower()
+            if 'cpu_thread_pinning_policy' in guest_epa:
+                 self.pinning_epa_prop['thread_allocation'] = guest_epa['cpu_thread_pinning_policy'].lower()
+            if 'mempage_size'  in guest_epa:
+                self.mem_page_guest_epa = self.TOSCA_MEM_SIZE[guest_epa['mempage_size']]
+
+            if 'numa_node_policy' in guest_epa:
+                num_node_policy = guest_epa['numa_node_policy']
+                if 'node_cnt' in num_node_policy:
+                    guest_epa_prop['node_cnt'] = num_node_policy['node_cnt']
+                if 'mem_policy' in num_node_policy:
+                    guest_epa_prop['mem_policy'] = num_node_policy['mem_policy']
+                if 'node' in num_node_policy:
+                    nodes = []
+                    for node in num_node_policy['node']:
+                        node_prop = {}
+                        if 'id' in node:
+                            node_prop['id'] = node['id']
+                        if 'vcpu' in node:
+                            vc =[]
+                            for vcp in node['vcpu']:
+                                vc.append(vcp['id'])
+
+                            node_prop['vcpus'] = vc
+                        if 'memory_mb' in  node:
+                            node_prop['mem_size'] = "{} MB".format(node['memory_mb'])
+                        # om_numa_type generation
+
+                        if 'num_cores' in node:
+                            node_prop['om_numa_type'] = 'num_cores'
+                            node_prop['num_cores'] = node['num_cores']
+                        elif 'paired_threads' in node:
+                            node_prop['om_numa_type'] = 'paired-threads'
+                            node_prop['paired_threads'] = node['paired_threads']
+                        elif 'threads' in node:
+                            node_prop['om_numa_type'] = 'threads'
+                            node_prop['num_threads'] = node['threads']
+
+                        nodes.append(node_prop)
+                    guest_epa_prop['node'] = nodes
+
+            self.guest_epa = guest_epa_prop
+
         self.remove_ignored_fields(vdu_dic)
+
+        for cp in self.ext_cp:
+            cp_name = cp[self.NAME].replace('/', '_')
+            self.conn_point_to_conection_node[cp[self.NAME]] = cp_name
+
+
         if len(vdu_dic):
             self.log.warn(_("{0}, Did not process the following in "
                             "VDU: {1}").
@@ -151,6 +276,7 @@
         # Create a unique name incase multiple VNFs use same
         # name for the vdu
         return "{}_{}".format(vnf_name, self.name)
+        #return self.name
 
     def generate_tosca_type(self, tosca):
         self.log.debug(_("{0} Generate tosa types").
@@ -218,14 +344,35 @@
 
         node = {}
         node[self.TYPE] = self.T_VDU1
+        node[self.CAPABILITIES] = {}
 
-        if self.HOST in self.props:
-            node[self.CAPABILITIES] = {
-                self.HOST: {self.PROPERTIES: self.props.pop(self.HOST)}
-            }
+        if self.NFV_COMPUTE in self.props:
+            node[self.CAPABILITIES][self.NFV_COMPUTE] = {self.PROPERTIES: self.props.pop(self.NFV_COMPUTE)}
         else:
             self.log.warn(_("{0}, Does not have host requirements defined").
                           format(self))
+        if self.host_epa:
+            node[self.CAPABILITIES][self.HOST_EPA] = {
+                self.PROPERTIES: self.host_epa
+            }
+        if self.vswitch_epa:
+            node[self.CAPABILITIES][self.VSWITCH_EPA] = {
+                self.PROPERTIES: self.vswitch_epa
+            }
+        if self.hypervisor_epa:
+            node[self.CAPABILITIES][self.HYPERVISOR_EPA] = {
+                self.PROPERTIES: self.hypervisor_epa
+            }
+        if self.guest_epa:
+            node[self.CAPABILITIES]['numa_extension'] = {
+                self.PROPERTIES: self.guest_epa
+            }
+        if len(self.pinning_epa_prop) > 0:
+            if node[self.CAPABILITIES][self.NFV_COMPUTE] and node[self.CAPABILITIES][self.NFV_COMPUTE][self.PROPERTIES]:
+                node[self.CAPABILITIES][self.NFV_COMPUTE][self.PROPERTIES]['cpu_allocation'] = self.pinning_epa_prop
+        if self.mem_page_guest_epa:
+            if node[self.CAPABILITIES][self.NFV_COMPUTE] and node[self.CAPABILITIES][self.NFV_COMPUTE][self.PROPERTIES]:
+                node[self.CAPABILITIES][self.NFV_COMPUTE][self.PROPERTIES]['mem_page_size'] = self.mem_page_guest_epa
 
         if self.IMAGE in self.props:
             img_name = "{}_{}_vm_image".format(vnf_name, self.name)
@@ -241,10 +388,22 @@
             node[self.INTERFACES] = {'Standard': {
                 'create': img_name
             }}
-
         # Add cloud init script if available
         if self.CLOUD_INIT_FILE in self.props:
+            cloud_name = "{}_{}_cloud_init".format(vnf_name, self.name)
             self.cloud_init_file = self.props[self.CLOUD_INIT_FILE]
+            cloud_init_file = "../{}/{}".format(self.CLOUD_INIT_DIR, self.props.pop(self.CLOUD_INIT_FILE))
+            if self.ARTIFACTS in node:
+               node[self.ARTIFACTS][cloud_name] = {
+               self.FILE: cloud_init_file,
+               self.TYPE: self.T_ARTF_CLOUD_INIT,
+               }
+            else:
+                node[self.ARTIFACTS] = {
+                cloud_name: {
+                self.FILE: cloud_init_file,
+                self.TYPE: self.T_ARTF_CLOUD_INIT,
+                }}
 
         # Remove
         self.props.pop(self.ID)
@@ -269,6 +428,7 @@
 
             cpt[self.PROPERTIES] = cp
             cp_name = cp[self.NAME].replace('/', '_')
+            self.cp_name_to_cp_node[cp[self.NAME]] = cp_name
 
             self.log.debug(_("{0}, CP node {1}: {2}").
                            format(self, cp_name, cpt))
@@ -296,7 +456,5 @@
             })
 
         self.log.debug(_("Supporting files for {} : {}").format(self, files))
-        if not len(files):
-            shutil.rmtree(out_dir)
 
         return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
index 7449c5a..ec21e3c 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
@@ -33,6 +33,7 @@
 
     OTHER_KEYS = (MGMT_INTF, HTTP_EP, MON_PARAM) = \
                  ('mgmt_interface', 'http_endpoint', 'monitoring_param')
+    vnf_prefix_type = 'tosca.nodes.nfv.riftio.'
 
 
     def __init__(self,
@@ -49,6 +50,13 @@
         self.mgmt_intf = {}
         self.mon_param = []
         self.http_ep = []
+        self.vnf_configuration = None
+        self.monitor_param = {}
+        self.monitor_param_1 = {}
+        self.vnf_type = None
+        self.tosca = None
+        self.script_files = []
+        self.service_function_type = None
 
     def handle_yang(self):
         self.log.debug(_("Process VNFD desc {0}: {1}").format(self.name,
@@ -56,28 +64,45 @@
 
         def process_vnf_config(conf):
             vnf_conf = {}
-            if self.CONFIG_ATTR in conf:
-                for key, value in conf.pop(self.CONFIG_ATTR).items():
-                    vnf_conf[key] = value
+            config = {}
 
-            if self.CONFIG_TMPL in conf:
-                vnf_conf[self.CONFIG_TMPL] = conf.pop(self.CONFIG_TMPL)
+            init_primitive_config = {}
+            if 'config_template' in conf:
+                config['config_template'] = conf['config_template']
+            if 'config_attributes' in conf:
+                if 'config_delay' in conf['config_attributes']:
+                    config['config_delay'] = conf['config_attributes']['config_delay']
+                if 'config_priority' in conf['config_attributes']:
+                    config['config_priority'] = conf['config_attributes']['config_priority']
+            if 'config_type' in conf:
+                config['config_type'] = conf['config_type']
+            if 'script' in conf:
+                config['config_details'] = conf['script']
+            for conf_type in self.CONFIG_TYPES:
+                if conf_type in conf:
+                    config['config_type'] = conf_type
+            if len(config) > 0:
+                vnf_conf['config'] = config
 
-            def copy_config_details(conf_type, conf_details):
-                vnf_conf[self.CONFIG_TYPE] = conf_type
-                vnf_conf[self.CONFIG_DETAILS] = conf_details
+            if 'initial_config_primitive' in conf:
+                init_config_prims = []
+                for init_conf_prim in conf['initial_config_primitive']:
+                    init_conf = {}
+                    if 'name' in init_conf_prim:
+                        init_conf['name'] = init_conf_prim['name']
+                    if 'seq' in init_conf_prim:
+                        init_conf['seq'] = init_conf_prim['seq']
+                    if 'user_defined_script' in init_conf_prim:
+                        init_conf['user_defined_script'] = init_conf_prim['user_defined_script']
+                        self.script_files.append(init_conf_prim['user_defined_script'])
+                    if 'parameter' in init_conf_prim:
+                        init_conf['parameter'] = []
+                        for parameter in init_conf_prim['parameter']:
+                            init_conf['parameter'].append({parameter['name']: parameter['value']})
+                    init_config_prims.append(init_conf)
+                vnf_conf['initial_config_primitive'] = init_config_prims
 
-            for key in self.CONFIG_TYPES:
-                if key in conf:
-                    copy_config_details(key, conf.pop(key))
-                    break
-
-            if len(conf):
-                self.log.warn(_("{0}, Did not process all in VNF "
-                                "configuration {1}").
-                              format(self, conf))
-            self.log.debug(_("{0}, vnf config: {1}").format(self, vnf_conf))
-            self.props[self.VNF_CONFIG] = vnf_conf
+            self.vnf_configuration = vnf_conf
 
         def process_mgmt_intf(intf):
             if len(self.mgmt_intf) > 0:
@@ -117,7 +142,8 @@
                 http_ep = {'protocol': 'http'}  # Required for TOSCA
                 http_ep[self.PATH] = ep.pop(self.PATH)
                 http_ep[self.PORT] = ep.pop(self.PORT)
-                http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
+                if self.POLL_INTVL in http_ep:
+                    http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
                 if len(ep):
                     self.log.warn(_("{0}, Did not process the following for "
                                     "http ep {1}").format(self, ep))
@@ -130,16 +156,34 @@
                 fields = [self.NAME, self.ID, 'value_type', 'units', 'group_tag',
                           'json_query_method', 'http_endpoint_ref', 'widget_type',
                           self.DESC]
-                for key in fields:
-                    if key in param:
-                        monp[key] = param.pop(key)
+                mon_param = {}
+                ui_param = {}
+                if 'name' in param:
+                    mon_param['name'] = param['name']
+                if 'description' in param:
+                    mon_param['description'] = param['description']
+                if 'polling_interval' in param:
+                    mon_param['polling_interval'] = param['polling_interval']
+                if 'http_endpoint_ref' in param:
+                    mon_param['url_path'] = param['http_endpoint_ref']
+                if 'json_query_method' in param:
+                    mon_param['json_query_method'] = param['json_query_method'].lower()
+                if 'group_tag' in param:
+                    ui_param['group_tag'] = param['group_tag']
+                if 'widget_type' in param:
+                    ui_param['widget_type'] = param['widget_type'].lower()
+                if 'units'  in param:
+                    ui_param['units'] = param['units']
+                mon_param['ui_data'] = ui_param
+
+                self.mon_param.append(mon_param)
 
                 if len(param):
                     self.log.warn(_("{0}, Did not process the following for "
                                     "monitporing-param {1}").
                                   format(self, param))
                     self.log.debug(_("{0}, Monitoring param: {1}").format(self, monp))
-                self.mon_param.append(monp)
+                #self.mon_param.append(monp)
 
         def process_cp(cps):
             for cp_dic in cps:
@@ -154,13 +198,15 @@
                                     "connection-point {1}: {2}").
                                   format(self, name, cp_dic))
 
+        def process_service_type(dic):
+            self.service_function_type = dic['service_function_type']
+
         ENDPOINTS_MAP = {
             self.MGMT_INTF: process_mgmt_intf,
             self.HTTP_EP:  process_http_ep,
             self.MON_PARAM: process_mon_param,
             'connection_point': process_cp
         }
-
         dic = deepcopy(self.yang)
         try:
             for key in self.REQUIRED_FIELDS:
@@ -176,14 +222,15 @@
                                   self.VDU, vdu_dic)
                     vdu.process_vdu()
                     self.vdus.append(vdu)
-
             for key in ENDPOINTS_MAP.keys():
                 if key in dic:
                     ENDPOINTS_MAP[key](dic.pop(key))
-
             if self.VNF_CONFIG in dic:
                 process_vnf_config(dic.pop(self.VNF_CONFIG))
 
+            if 'service_function_type' in dic:
+                process_service_type(dic)
+
             self.remove_ignored_fields(dic)
             if len(dic):
                 self.log.warn(_("{0}, Did not process the following for "
@@ -202,135 +249,54 @@
             if cp:
                 vdu.set_vld(cp_name, vld_name)
                 break
+    def _generate_vnf_type(self, tosca):
+        name = self.name.replace("_","")
+        name = name.split('_', 1)[0]
+        self.vnf_type = "{0}{1}{2}".format(self.vnf_prefix_type, name, 'VNF')
+        if self.NODE_TYPES not in tosca and self.vnf_type:
+            tosca[self.NODE_TYPES] = {}
+            tosca[self.NODE_TYPES][self.vnf_type] = {
+            self.DERIVED_FROM : self.T_VNF1
+            }
 
-    def generate_tosca_type(self, tosca):
-        self.log.debug(_("{0} Generate tosa types").
-                       format(self))
+    def generate_tosca_template(self, tosca):
+        self.tosca = tosca
+        tosca['tosca_definitions_version'] = 'tosca_simple_profile_for_nfv_1_0'
+        tosca[self.IMPORT] = []
+        tosca[self.IMPORT].append("riftiotypes.yaml")
+        tosca[self.DESC] = self.props[self.DESC]
+        tosca[self.METADATA] = {
+            'ID': self.name,
+            self.VENDOR: self.props[self.VENDOR],
+            self.VERSION: self.props[self.VERSION],
+        }
+        if self.name:
+            self._generate_vnf_type(tosca);
+
+
+        tosca[self.TOPOLOGY_TMPL] = {}
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}
+        tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING] = {}
+        tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]['node_type'] = self.vnf_type
 
         for vdu in self.vdus:
-            tosca = vdu.generate_tosca_type(tosca)
-
-        # Add data_types
-        if self.T_VNF_CONFIG not in tosca[self.DATA_TYPES]:
-            tosca[self.DATA_TYPES][self.T_VNF_CONFIG] = {
-                self.PROPERTIES:
-                {self.CONFIG_TYPE:
-                 {self.TYPE: self.STRING},
-                 'config_delay':
-                 {self.TYPE: self.INTEGER,
-                  self.DEFAULT: 0,
-                  self.REQUIRED: self.NO,
-                  self.CONSTRAINTS:
-                  [{'greater_or_equal': 0}]},
-                 'config_priority':
-                 {self.TYPE: self.INTEGER,
-                  self.CONSTRAINTS:
-                  [{'greater_than': 0}]},
-                 self.CONFIG_DETAILS:
-                 {self.TYPE: self.MAP},
-                 self.CONFIG_TMPL:
-                 {self.TYPE: self.STRING,
-                  self.REQUIRED: self.NO},
-                }
-            }
-
-        # Add capability types
-        if self.CAPABILITY_TYPES not in tosca:
-            tosca[self.CAPABILITY_TYPES] = {}
-        if self.T_HTTP_EP not in tosca[self.CAPABILITY_TYPES]:
-            tosca[self.CAPABILITY_TYPES][self.T_HTTP_EP] = {
-                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
-                self.PROPERTIES: {
-                    'polling_interval':
-                    {self.TYPE: self.INTEGER},
-                    'path':
-                    {self.TYPE: self.STRING},
-                },
-            }
-
-        if self.T_MGMT_INTF not in tosca[self.CAPABILITY_TYPES]:
-            tosca[self.CAPABILITY_TYPES][self.T_MGMT_INTF] = {
-                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
-                self.PROPERTIES: {
-                    self.DASHBOARD_PARAMS:
-                    {self.TYPE: self.MAP},
-                    self.VDU:
-                    {self.TYPE: self.STRING},
-                },
-            }
-
-        if self.T_MON_PARAM not in tosca[self.CAPABILITY_TYPES]:
-            tosca[self.CAPABILITY_TYPES][self.T_MON_PARAM] = {
-                self.DERIVED_FROM: 'tosca.capabilities.nfv.Metric',
-                self.PROPERTIES: {
-                    'id':
-                    {self.TYPE: self.INTEGER},
-                    'name':
-                    {self.TYPE: self.STRING},
-                    'value_type':
-                    {self.TYPE: self.STRING,
-                     self.DEFAULT: 'INT'},
-                    'group_tag':
-                    {self.TYPE: self.STRING,
-                     self.DEFAULT: 'Group1'},
-                    'units':
-                    {self.TYPE: self.STRING},
-                    'description':
-                    {self.TYPE: self.STRING},
-                    'json_query_method':
-                    {self.TYPE: self.STRING,
-                     self.DEFAULT: 'NAMEKEY'},
-                    'http_endpoint_ref':
-                    {self.TYPE: self.STRING},
-                    'widget_type':
-                    {self.TYPE: self.STRING,
-                     self.DEFAULT: 'COUNTER'},
-                }
-            }
-
-        # Define the VNF type
-        if self.T_VNF1 not in tosca[self.NODE_TYPES]:
-            tosca[self.NODE_TYPES][self.T_VNF1] = {
-                self.DERIVED_FROM: 'tosca.nodes.nfv.VNF',
-                self.PROPERTIES: {
-                    'vnf_configuration':
-                    {self.TYPE: self.T_VNF_CONFIG},
-                    'port':
-                    {self.TYPE: self.INTEGER,
-                     self.CONSTRAINTS:
-                     [{'in_range': '[1, 65535]'}]},
-                    self.START_BY_DFLT:
-                    {self.TYPE: self.BOOL,
-                     self.DEFAULT: self.TRUE},
-                },
-                self.CAPABILITIES: {
-                    'mgmt_interface':
-                    {self.TYPE: self.T_MGMT_INTF},
-                    'http_endpoint':
-                    {self.TYPE: self.T_HTTP_EP},
-                    'monitoring_param_0':
-                    {self.TYPE: self.T_MON_PARAM},
-                    'monitoring_param_1':
-                    {self.TYPE: self.T_MON_PARAM},
-                },
-                self.REQUIREMENTS: [
-                    {'vdus':
-                     {self.TYPE: 'tosca.capabilities.nfv.VirtualLinkable',
-                      self.RELATIONSHIP:
-                      'tosca.relationships.nfv.VirtualLinksTo',
-                      self.NODE: self.T_VDU1,
-                      self.OCCURENCES: '[1, UNBOUND]'}}
-                ],
-            }
-
-        return tosca
-
-    def generate_vnf_template(self, tosca, index):
-        self.log.debug(_("{0}, Generate tosca template for VNF {1}").
-                       format(self, index, tosca))
-
-        for vdu in self.vdus:
-            tosca = vdu.generate_vdu_template(tosca, self.name)
+            vdu.generate_vdu_template(tosca, self.name)
+            if 'vdu' in self.mgmt_intf and self.mgmt_intf['vdu'] == vdu.get_name(self.name): #TEST
+                mgmt_interface = {}
+                mgmt_interface[self.PROPERTIES] = self.mgmt_intf
+                self.mgmt_intf.pop('vdu')
+                caps = []
+                tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['mgmt_interface'] = mgmt_interface #TEST
+                if len(self.mon_param) > 0:
+                    mon_param = {}
+                    mon_param = {}
+                    mon_param['properties'] = self.mon_param[0]
+                    tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param'] = mon_param #TEST
+                if len(self.mon_param) == 2:
+                    mon_param = {}
+                    mon_param = {}
+                    mon_param['properties'] = self.mon_param[1]
+                    tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param_1'] = mon_param
 
         node = {}
         node[self.TYPE] = self.T_VNF1
@@ -339,7 +305,9 @@
         self.props.pop(self.DESC)
 
         # Update index to the member-vnf-index
-        self.props[self.ID] = index
+
+        # For now I am putting index as 1. This needs to be revisited.
+        self.props[self.ID] = 1
         node[self.PROPERTIES] = self.props
 
         caps = {}
@@ -378,16 +346,88 @@
 
         self.log.debug(_("{0}, VNF node: {1}").format(self, node))
 
-        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node
+        #tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node
+        self.get_vnf_configuration_policy(tosca)
 
         return tosca
 
-    def get_supporting_files(self):
-        files = []
+    def generate_vld_link(self, virtualLink, conn_point):
+        if self.REQUIREMENTS not in self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]:
+            self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING] = {}
+            self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]['node_type'] = self.vnf_type
+            #self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]['node_type'] = []
+            #self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]['node_type'].\
+            #append(['node_type', self.vnf_type])
+            self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS] = []
 
         for vdu in self.vdus:
-            f = vdu.get_supporting_files()
-            if f and len(f):
-                files.extend(f)
+            if conn_point in vdu.cp_name_to_cp_node:
+                conn_point_node_name = vdu.cp_name_to_cp_node[conn_point]
+                self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS].\
+                    append({virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")})
+
+        if self.REQUIREMENTS not in self.tosca[self.NODE_TYPES][self.vnf_type]:
+            self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS] = []
+        self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS].append({virtualLink : {
+                                                                        "type": "tosca.nodes.nfv.VL"}})
+    def generate_forwarder_sub_mapping(self, sub_link):
+        if self.CAPABILITIES not in self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]:
+            self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.CAPABILITIES] = {}
+            self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.CAPABILITIES]
+
+        self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.CAPABILITIES][sub_link[1]] = \
+                            "[{}, forwarder]".format(sub_link[2])
+
+    def generate_sfc_link(self, sfs_conn_point_name):
+        for vdu in self.vdus:
+            if sfs_conn_point_name in vdu.cp_name_to_cp_node:
+                 conn_point_node_name = vdu.cp_name_to_cp_node[sfs_conn_point_name]
+                 if conn_point_node_name in self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]:
+                    if self.CAPABILITIES not in  self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]:
+                        self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][conn_point_node_name][self.CAPABILITIES] = {}
+                    self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][conn_point_node_name][self.CAPABILITIES]['sfc'] =  {self.PROPERTIES: {}}
+                    self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][conn_point_node_name] \
+                            [self.CAPABILITIES]['sfc'][self.PROPERTIES]['sfc_type'] = 'sf'
+
+                    if self.service_function_type:
+                        self.tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][conn_point_node_name] \
+                                [self.CAPABILITIES]['sfc'][self.PROPERTIES]['sf_type'] = self.service_function_type
+
+    def generate_tosca(self):
+        tosca = {}
+        return tosca
+
+    def get_vnf_configuration_policy(self, tosca):
+        if self.vnf_configuration:
+            if self.POLICIES in tosca:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES]['configuration'] ={
+                'type' : self.T_VNF_CONFIG,
+                 self.PROPERTIES: self.vnf_configuration
+                }
+            else:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+            # This is a bad hack. TOSCA OpenStack does not return policies without a target
+            if len(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]) > 0:
+                node_name = list(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL].keys())[0]
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({'configuration' :{
+                 'type' : self.T_VNF_CONFIG,
+                 self.PROPERTIES: self.vnf_configuration,
+                 self.TARGETS : "[{0}]".format(node_name)
+                }})
+
+    def get_supporting_files(self):
+        files = []
+        for file in self.script_files:
+            files.append({
+                        self.TYPE: 'script',
+                        self.NAME: file,
+                        self.DEST: "{}/{}".format(self.SCRIPT_DIR, file),
+                    })
+
+
+        for vdu in self.vdus:
+            vdu_files = vdu.get_supporting_files()
+            for vdu_file in vdu_files:
+                files.append(vdu_file)
 
         return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
index 907a4a0..0919494 100644
--- a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
+++ b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
@@ -48,24 +48,36 @@
         self.tosca_template = ToscaTemplate(log)
         self.node_translator = None
         self.pkgs = packages
+        self.output_files = {}
+        self.output_files['nsd'] = []
+        self.output_files['vnfd'] = []
+
         log.info(_('Initialized parameters for translation.'))
 
     def translate(self):
         if self.files:
             self.get_yangs()
+        else:
+            if 'nsd' in self.yangs:
+                self.output_files['nsd'].append(self.yangs['nsd'][0]['short_name'])
+            if 'vnfd' in self.yangs:
+                for yang_vnfd in self.yangs['vnfd']:
+                    self.output_files['vnfd'].append(yang_vnfd['short_name'])
 
         self.node_translator = TranslateDescriptors(self.log,
                                                       self.yangs,
-                                                      self.tosca_template)
-
+                                                      self.tosca_template,
+                                                      self.output_files['vnfd'])
         self.tosca_template.resources = self.node_translator.translate()
 
+
         return self.tosca_template.output_to_tosca()
 
     def get_yangs(self):
         '''Get the descriptors and convert to yang instances'''
         for filename in self.files:
             self.log.debug(_("Load file {0}").format(filename))
+
             # Only one descriptor per file
             if tarfile.is_tarfile(filename):
                 tar = open(filename, "r+b")
@@ -78,36 +90,70 @@
                         self.yangs[TranslateDescriptors.NSD] = []
                     self.yangs[TranslateDescriptors.NSD]. \
                         append(pkg.descriptor_msg.as_dict())
+                    if 'name' in pkg.descriptor_msg.as_dict() is not None:
+                        self.output_files['nsd'].append(pkg.descriptor_msg.as_dict()['name'])
+                    else:
+                        raise ValidationError(message="NSD Descriptor name attribute is not populated ")
                 elif desc_type == TranslateDescriptors.VNFD:
                     if TranslateDescriptors.VNFD not in self.yangs:
                         self.yangs[TranslateDescriptors.VNFD] = []
                     self.yangs[TranslateDescriptors.VNFD]. \
                         append(pkg.descriptor_msg.as_dict())
+                    if 'name' in pkg.descriptor_msg.as_dict() is not None:
+                        self.output_files['vnfd'].append(pkg.descriptor_msg.as_dict()['name'])
+                    else:
+                        raise ValidationError(message="VNFD Descriptor name attribute is not populated ")
                 else:
                     raise ValidationError("Unknown descriptor type: {}".
                                           format(desc_type))
 
-    def _create_csar_files(self, output_dir, name, tmpl,
+    def _create_csar_files(self, output_dir, tmpl_out,
                            archive=False):
-        if ToscaTemplate.TOSCA not in tmpl:
-            self.log.error(_("Did not find TOSCA template for {0}").
-                           format(name))
-            return
-
+        '''
+        for tmpl in tmpl_out:
+            if ToscaTemplate.TOSCA not in tmpl:
+                self.log.error(_("Did not find TOSCA template for {0}").
+                           format(tmpl))
+                return
+        '''
         # Create sub for each NS template
-        subdir = os.path.join(output_dir, name)
+        sub_folder_name = None
+        if self.files:
+            if len(self.output_files['nsd']) > 0:
+                if len(self.output_files['nsd']) == 1:
+                    sub_folder_name = self.output_files['nsd'][0]
+                else:
+                    raise ValidationError(message="Multiple NSD Descriptor uploaded ")
+            elif len(self.output_files['vnfd']) > 0:
+                if len(self.output_files['vnfd']) == 1:
+                    sub_folder_name = self.output_files['vnfd'][0]
+                else:
+                    raise ValidationError(message="Multiple VNFDs Descriptors uploaded without NSD")
+            else:
+                raise ValidationError(message="No NSD or VNFD uploaded")
+        else:
+            if 'nsd' in self.yangs:
+                sub_folder_name = self.yangs['nsd'][0]['short_name']
+            elif 'vnfd' in self.yangs:
+                sub_folder_name = self.yangs['vnfd'][0]['short_name']
+
+
+        subdir = os.path.join(output_dir, sub_folder_name)
         if os.path.exists(subdir):
             shutil.rmtree(subdir)
         os.makedirs(subdir)
-
+        riftio_src_file = "{0}{1}".format(os.getenv('RIFT_INSTALL'), "/usr/rift/mano/common/riftiotypes.yaml")
         # Create the definitions dir
         def_dir = os.path.join(subdir, 'Definitions')
         os.makedirs(def_dir)
-        entry_file = os.path.join(def_dir, name+'.yaml')
-        self.log.debug(_("Writing file {0}").
-                       format(entry_file))
-        with open(entry_file, 'w+') as f:
-            f.write(tmpl[ToscaTemplate.TOSCA])
+        shutil.copy2(riftio_src_file, def_dir + "/riftiotypes.yaml")
+        for tmpl_key in tmpl_out:
+            tmpl = tmpl_out[tmpl_key]
+            entry_file = os.path.join(def_dir, tmpl_key+'.yaml')
+            self.log.debug(_("Writing file {0}").
+                           format(entry_file))
+            with open(entry_file, 'w+') as f:
+                f.write(tmpl[ToscaTemplate.TOSCA])
 
         # Create the Tosca meta
         meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
@@ -116,7 +162,7 @@
 CSAR-Version: 1.1
 Created-By: RIFT.io
 Entry-Definitions: Definitions/'''
-        meta_data = "{}{}".format(meta, name+'.yaml')
+        meta_data = "{}{}".format(meta, sub_folder_name+'.yaml')
         meta_file = os.path.join(meta_dir, 'TOSCA.meta')
         self.log.debug(_("Writing file {0}:\n{1}").
                        format(meta_file, meta_data))
@@ -124,53 +170,55 @@
             f.write(meta_data)
 
         # Copy other supporting files
-        if ToscaTemplate.FILES in tmpl:
-            for f in tmpl[ToscaTemplate.FILES]:
-                self.log.debug(_("Copy supporting file {0}").format(f))
+        for key in tmpl_out:
+            tmpl = tmpl_out[key]
+            if ToscaTemplate.FILES in tmpl:
+                for f in tmpl[ToscaTemplate.FILES]:
+                    self.log.debug(_("Copy supporting file {0}").format(f))
 
-                # Search in source packages
-                if len(self.pkgs):
-                    for pkg in self.pkgs:
-                        # TODO(pjoseph): Need to add support for other file types
-                        fname = f[ToscaResource.NAME]
-                        dest_path = os.path.join(subdir, f[ToscaResource.DEST])
-                        ftype = f[ToscaResource.TYPE]
+                    # Search in source packages
+                    if len(self.pkgs):
+                        for pkg in self.pkgs:
+                            # TODO(pjoseph): Need to add support for other file types
+                            fname = f[ToscaResource.NAME]
+                            dest_path = os.path.join(subdir, f[ToscaResource.DEST])
+                            ftype = f[ToscaResource.TYPE]
 
-                        if ftype == 'image':
-                            image_file_map = rift.package.image.get_package_image_files(pkg)
+                            if ftype == 'image':
+                                image_file_map = rift.package.image.get_package_image_files(pkg)
 
-                            if fname in image_file_map:
-                                self.log.debug(_("Extracting image {0} to {1}").
-                                               format(fname, dest_path))
-                                pkg.extract_file(image_file_map[fname],
-                                                 dest_path)
-                                break
+                                if fname in image_file_map:
+                                    self.log.debug(_("Extracting image {0} to {1}").
+                                                   format(fname, dest_path))
+                                    pkg.extract_file(image_file_map[fname],
+                                                     dest_path)
+                                    break
 
-                        elif ftype == 'script':
-                            script_file_map = \
-                                rift.package.script.PackageScriptExtractor.package_script_files(pkg)
-                            if fname in script_file_map:
-                                self.log.debug(_("Extracting script {0} to {1}").
-                                               format(fname, dest_path))
-                                pkg.extract_file(script_file_map[fname],
-                                                 dest_path)
-                                break
+                            elif ftype == 'script':
+                                script_file_map = \
+                                    rift.package.script.PackageScriptExtractor.package_script_files(pkg)
+                                if fname in script_file_map:
+                                    self.log.debug(_("Extracting script {0} to {1}").
+                                                   format(fname, dest_path))
+                                    pkg.extract_file(script_file_map[fname],
+                                                     dest_path)
+                                    break
 
-                        elif ftype == 'cloud_init':
-                            script_file_map = \
-                                rift.package.cloud_init.PackageCloudInitExtractor.package_script_files(pkg)
-                            if fname in script_file_map:
-                                self.log.debug(_("Extracting script {0} to {1}").
-                                               format(fname, dest_path))
-                                pkg.extract_file(script_file_map[fname],
-                                                 dest_path)
-                                break
+                            elif ftype == 'cloud_init':
+                                script_file_map = \
+                                    rift.package.cloud_init.PackageCloudInitExtractor.package_script_files(pkg)
+                                if fname in script_file_map:
+                                    self.log.debug(_("Extracting script {0} to {1}").
+                                                   format(fname, dest_path))
+                                    pkg.extract_file(script_file_map[fname],
+                                                     dest_path)
+                                    break
 
-                        else:
-                            self.log.warn(_("Unknown file type {0}: {1}").
-                                          format(ftype, f))
+                            else:
+                                self.log.warn(_("Unknown file type {0}: {1}").
+                                              format(ftype, f))
 
-                #TODO(pjoseph): Search in other locations
+                    #TODO(pjoseph): Search in other locations
 
         # Create the ZIP archive
         if archive:
@@ -178,7 +226,7 @@
             os.chdir(subdir)
 
             try:
-                zip_file = name + '.zip'
+                zip_file = key + '.zip'
                 zip_path = os.path.join(output_dir, zip_file)
                 self.log.debug(_("Creating zip file {0}").format(zip_path))
                 zip_cmd = "zip -r {}.partial ."
@@ -207,14 +255,12 @@
                      archive=False,):
         if output:
             zip_files = []
-            for key in output.keys():
-                if output_dir:
-                    zf = self._create_csar_files(output_dir,
-                                                 key,
-                                                 output[key],
-                                                 archive=archive,)
-                    zip_files.append(zf)
-                else:
-                    print(_("TOSCA Template {0}:\n{1}").
-                          format(key, output[key]))
+            #for key in output.keys():
+            if output_dir:
+                zf = self._create_csar_files(output_dir,
+                                             output,
+                                             archive=archive,)
+                zip_files.append(zf)
+            else:
+                print(_("There is an issue with TOSCA Template"))
             return zip_files
diff --git a/common/python/test/utest_url_downloader.py b/common/python/test/utest_url_downloader.py
index 33e24a8..379f3c9 100755
--- a/common/python/test/utest_url_downloader.py
+++ b/common/python/test/utest_url_downloader.py
@@ -29,10 +29,14 @@
 
 TEST_URL = "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell"
 
-class TestCase(unittest.TestCase):
+class UrlTestCase(unittest.TestCase):
     def setUp(self):
         pass
 
+    @classmethod
+    def set_logger(cls, log):
+        cls.log = log
+
     def _common_checks(self, job):
         if job.status != "COMPLETED":
             return
@@ -42,6 +46,9 @@
         assert job.start_time > 0
         assert job.stop_time >= job.start_time
 
+    def _display_result(self, url_downl):
+        UrlTestCase.log.debug("URL download result: {}".format(url_downl))
+
     def test_file_download(self):
         """
         Asserts:
@@ -50,9 +57,9 @@
         """
         url_downl = downloader.UrlDownloader(TEST_URL)
         url_downl.download()
-        assert os.path.isfile(url_downl.filename)
+        assert os.path.isfile(url_downl.filepath)
 
-
+        self._display_result(url_downl)
         assert url_downl.meta.status == downloader.DownloadStatus.COMPLETED
         # assert url_downl.job.progress_percent == 100
         assert "success" in url_downl.meta.detail
@@ -67,7 +74,8 @@
         url_downl = downloader.UrlDownloader(TEST_URL + ".blah")
         url_downl.download()
 
-        assert not os.path.isfile(url_downl.filename)
+        self._display_result(url_downl)
+        assert not os.path.isfile(url_downl.filepath)
         assert url_downl.meta.status == downloader.DownloadStatus.FAILED
         assert "Max retries" in url_downl.meta.detail or "404" in url_downl.meta.detail
 
@@ -79,7 +87,7 @@
             1. Cancel for a download and clean up of the downloaded file.
             2. Model attributes (Process percent, detail, status)
         """
-        url = "http://speedtest.ftp.otenet.gr/files/test1Mb.db"
+        url = "http://speedtest.ftp.otenet.gr/files/test10Mb.db"
         url_dwld = downloader.UrlDownloader(url)
         loop = asyncio.get_event_loop()
         fut = loop.run_in_executor(None, url_dwld.download)
@@ -96,6 +104,7 @@
 
         loop.run_until_complete(sleep())
 
+        self._display_result(url_dwld)
         assert url_dwld.meta.status == downloader.DownloadStatus.CANCELLED
         assert url_dwld.meta.bytes_downloaded == url_dwld.meta.bytes_downloaded
         assert "cancel" in url_dwld.meta.detail
@@ -106,6 +115,7 @@
                 'https://api.github.com/user')
 
         url_downl.download()
+        self._display_result(url_downl)
 
 
     def tearDown(self):
@@ -125,7 +135,9 @@
         runner = None
 
     # Set the global logging level
-    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+    log = logging.getLogger()
+    log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+    UrlTestCase.set_logger(log)
 
     # The unittest framework requires a program name, so use the name of this
     # file instead (we do not want to have to pass a fake program name to main
diff --git a/models/openmano/python/rift/openmano/rift2openmano.py b/models/openmano/python/rift/openmano/rift2openmano.py
index 4325ea0..2b9ec48 100755
--- a/models/openmano/python/rift/openmano/rift2openmano.py
+++ b/models/openmano/python/rift/openmano/rift2openmano.py
@@ -542,7 +542,9 @@
                     # Add Openmano devices
                     device = {}
                     device["type"] = volume.device_type
-                    device["image"] = volume.image
+                    device["image name"] = volume.image
+                    if volume.has_field("image_checksum"):
+                        device["image checksum"] = volume.image_checksum
                     vnfc["devices"].append(device)   
 
         vnfc_boot_data_init = False
diff --git a/models/plugins/yang/mano-types.yang b/models/plugins/yang/mano-types.yang
index 89fbca0..601bdac 100644
--- a/models/plugins/yang/mano-types.yang
+++ b/models/plugins/yang/mano-types.yang
@@ -128,6 +128,32 @@
     }
   }
   
+  grouping ui-primitive-group {
+    list parameter-group {
+      description
+            "Grouping of parameters which are logically grouped in UI";
+      key "name";
+ 
+      leaf name {
+        description
+            "Name of the parameter group";
+        type string;
+      }
+ 
+      list parameter {
+        description
+            "List of parameters for the service primitive.";
+        key "name";
+        uses manotypes:primitive-parameter;
+      }
+ 
+      leaf mandatory {
+        description "Is this parameter group mandatory";
+        type boolean;
+        default true;
+      }
+    }
+  }
 
   grouping image-properties {
     leaf image {
@@ -1023,108 +1049,6 @@
     }
   }
 
-  grouping ns-service-primitive {
-    list service-primitive {
-      description
-          "Network service level service primitives.";
-
-      key "name";
-
-      leaf name {
-        description
-            "Name of the service primitive.";
-        type string;
-      }
-
-      list parameter {
-        description
-            "List of parameters for the service primitive.";
-
-        key "name";
-        uses manotypes:primitive-parameter;
-      }
-
-      list parameter-group {
-        description
-            "Grouping of parameters which are logically grouped in UI";
-        key "name";
-
-        leaf name {
-          description
-              "Name of the parameter group";
-          type string;
-        }
-
-        list parameter {
-          description
-              "List of parameters for the service primitive.";
-          key "name";
-          uses manotypes:primitive-parameter;
-        }
-
-        leaf mandatory {
-          description "Is this parameter group mandatory";
-          type boolean;
-          default true;
-        }
-      }
-
-      list vnf-primitive-group {
-        description
-            "List of service primitives grouped by VNF.";
-
-        key "member-vnf-index-ref";
-        leaf member-vnf-index-ref {
-          description
-              "Reference to member-vnf within constituent-vnfds";
-          type uint64;
-        }
-
-        leaf vnfd-id-ref {
-          description
-              "A reference to a vnfd. This is a 
-               leafref to path:
-                   ../../../../nsd:constituent-vnfd
-                   + [nsd:id = current()/../nsd:id-ref]
-                   + /nsd:vnfd-id-ref
-               NOTE: An issue with confd is preventing the
-               use of xpath. Seems to be an issue with leafref
-               to leafref, whose target is in a different module.
-               Once that is resovled this will switched to use
-               leafref";
-
-          type string;
-        }
-
-        leaf vnfd-name {
-          description
-              "Name of the VNFD";
-          type string;
-        }
-
-        list primitive {
-          key "index";
-
-          leaf index {
-            description "Index of this primitive";
-            type uint32;
-          }
-
-          leaf name {
-            description "Name of the primitive in the VNF primitive ";
-            type string;
-          }
-        }
-      }
-
-      leaf user-defined-script {
-        description
-            "A user defined script.";
-        type string;
-      }
-    }
-  }
-
   grouping monitoring-param {
     list http-endpoint {
       description
@@ -1157,7 +1081,7 @@
         type string;
       }
 
-      leaf polling_interval_secs {
+      leaf polling-interval-secs {
         description "The HTTP polling interval in seconds";
         type uint8;
         default 2;
@@ -2116,7 +2040,7 @@
 
     }
 
-    leaf device_bus {
+    leaf device-bus {
       description "Type of disk-bus on which this disk is exposed to guest";
       type enumeration {
         enum ide;
@@ -2126,7 +2050,7 @@
       }
     }
 
-    leaf device_type {
+    leaf device-type {
       description "The type of device as exposed to guest";
       type enumeration {
           enum disk;
diff --git a/models/plugins/yang/nsd-base.yang b/models/plugins/yang/nsd-base.yang
index 70cacda..b279398 100644
--- a/models/plugins/yang/nsd-base.yang
+++ b/models/plugins/yang/nsd-base.yang
@@ -591,8 +591,6 @@
 
     uses manotypes:ip-profile-list;
 
-    uses manotypes:ns-service-primitive;
-
     list initial-config-primitive {
       rwpb:msg-new NsdInitialConfigPrimitive;
       description
diff --git a/models/plugins/yang/nsd.yang b/models/plugins/yang/nsd.yang
index 80b01a8..ddd4d92 100644
--- a/models/plugins/yang/nsd.yang
+++ b/models/plugins/yang/nsd.yang
@@ -106,17 +106,12 @@
 
         leaf vnfd-id-ref {
           description
-              "A reference to a vnfd. This is a
-               leafref to path:
-                   ../../constituent-vnfd
-                   + [id = current()/../id-ref]
-                   + /vnfd-id-ref
-               NOTE: An issue with confd is preventing the
-               use of xpath. Seems to be an issue with leafref
-               to leafref, whose target is in a different module.
-               Once that is resovled this will switched to use
-               leafref";
-          type string;
+              "A reference to a vnfd";
+          type leafref {
+            path "../../../constituent-vnfd" +
+                 "[member-vnf-index = current()/../member-vnf-index-ref]" +
+                 "/vnfd-id-ref";
+          }
         }
 
         leaf vnfd-connection-point-ref {
@@ -195,18 +190,13 @@
 
         leaf vnfd-id-ref {
           description
-            "A reference to a vnfd. This is a
-              leafref to path:
-                  ../../../../nsd:constituent-vnfd
-                  + [nsd:id = current()/../nsd:id-ref]
-                  + /nsd:vnfd-id-ref
-              NOTE: An issue with confd is preventing the
-              use of xpath. Seems to be an issue with leafref
-              to leafref, whose target is in a different module.
-              Once that is resolved this will switched to use
-              leafref";
+             "A reference to a vnfd. This is a leafref";
 
-          type yang:uuid;
+          type leafref {
+            path "../../../constituent-vnfd" +
+                 "[member-vnf-index = current()/../member-vnf-index-ref]" +
+                 "/vnfd-id-ref";
+          }
         }
 
         leaf vnfd-monitoring-param-ref {
@@ -229,6 +219,85 @@
     }
   }
 
+  grouping nsd-service-primitive {
+   list service-primitive {
+      description
+          "Network service level service primitives.";
+
+      key "name";
+
+      leaf name {
+        description
+            "Name of the service primitive.";
+        type string;
+      }
+
+      list parameter {
+        description
+            "List of parameters for the service primitive.";
+
+        key "name";
+        uses manotypes:primitive-parameter;
+      }
+
+      uses manotypes:ui-primitive-group;
+
+      list vnf-primitive-group {
+        description
+            "List of service primitives grouped by VNF.";
+
+        key "member-vnf-index-ref";
+        leaf member-vnf-index-ref {
+          description
+              "Reference to member-vnf within constituent-vnfds";
+          type leafref {
+             path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a leafref";
+
+          type leafref {
+             path "../../../constituent-vnfd" +
+                "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+          }
+        }
+
+        leaf vnfd-name {
+          description
+              "Name of the VNFD";
+          type leafref {
+              path "/vnfd:vnfd-catalog/vnfd:vnfd"
+                    + "[vnfd:id = current()/../vnfd-id-ref]"
+                    + "/vnfd:name";
+          }
+        }
+
+        list primitive {
+          key "index";
+
+          leaf index {
+            description "Index of this primitive";
+            type uint32;
+          }
+
+          leaf name {
+            description "Name of the primitive in the VNF primitive ";
+            type string;
+          }
+        }
+      }
+
+      leaf user-defined-script {
+        description
+            "A user defined script.";
+        type string;
+      }
+    }
+  }
+
   container nsd-catalog {
 
     list nsd {
@@ -245,6 +314,8 @@
       uses nsd-vnf-dependency;
 
       uses nsd-monitoring-param;
+
+      uses nsd-service-primitive;
     }
   }
 }
diff --git a/models/plugins/yang/nsr.yang b/models/plugins/yang/nsr.yang
index 136b62b..419b05b 100644
--- a/models/plugins/yang/nsr.yang
+++ b/models/plugins/yang/nsr.yang
@@ -169,6 +169,8 @@
           uses project-nsd:nsr-nsd-vnf-dependency;
 
           uses project-nsd:nsr-nsd-monitoring-param;
+
+          uses project-nsd:nsr-nsd-service-primitive;
         }
 
         uses ns-instance-config-params;
@@ -843,7 +845,82 @@
           type config-states;
         }
 
-        uses manotypes:ns-service-primitive;
+        list service-primitive {
+           description
+                "Network service level service primitives.";
+
+           key "name";
+
+           leaf name {
+              description
+                  "Name of the service primitive.";
+              type string;
+           }
+
+           list parameter {
+              description
+                  "List of parameters for the service primitive.";
+
+              key "name";
+              uses manotypes:primitive-parameter;
+           }
+
+           uses manotypes:ui-primitive-group;
+
+           list vnf-primitive-group {
+              description
+                  "List of service primitives grouped by VNF.";
+
+              key "member-vnf-index-ref";
+              leaf member-vnf-index-ref {
+                description
+                   "Reference to member-vnf within constituent-vnfds";
+                type string;
+              }
+
+              leaf vnfd-id-ref {
+                 description
+                   "A reference to a vnfd. This is a 
+                    leafref to path:
+                        ../../../../nsd:constituent-vnfd
+                        + [nsd:id = current()/../nsd:id-ref]
+                        + /nsd:vnfd-id-ref
+                    NOTE: An issue with confd is preventing the
+                    use of xpath. Seems to be an issue with leafref
+                    to leafref, whose target is in a different module.
+                    Once that is resolved this will be switched to use
+                    leafref";
+
+                 type string;
+              }
+
+              leaf vnfd-name {
+                 description
+                   "Name of the VNFD";
+                 type string;
+              }
+
+              list primitive {
+                 key "index";
+
+                 leaf index {
+                   description "Index of this primitive";
+                   type uint32;
+                 }
+
+                 leaf name {
+                   description "Name of the primitive in the VNF primitive ";
+                   type string;
+                 }
+              }
+           }
+
+           leaf user-defined-script {
+             description
+                 "A user defined script.";
+             type string;
+           }
+        }
 
         list initial-config-primitive {
           rwpb:msg-new NsrInitialConfigPrimitive;
diff --git a/models/plugins/yang/project-nsd.yang b/models/plugins/yang/project-nsd.yang
index 1cf36af..9e20fd2 100644
--- a/models/plugins/yang/project-nsd.yang
+++ b/models/plugins/yang/project-nsd.yang
@@ -414,6 +414,164 @@
     }
   }
 
+  grouping nsd-service-primitive {
+   list service-primitive {
+      description
+          "Network service level service primitives.";
+
+      key "name";
+
+      leaf name {
+        description
+            "Name of the service primitive.";
+        type string;
+      }
+
+      list parameter {
+        description
+            "List of parameters for the service primitive.";
+
+        key "name";
+        uses manotypes:primitive-parameter;
+      }
+
+      uses manotypes:ui-primitive-group;
+
+      list vnf-primitive-group {
+        description
+            "List of service primitives grouped by VNF.";
+
+        key "member-vnf-index-ref";
+        leaf member-vnf-index-ref {
+          description
+              "Reference to member-vnf within constituent-vnfds";
+          type leafref {
+             path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a leafref";
+
+          type leafref {
+             path "../../../constituent-vnfd" +
+                "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+          }
+        }
+
+        leaf vnfd-name {
+          description
+              "Name of the VNFD";
+          type leafref {
+              path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+                    + "[project-vnfd:id = current()/../vnfd-id-ref]"
+                    + "/project-vnfd:name";
+          }
+        }
+
+        list primitive {
+          key "index";
+
+          leaf index {
+            description "Index of this primitive";
+            type uint32;
+          }
+
+          leaf name {
+            description "Name of the primitive in the VNF primitive ";
+            type string;
+          }
+        }
+      }
+
+      leaf user-defined-script {
+        description
+            "A user defined script.";
+        type string;
+      }
+    }
+  }
+
+  grouping nsr-nsd-service-primitive {
+   list service-primitive {
+      description
+          "Network service level service primitives.";
+
+      key "name";
+
+      leaf name {
+        description
+            "Name of the service primitive.";
+        type string;
+      }
+
+      list parameter {
+        description
+            "List of parameters for the service primitive.";
+
+        key "name";
+        uses manotypes:primitive-parameter;
+      }
+
+      uses manotypes:ui-primitive-group;
+
+      list vnf-primitive-group {
+        description
+            "List of service primitives grouped by VNF.";
+
+        key "member-vnf-index-ref";
+        leaf member-vnf-index-ref {
+          description
+              "Reference to member-vnf within constituent-vnfds";
+          type leafref {
+             path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a leafref";
+
+          type leafref {
+             path "../../../constituent-vnfd" +
+                "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+          }
+        }
+
+        leaf vnfd-name {
+          description
+              "Name of the VNFD";
+          type leafref {
+              path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+                    + "[project-vnfd:id = current()/../vnfd-id-ref]"
+                    + "/project-vnfd:name";
+          }
+        }
+
+        list primitive {
+          key "index";
+
+          leaf index {
+            description "Index of this primitive";
+            type uint32;
+          }
+
+          leaf name {
+            description "Name of the primitive in the VNF primitive ";
+            type string;
+          }
+        }
+      }
+
+      leaf user-defined-script {
+        description
+            "A user defined script.";
+        type string;
+      }
+    }
+  }
+
   grouping nsd-descriptor {
      uses nsd-base:nsd-descriptor-common;
 
@@ -426,6 +584,8 @@
      uses nsd-vnf-dependency;
 
      uses nsd-monitoring-param;
+
+     uses nsd-service-primitive;
   }
 
   augment "/rw-project:project" {
diff --git a/models/plugins/yang/vnfr.yang b/models/plugins/yang/vnfr.yang
index 5cae6b8..2678c50 100644
--- a/models/plugins/yang/vnfr.yang
+++ b/models/plugins/yang/vnfr.yang
@@ -312,6 +312,15 @@
             type string;
           }
 
+          leaf unique-short-name {
+            description "Short Unique name of the VDU
+                  This will be of the format NSR name-ShortenedString-VDUname
+                  NSR name and VDU name shall be constrained to 10 characters";
+            rwpb:field-inline "true";
+            rwpb:field-string-max 64;
+            type string;
+          }
+
           leaf vdu-id-ref {
             type leafref {
               path "../../vnfd/vdu/id";
diff --git a/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
index af92d7d..97d314b 100644
--- a/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
+++ b/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
@@ -29,8 +29,30 @@
   FILES
     rift/rwcal/openstack/__init__.py
     rift/rwcal/openstack/openstack_drv.py
-    rift/rwcal/openstack/openstack_utils.py
     rift/rwcal/openstack/prepare_vm.py
+    rift/rwcal/openstack/keystone/__init__.py
+    rift/rwcal/openstack/keystone/keystone_drv.py
+    rift/rwcal/openstack/nova/nova_drv.py
+    rift/rwcal/openstack/nova/__init__.py
+    rift/rwcal/openstack/neutron/__init__.py
+    rift/rwcal/openstack/neutron/neutron_drv.py
+    rift/rwcal/openstack/glance/__init__.py
+    rift/rwcal/openstack/glance/glance_drv.py
+    rift/rwcal/openstack/cinder/__init__.py
+    rift/rwcal/openstack/cinder/cinder_drv.py
+    rift/rwcal/openstack/ceilometer/__init__.py
+    rift/rwcal/openstack/ceilometer/ceilometer_drv.py
+    rift/rwcal/openstack/session/__init__.py
+    rift/rwcal/openstack/session/session_drv.py
+    rift/rwcal/openstack/session/auth_drv.py
+    rift/rwcal/openstack/portchain/__init__.py
+    rift/rwcal/openstack/portchain/portchain_drv.py
+    rift/rwcal/openstack/utils/__init__.py
+    rift/rwcal/openstack/utils/flavor.py
+    rift/rwcal/openstack/utils/network.py
+    rift/rwcal/openstack/utils/compute.py
+    rift/rwcal/openstack/utils/image.py
+    
   PYTHON3_ONLY
   COMPONENT ${PKG_LONG_NAME})
 
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
index 3226655..03e6dc5 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
@@ -16,7 +16,8 @@
 #
 
 from .openstack_drv import (
-        OpenstackDriver,
-        ValidationError
-        )
-from .openstack_utils import OpenstackExtraSpecUtils
+    OpenstackDriver,
+    ValidationError
+)
+
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/__init__.py
new file mode 100644
index 0000000..d411e24
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .ceilometer_drv import (
+    CeilometerDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/ceilometer_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/ceilometer_drv.py
new file mode 100644
index 0000000..e4eceaf
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/ceilometer/ceilometer_drv.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+import json
+
+from ceilometerclient import client as ceclient 
+
+
+class CeilometerAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(CeilometerAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occurred during Ceilometer API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+
+class CeilometerDriver(object):
+    """
+    CeilometerDriver Class for image management
+    """
+    ### List of supported API versions in prioritized order 
+    supported_versions = ["2"]
+    
+    def __init__(self,
+                 sess_handle,
+                 region_name = 'RegionOne',
+                 service_type = 'metering',
+                 logger = None):
+        """
+        Constructor for CeilometerDriver class
+        Arguments:
+        sess_handle (instance of class SessionDriver)
+        region_name (string ): Region name
+        service_type(string) : Service type name 
+        logger (instance of logging.Logger)
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.ceilometer')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+            
+        self._sess_handle = sess_handle
+        #### Attempt to use API versions in prioritized order defined in
+        #### CeilometerDriver.supported_versions
+        def select_version(version):
+            try:
+                self.log.info("Attempting to use Ceilometer v%s APIs", version)
+                cedrv = ceclient.Client(version=version,
+                                        region_name = region_name,
+                                        service_type = service_type,
+                                        session=self._sess_handle.session)
+            except Exception as e:
+                self.log.info(str(e))
+                raise
+            else:
+                self.log.info("Ceilometer API v%s selected", version)
+                return (version, cedrv)
+
+        errors = []
+        for v in CeilometerDriver.supported_versions:
+            try:
+                (self._version, self._ce_drv) = select_version(v)
+            except Exception as e:
+                errors.append(e)
+            else:
+                break
+        else:
+            raise CeilometerAPIVersionException(errors)
+
+    @property
+    def ceilometer_endpoint(self):
+        return self._ce_drv.http_client.get_endpoint()
+    
+    def _get_ceilometer_connection(self):
+        """
+        Returns instance of object ceilometerclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._ce_drv
+
+    @property
+    def client(self):
+        """
+        Returns instance of object ceilometerclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._ce_drv
+    
+    @property
+    def meters(self):
+        """A list of the available meters"""
+        try:
+            return self.client.meters.list()
+        except Exception as e:
+            self.log.exception("List meters operation failed. Exception: %s", str(e))
+            raise
+    
+    @property
+    def alarms(self):
+        """The ceilometer client alarms manager"""
+        return self.client.alarms
+
+    def nfvi_metrics(self, vim_id):
+        """Returns a dict of NFVI metrics for a given VM
+
+        Arguments:
+            vim_id - the VIM ID of the VM to retrieve the metrics for
+
+        Returns:
+            A dict of NFVI metrics
+
+        """
+        def query_latest_sample(counter_name):
+            try:
+                filter = json.dumps({
+                    "and": [
+                        {"=": {"resource": vim_id}},
+                        {"=": {"counter_name": counter_name}}
+                        ]
+                    })
+                orderby = json.dumps([{"timestamp": "DESC"}])
+                result = self.client.query_samples.query(filter=filter,
+                                                         orderby=orderby,
+                                                         limit=1)
+                return result[0]
+
+            except IndexError:
+                pass
+
+            except Exception as e:
+                self.log.exception("Got exception while querying ceilometer, exception details:%s", str(e))
+
+            return None
+
+        memory_usage = query_latest_sample("memory.usage")
+        disk_usage = query_latest_sample("disk.usage")
+        cpu_util = query_latest_sample("cpu_util")
+
+        metrics = dict()
+
+        if memory_usage is not None:
+            memory_usage.volume = 1e6 * memory_usage.volume
+            metrics["memory_usage"] = memory_usage.to_dict()
+
+        if disk_usage is not None:
+            metrics["disk_usage"] = disk_usage.to_dict()
+
+        if cpu_util is not None:
+            metrics["cpu_util"] = cpu_util.to_dict()
+            # RIFT-14041 when ceilometer returns value of more than 100, make it 100
+            if metrics["cpu_util"]["volume"] > 100:
+                metrics["cpu_util"]["volume"] = 100
+
+        return metrics
+
+    def query_samples(self, vim_instance_id, counter_name, limit=1):
+        """Returns a list of samples
+
+        Arguments:
+            vim_instance_id - the ID of the VIM that the samples are from
+            counter_name    - the counter that the samples will come from
+            limit           - a limit on the number of samples to return
+                              (default: 1)
+
+        Returns:
+            A list of samples
+
+        """
+        try:
+            filter = json.dumps({
+                "and": [
+                    {"=": {"resource": vim_instance_id}},
+                    {"=": {"counter_name": counter_name}}
+                    ]
+                })
+            # No inner try/except: the previous one swallowed the error and
+            # then hit a NameError on 'result'. Let the outer handler below
+            # log the failure once and return [].
+            result = self.client.query_samples.query(filter=filter, limit=limit)
+            return result[-limit:]
+
+        except Exception as e:
+            self.log.exception(e)
+
+        return []
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/__init__.py
new file mode 100644
index 0000000..4cbb1b5
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .cinder_drv import (
+    CinderDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/cinder_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/cinder_drv.py
new file mode 100644
index 0000000..5bb5cc4
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/cinder/cinder_drv.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+from cinderclient import client as ciclient
+import cinderclient.exceptions as CinderException
+import keystoneauth1
+
+
+class CinderAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(CinderAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occurred during Cinder API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+
+class CinderEndpointException(Exception):
+    "Cinder Endpoint is absent"
+    pass
+
+class CinderDriver(object):
+    """
+    CinderDriver Class for image management
+    """
+    ### List of supported API versions in prioritized order 
+    supported_versions = ["2"]
+    
+    def __init__(self,
+                 sess_handle,
+                 region_name = 'RegionOne',
+                 service_type = 'volume',
+                 logger  = None):
+        """
+        Constructor for CinderDriver class
+        Arguments:
+        sess_handle (instance of class SessionDriver)
+        region_name (string ): Region name
+        service_type(string) : Service type name 
+        logger (instance of logging.Logger)
+        """
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.cinder')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+            
+        self._sess_handle = sess_handle
+        #### Attempt to use API versions in prioritized order defined in
+        #### CinderDriver.supported_versions
+        def select_version(version):
+            try:
+                self.log.info("Attempting to use Cinder v%s APIs", version)
+                cidrv = ciclient.Client(version=version,
+                                        region_name = region_name,
+                                        service_type = service_type,
+                                        session=self._sess_handle.session)
+            except Exception as e:
+                self.log.info(str(e))
+                raise
+            else:
+                self.log.info("Cinder API v%s selected", version)
+                return (version, cidrv)
+
+        errors = []
+        for v in CinderDriver.supported_versions:
+            try:
+                (self._version, self._ci_drv) = select_version(v)
+            except Exception as e:
+                errors.append(e)
+            else:
+                break
+        else:
+            raise CinderAPIVersionException(errors)
+
+        try:
+            self._ci_drv.client.get_endpoint()
+        except keystoneauth1.exceptions.catalog.EndpointNotFound:
+            self.log.info("Cinder endpoint not found")
+            raise CinderEndpointException()
+
+    @property
+    def cinder_endpoint(self):
+        return self._ci_drv.client.get_endpoint()
+
+    @property
+    def project_id(self):
+        return self._sess_handle.project_id
+
+    @property
+    def quota(self):
+        """
+        Returns CinderDriver Quota (a dictionary) for project
+        """
+        try:
+            quota = self._ci_drv.quotas.get(self.project_id)
+        except Exception as e:
+            self.log.exception("Get Cinder quota operation failed. Exception: %s", str(e))
+            raise
+        return quota
+
+    def _get_cinder_connection(self):
+        """
+        Returns instance of object cinderclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._ci_drv
+
+    def volume_list(self):
+          """
+          Returns list of dictionaries. Each dictionary contains attributes associated with
+          volumes
+  
+          Arguments: None
+  
+          Returns: List of dictionaries.
+          """
+          volumes = []
+          try:
+              volume_info = self._ci_drv.volumes.list()
+          except Exception as e:
+              self.log.error("List volumes operation failed. Exception: %s", str(e))
+              raise
+          volumes = [ volume for volume in volume_info ]
+          return volumes
+  
+    def volume_get(self, volume_id):
+          """
+          Get details volume
+  
+          Arguments: None
+  
+          Returns: List of dictionaries.
+          """
+          try:
+              vol = self._ci_drv.volumes.get(volume_id)
+          except Exception as e:
+              self.log.error("Get volume operation failed. Exception: %s", str(e))
+              raise
+          return vol
+
+    def volume_set_metadata(self, volume_id, metadata):
+          """
+          Set metadata for volume
+          Metadata is a dictionary of key-value pairs
+  
+          Arguments: None
+  
+          Returns: List of dictionaries.
+          """
+          try:
+              self._ci_drv.volumes.set_metadata(volume_id, metadata)
+          except Exception as e:
+              self.log.error("Set metadata operation failed. Exception: %s", str(e))
+              raise
+  
+    def volume_delete_metadata(self, volume_id, metadata):
+          """
+          Delete metadata for volume
+          Metadata is a dictionary of key-value pairs
+  
+          Arguments: None
+  
+          Returns: List of dictionaries.
+          """
+          try:
+              self._ci_drv.volumes.delete_metadata(volume_id, metadata)
+          except Exception as e:
+              self.log.error("Delete metadata operation failed. Exception: %s", str(e))
+              raise
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/__init__.py
new file mode 100644
index 0000000..35ae887
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .glance_drv import (
+    GlanceDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/glance_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/glance_drv.py
new file mode 100644
index 0000000..550d77f
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/glance/glance_drv.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+from glanceclient import client as glclient
+import glanceclient.exc as GlanceException
+import time
+
+
+
+class GlanceAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(GlanceAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occurred during Glance API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+
+class GlanceDriver(object):
+    """
+    GlanceDriver Class for image management
+    """
+    ### List of supported API versions in prioritized order 
+    supported_versions = ["2"]
+    
+    def __init__(self,
+                 sess_handle,
+                 region_name = 'RegionOne',
+                 service_type = 'image',
+                 logger  = None):
+        """
+        Constructor for GlanceDriver class
+        Arguments:
+        sess_handle (instance of class SessionDriver)
+        region_name (string ): Region name
+        service_type(string) : Service type name 
+        logger (instance of logging.Logger)
+        """
+        self._sess_handle = sess_handle
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.glance')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        
+        #### Attempt to use API versions in prioritized order defined in
+        #### GlanceDriver.supported_versions
+        def select_version(version):
+            try:
+                self.log.info("Attempting to use Glance v%s APIs", version)
+                gldrv = glclient.Client(version = version,
+                                        region_name = region_name,
+                                        service_type = service_type,
+                                        session=self._sess_handle.session)
+            except Exception as e:
+                self.log.info(str(e))
+                raise
+            else:
+                self.log.info("Glance API v%s selected", version)
+                return (version, gldrv)
+
+        errors = []
+        for v in GlanceDriver.supported_versions:
+            try:
+                (self._version, self._gl_drv) = select_version(v)
+            except Exception as e:
+                errors.append(e)
+            else:
+                break
+        else:
+            raise GlanceAPIVersionException(errors)
+
+    @property
+    def glance_endpoint(self):
+        return self._gl_drv.http_client.get_endpoint()
+
+    @property
+    def project_id(self):
+        return self._sess_handle.project_id
+    
+    def _get_glance_connection(self):
+        """
+        Returns instance of object glanceclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._gl_drv
+
+    def image_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains attributes associated with
+        image
+
+        Arguments: None
+
+        Returns: List of dictionaries.
+        """
+        images = []
+        try:
+            image_info = self._gl_drv.images.list()
+        except Exception as e:
+            self.log.exception("List Image operation failed. Exception: %s", str(e))
+            raise
+        images = [ img for img in image_info ]
+        return images
+
+    def image_create(self, **kwargs):
+        """
+        Creates an image
+        Arguments:
+           A dictionary of kwargs with following keys
+           {
+              'name'(string)         : Name of the image
+              'location'(string)     : URL (http://....) where image is located
+              'disk_format'(string)  : Disk format
+                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
+              'container_format'(string): Container format
+                                       Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
+              'tags'                 : A list of user tags
+              'checksum'             : The image md5 checksum
+           }
+        Returns:
+           image_id (string)  : UUID of the image
+
+        """
+        try:
+            image = self._gl_drv.images.create(**kwargs)
+        except Exception as e:
+            self.log.exception("Create Image operation failed. Exception: %s", str(e))
+            raise
+
+        return image.id
+
+    def image_upload(self, image_id, fd):
+        """
+        Upload the image
+
+        Arguments:
+            image_id: UUID of the image
+            fd      : File descriptor for the image file
+        Returns: None
+        """
+        try:
+            self._gl_drv.images.upload(image_id, fd)
+        except Exception as e:
+            self.log.exception("Image upload operation failed. Exception: %s",str(e))
+            raise
+
+    def image_add_location(self, image_id, location, metadata):
+        """
+        Add image URL location
+
+        Arguments:
+           image_id : UUID of the image
+           location : http URL for the image
+
+        Returns: None
+        """
+        try:
+            self._gl_drv.images.add_location(image_id, location, metadata)
+        except Exception as e:
+            self.log.exception("Image location add operation failed. Exception: %s",str(e))
+            raise
+
+    def image_update(self, image_id, remove_props = None, **kwargs):
+        """
+        Update an image
+
+        Arguments:
+            image_id: UUID of the image
+            remove_props: list of property names to remove
+            [
+                'my_custom_property1',
+                'my_custom_property2'
+            ]
+            kwargs: A dictionary of kwargs with the image attributes and their new values
+            {
+                'my_custom_property'(name of property) : Value of the custom property
+            }
+
+        If remove_props is not None, it is assumed that the function is called to
+        remove the specified property from the image, and kwargs is None.
+        Otherwise, the image properties are updated with kwargs. Its either-or.
+        """
+        assert image_id == self._image_get(image_id)['id']
+        try:
+            if remove_props is not None:
+                self._gl_drv.images.update(image_id, remove_props=remove_props)
+            else:
+                self._gl_drv.images.update(image_id, **kwargs)
+        except Exception as e:
+            self.log.exception("Update Image operation failed for image_id : %s. Exception: %s",image_id, str(e))
+            raise
+
+    def image_delete(self, image_id):
+        """
+        Delete an image
+
+        Arguments:
+           image_id: UUID of the image
+
+        Returns: None
+
+        """
+        assert image_id == self._image_get(image_id)['id']
+        try:
+            self._gl_drv.images.delete(image_id)
+        except Exception as e:
+            self.log.exception("Delete Image operation failed for image_id : %s. Exception: %s",image_id, str(e))
+            raise
+
+
+    def _image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        max_retry = 5
+        try:
+            image = self._gl_drv.images.get(image_id)
+        except GlanceException.HTTPBadRequest as e:
+            # RIFT-14241: The get image request occasionally returns the below message.  Retry in case of bad request exception.
+            # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
+            self.log.warning("Got bad request response during get_image request.  Retrying.")
+            if max_retry > 0:
+                max_retry -= 1
+                time.sleep(2)
+                image = self._gl_drv.images.get(image_id)
+            else:
+                self.log.exception("Get Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
+                raise
+        except Exception as e:
+            self.log.exception("Get Image operation failed for image_id : %s. Exception: %s", image_id, str(e))
+            raise
+
+        return image
+
+    def image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        return self._image_get(image_id)
+
+    def image_verify(self, image_id):
+        """
+        Verifies if image with image-id exists and is in active state
+
+        Arguments:
+          image_id(string): UUID of the image
+
+        Returns:
+          None
+          Raises except if image not found or not in active state
+        """
+        img = self.image_get(image_id)
+        if img['status'] != 'active':
+            raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s"
+                                           %(img['id'], img['status']))
+        
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/__init__.py
new file mode 100644
index 0000000..f6a0775
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/__init__.py
@@ -0,0 +1,21 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .keystone_drv import (
+    KeystoneDriver,
+    KeystoneVersionDiscover
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/keystone_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/keystone_drv.py
new file mode 100644
index 0000000..a754ecf
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/keystone/keystone_drv.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+from keystoneclient import client as ksclient
+from keystoneclient import discover
+import keystoneclient.exceptions as KeystoneExceptions
+
+
+class KsDrvAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(KsDrvAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occurred during keystone API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+    
+class KeystoneVersionDiscover(object):
+    """
+    Class for keystone version discovery
+    """
+    supported_versions = [(2, ), (3, )]
+    
+    def __init__(self, auth_url, logger = None):
+        """
+        Constructor for class
+        Arguments
+           auth_url(string): Keystone Auth URL
+           logger (instance of logging.Logger)
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.keystone')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        try:
+            self._discover = discover.Discover(auth_url=auth_url)
+        except Exception as e:
+            self.log.exception(str(e))
+            self._discover = None
+            raise
+        
+    def get_version(self):
+        if self._discover:
+            for v in KeystoneVersionDiscover.supported_versions:
+                try:
+                    rsp = self._discover._calculate_version(v, unstable=False)
+                except KeystoneExceptions.VersionNotAvailable as e:
+                    self.log.debug(str(e))
+                    self.log.info("Keystone API version %d not available", v[0])
+                else:
+                    (major, minor)  = rsp['version']
+                    self.log.info("Found Keystone API major version: %d, minor version: %d", major, minor)
+                    return major, minor
+        raise KeystoneExceptions.NotFound("No supported keystone API version found")
+
+
+
+class KeystoneDriver(object):
+    """
+    Driver class for openstack keystone
+    """
+    ### List of supported API versions in prioritized order 
+    def __init__(self,
+                 version,
+                 sess_handle,
+                 logger = None):
+        """
+        Constructor for KeystoneDriver class
+        Arguments:
+          version(str): Keystone API version 
+          sess_handle (instance of class SessionDriver)
+          logger (instance of logging.Logger)
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.keystone')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        self._version = int(float(version))
+        self._sess = sess_handle
+        self._ks_drv = ksclient.Client(version = (self._version, ),
+                                       session = sess_handle.session)
+        
+    @property
+    def keystone_endpoint(self):
+        return self._sess.auth_url
+    
+    def _get_keystone_connection(self):
+        """
+        Returns instance of object keystoneclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._ks_drv
+    
+    def list_users(self):
+        """
+        Returns list of users
+        """
+        return self._ks_drv.users.list()
+
+    def list_projects(self):
+        """
+        Returns list of projects
+        """
+        return self._ks_drv.projects.list()
+
+    def list_roles(self):
+        """
+        Returns list of roles
+        """
+        return self._ks_drv.roles.list()
+    
+    def list_regions(self):
+        """
+        Returns list of Regions
+        """
+        return self._ks_drv.regions.list()
+
+    def list_domains(self):
+        """
+        Returns list of domains
+        """
+        return self._ks_drv.domains.list()
+    
+            
+            
+                
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/__init__.py
new file mode 100644
index 0000000..be86f9e
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .neutron_drv import (
+    NeutronDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/neutron_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/neutron_drv.py
new file mode 100644
index 0000000..ebb32d2
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/neutron/neutron_drv.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+import ipaddress
+from neutronclient.neutron import client as ntclient
+
+import neutronclient.common.exceptions as NeutronException
+
+
+class NeutronAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(NeutronAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occurred during Neutron API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+
+
+class NeutronDriver(object):
+    """
+    NeutronDriver Class for network orchestration
+    """
+    ### List of supported API versions in prioritized order 
+    supported_versions = ["2"]
+    
+    def __init__(self,
+                 sess_handle,
+                 region_name  = 'RegionOne',
+                 service_type = 'network',
+                 logger = None):
+        """
+        Constructor for NeutronDriver class
+        Arguments:
+        sess_handle (instance of class SessionDriver)
+        region_name (string): Region Name
+        service_type(string): Type of service in service catalog
+        logger (instance of logging.Logger)
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.neutron')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        self._sess_handle = sess_handle
+
+        #### Attempt to use API versions in prioritized order defined in
+        #### NeutronDriver.supported_versions
+        def select_version(version):
+            try:
+                self.log.info("Attempting to use Neutron v%s APIs", version)
+                ntdrv = ntclient.Client(api_version = version,
+                                        region_name = region_name,
+                                        service_type = service_type,
+                                        session = self._sess_handle.session,
+                                        logger = self.log)
+            except Exception as e:
+                self.log.info(str(e))
+                raise
+            else:
+                self.log.info("Neutron API v%s selected", version)
+                return (version, ntdrv)
+
+        errors = []
+        for v in NeutronDriver.supported_versions:
+            try:
+                (self._version, self._nt_drv) = select_version(v)
+            except Exception as e:
+                errors.append(e)
+            else:
+                break
+        else:
+            raise NeutronAPIVersionException(errors)
+
+    @property
+    def neutron_endpoint(self):
+        return self._nt_drv.get_auth_info()['endpoint_url']
+
+    @property
+    def project_id(self):
+        return self._sess_handle.project_id
+    
+    @property
+    def neutron_quota(self):
+        """
+        Returns Neutron Quota (a dictionary) for project
+        """
+        try:
+            quota = self._nt_drv.show_quota(self.project_id)
+        except Exception as e:
+            self.log.exception("Get Neutron quota operation failed. Exception: %s", str(e))
+            raise
+        return quota
+    
+    def extensions_list(self):
+        """
+        Returns a list of available neutron extensions.
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single Neutron extension
+        """
+        try:
+            extensions = self._nt_drv.list_extensions()
+        except Exception as e:
+            self.log.exception("List extension operation failed. Exception: %s", str(e))
+            raise
+        if 'extensions' in extensions:
+            return extensions['extensions']
+        return list()
+
+    
+    def _get_neutron_connection(self):
+        """
+        Returns instance of object neutronclient.neutron.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._nt_drv
+
+    def _network_find(self, **kwargs):
+        """
+        Returns a network object dictionary based on the filters provided in kwargs 
+        
+        Arguments:
+        kwargs (dictionary): A dictionary of key-value pair filters
+
+        Returns:
+        One or more dictionary object associated with network
+        """
+        try:
+            networks = self._nt_drv.list_networks(**kwargs)['networks']
+        except Exception as e:
+            self.log.exception("List network operation failed. Exception: %s", str(e))
+            raise
+        return networks
+
+    def network_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains the attributes for a network
+        under project
+
+        Arguments: None
+
+        Returns:
+          A list of dictionaries
+        """
+        return self._network_find(**{'tenant_id':self.project_id}) + self._network_find(**{'shared':True})
+    
+    
+    def network_create(self, **kwargs):
+        """
+        Creates a new network for the project
+
+        Arguments:
+          A dictionary with following key-values
+        {
+          name (string)              : Name of the network
+          admin_state_up(Boolean)    : True/False (Defaults: True)
+          external_router(Boolean)   : Connectivity with external router. True/False (Defaults: False)
+          shared(Boolean)            : Shared among tenants. True/False (Defaults: False)
+          physical_network(string)   : The physical network where this network object is implemented (optional).
+          network_type               : The type of physical network that maps to this network resource (optional).
+                                       Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
+          segmentation_id            : An isolated segment on the physical network. The network_type attribute
+                                       defines the segmentation model. For example, if the network_type value
+                                       is vlan, this ID is a vlan identifier. If the network_type value is gre,
+                                       this ID is a gre key.
+        }
+        """
+        params = {'network':
+                  {'name'                 : kwargs['name'],
+                   'admin_state_up'       : kwargs['admin_state_up'],
+                   'tenant_id'            : self.project_id,
+                   'shared'               : kwargs['shared'],
+                   #'port_security_enabled': port_security_enabled,
+                   'router:external'      : kwargs['external_router']}}
+
+        if 'physical_network' in kwargs:
+            params['network']['provider:physical_network'] = kwargs['physical_network']
+        if 'network_type' in kwargs:
+            params['network']['provider:network_type'] = kwargs['network_type']
+        if 'segmentation_id' in kwargs:
+            params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
+
+        try:
+            self.log.debug("Calling neutron create_network() with params: %s", str(params))
+            net = self._nt_drv.create_network(params)
+        except Exception as e:
+            self.log.exception("Create Network operation failed. Exception: %s", str(e))
+            raise
+        
+        network_id = net['network']['id']
+        if not network_id:
+            raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
+
+        return network_id
+
+    def network_delete(self, network_id):
+        """
+        Deletes a network identified by network_id
+
+        Arguments:
+          network_id (string): UUID of the network
+
+        Returns: None
+        """
+        try:
+            self._nt_drv.delete_network(network_id)
+        except Exception as e:
+            self.log.exception("Delete Network operation failed. Exception: %s",str(e))
+            raise
+
+
+    def network_get(self, network_id='', network_name=''):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        networks = self._network_find(**{'id': network_id, 'name': network_name})
+        if not networks:
+            raise NeutronException.NotFound("Could not find network. Network id: %s, Network name: %s " %(network_id, network_name))
+        return networks[0]
+    
+
+    def subnet_create(self, **kwargs):
+        """
+        Creates a subnet on the network
+
+        Arguments:
+        A dictionary with following key value pairs
+        {
+          network_id(string)  : UUID of the network where subnet needs to be created
+          subnet_cidr(string) : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
+          ip_version (integer): 4 for IPv4 and 6 for IPv6
+        
+        }
+
+        Returns:
+           subnet_id (string): UUID of the created subnet
+        """
+        params = {}
+        params['network_id'] = kwargs['network_id']
+        params['ip_version'] = kwargs['ip_version']
+
+        # if params['ip_version'] == 6:
+        #     assert 0, "IPv6 is not supported"
+        
+        if 'subnetpool_id' in kwargs:
+            params['subnetpool_id'] = kwargs['subnetpool_id']
+        else:
+            params['cidr'] = kwargs['cidr']
+
+        if 'gateway_ip' in kwargs:
+            params['gateway_ip'] = kwargs['gateway_ip']
+        else:
+            params['gateway_ip'] = None
+
+        if 'dhcp_params' in kwargs:
+            params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
+            if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
+                end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
+                params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
+                                                'end' : end_address} ]
+                
+        if 'dns_server' in kwargs:
+            params['dns_nameservers'] = []
+            for server in kwargs['dns_server']:
+                params['dns_nameservers'].append(server)
+
+        try:
+            subnet = self._nt_drv.create_subnet({'subnets': [params]})
+        except Exception as e:
+            self.log.exception("Create Subnet operation failed. Exception: %s",str(e))
+            raise
+
+        return subnet['subnets'][0]['id']
+
+    def subnet_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
+
+        Arguments: None
+
+        Returns:
+           A dictionary of the objects of subnet attributes
+        """
+        try:
+            subnets = self._nt_drv.list_subnets(**kwargs)['subnets']
+        except Exception as e:
+            self.log.exception("List Subnet operation failed. Exception: %s", str(e))
+            raise
+        return subnets
+
+    def _subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        subnets = self._nt_drv.list_subnets(id=subnet_id)
+        if not subnets['subnets']:
+            self.log.error("Get subnet operation failed for subnet_id: %s", subnet_id)
+            #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
+            return {'cidr': ''}
+        else:
+            return subnets['subnets'][0]
+
+    def subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        return self._subnet_get(subnet_id)
+
+    def subnet_delete(self, subnet_id):
+        """
+        Deletes a subnet identified by subnet_id
+
+        Arguments:
+           subnet_id (string): UUID of the subnet to be deleted
+
+        Returns: None
+        """
+        assert subnet_id == self._subnet_get(subnet_id)['id']
+        try:
+            self._nt_drv.delete_subnet(subnet_id)
+        except Exception as e:
+            self.log.exception("Delete Subnet operation failed for subnet_id : %s. Exception: %s", subnet_id, str(e))
+            raise
+
+    def port_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the port
+
+        Arguments:
+            kwargs (dictionary): A dictionary for filters for port_list operation
+
+        Returns:
+           A list of dictionaries of port attributes
+
+        """
+        ports  = []
+
+        # NOTE: the caller's kwargs dict is mutated here — the query is always
+        # scoped to this session's project.
+        kwargs['tenant_id'] = self.project_id
+
+        try:
+            ports  = self._nt_drv.list_ports(**kwargs)
+        except Exception as e:
+            self.log.exception("List Port operation failed. Exception: %s",str(e))
+            raise
+        return ports['ports']
+
+    def port_create(self, ports):
+        """
+        Create one or more ports in a network (bulk create).
+
+        Arguments:
+           Ports
+           List of dictionaries of following
+           {
+              name (string)          : Name of the port
+              network_id(string)     : UUID of the network_id identifying the network to which port belongs
+              ip_address(string)     : (Optional) Static IP address to assign to the port
+              vnic_type(string)      : Possible values are "normal", "direct", "macvtap"
+              admin_state_up         : True/False
+              port_security_enabled  : True/False
+              security_groups        : A List of Neutron security group Ids
+           }
+        Returns:
+           A list of port_id (string)   
+        """
+        # Neutron bulk-create expects the list wrapped under a 'ports' key.
+        params = dict()
+        params['ports'] = ports 
+        self.log.debug("Port create params: {}".format(params))
+        try:
+            ports  = self._nt_drv.create_port(params)
+        except Exception as e:
+            self.log.exception("Ports Create operation failed. Exception: %s",str(e))
+            raise
+        return [ p['id'] for p in ports['ports'] ] 
+
+    
+    def port_update(self, port_id, no_security_groups=None,port_security_enabled=None):
+        """
+        Update a port in network
+        """
+        params = {}
+        params["port"] = {}
+        if no_security_groups:
+            params["port"]["security_groups"] = []
+        if port_security_enabled == False:
+            params["port"]["port_security_enabled"] = False
+        elif  port_security_enabled == True:
+            params["port"]["port_security_enabled"] = True
+
+        try:
+            port  = self._nt_drv.update_port(port_id,params)
+        except Exception as e:
+            self.log.exception("Port Update operation failed. Exception: %s", str(e))
+            raise
+        return port['port']['id']
+
+    def _port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+
+        Raises:
+           NeutronException.NotFound when no port matches port_id
+        """
+        port   = self._nt_drv.list_ports(id=port_id)['ports']
+        if not port:
+            raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
+        return port[0]
+
+    def port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        return self._port_get(port_id)
+
+    def port_delete(self, port_id):
+        """
+        Deletes a port identified by port_id
+
+        Arguments:
+           port_id (string) : UUID of the port
+
+        Returns: None
+        """
+        # Existence check before deletion; _port_get raises NotFound for a
+        # missing port.  NOTE(review): asserts are stripped under `python -O`,
+        # in which case this pre-check silently disappears.
+        assert port_id == self._port_get(port_id)['id']
+        try:
+            self._nt_drv.delete_port(port_id)
+        except Exception as e:
+            self.log.exception("Port Delete operation failed for port_id : %s. Exception: %s",port_id, str(e))
+            raise
+
+    def security_group_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the security group
+
+        Arguments:
+           kwargs: optional filters forwarded to neutron
+
+        Returns:
+           A list of dictionaries of security group attributes
+        """
+        try:
+            # Query is always scoped to this session's project (mutates the
+            # caller's kwargs dict).
+            kwargs['tenant_id'] = self.project_id
+            group_list = self._nt_drv.list_security_groups(**kwargs)
+        except Exception as e:
+            self.log.exception("List Security group operation, Exception: %s", str(e))
+            raise
+        return group_list['security_groups']
+    
+
+    def subnetpool_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
+
+        Arguments:
+           None
+
+        Returns:
+           A dictionary of the objects of subnet prefix pool
+        """
+        try:
+            pool_list = self._nt_drv.list_subnetpools(**kwargs)
+        except Exception as e:
+            self.log.exception("List SubnetPool operation, Exception: %s",str(e))
+            raise
+
+        if 'subnetpools' in pool_list:
+            return pool_list['subnetpools']
+        else:
+            return []
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/__init__.py
new file mode 100644
index 0000000..b75f4d3
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .nova_drv import (
+    NovaDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/nova_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/nova_drv.py
new file mode 100644
index 0000000..4dc8c65
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/nova/nova_drv.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+from novaclient import client as nvclient
+
+
+class NovaDrvAPIVersionException(Exception):
+    def __init__(self, errors):
+        self.errors = errors
+        super(NovaDrvAPIVersionException, self).__init__("Multiple Exception Received")
+        
+    def __str__(self):
+        return self.__repr__()
+        
+    def __repr__(self):
+        msg = "{} : Following Exception(s) have occured during Nova API discovery".format(self.__class__)
+        for n,e in enumerate(self.errors):
+            msg += "\n"
+            msg += " {}:  {}".format(n, str(e))
+        return msg
+
+
+class NovaDriver(object):
+    """
+    NovaDriver Class for compute orchestration
+    """
+    ### List of supported API versions in prioritized order 
+    supported_versions = ["2.1", "2.0"]
+    
+    def __init__(self,
+                 sess_handle,
+                 region_name    = 'RegionOne',
+                 service_type   = 'compute',
+                 logger = None):
+        """
+        Constructor for NovaDriver class
+        Arguments:
+        sess_handle (instance of class SessionDriver)
+        region_name (string): Region Name
+        service_type(string): Type of service in service catalog
+        logger (instance of logging.Logger)
+
+        Raises:
+        NovaDrvAPIVersionException if none of the supported API versions
+        can be used to construct a client.
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.nova')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+        
+        self._sess_handle = sess_handle
+
+        #### Attempt to use API versions in prioritized order defined in
+        #### NovaDriver.supported_versions
+        def select_version(version):
+            # Returns (version, client) on success; re-raises on failure so
+            # the caller can record the error and try the next version.
+            try:
+                self.log.info("Attempting to use Nova v%s APIs", version)
+                nvdrv = nvclient.Client(version=version,
+                                        region_name = region_name,
+                                        service_type = service_type,
+                                        session = self._sess_handle.session,
+                                        logger = self.log)
+            except Exception as e:
+                self.log.info(str(e))
+                raise
+            else:
+                self.log.info("Nova API v%s selected", version)
+                return (version, nvdrv)
+
+        errors = []
+        # First version that constructs a client wins; the for/else only
+        # fires when *every* candidate version failed.
+        for v in NovaDriver.supported_versions:
+            try:
+                (self._version, self._nv_drv) = select_version(v)
+            except Exception as e:
+                errors.append(e)
+            else:
+                break
+        else:
+            raise NovaDrvAPIVersionException(errors)
+
+    @property
+    def project_id(self):
+        # Project UUID delegated from the session handle.
+        return self._sess_handle.project_id
+
+    @property
+    def nova_endpoint(self):
+        # Endpoint URL resolved by the underlying novaclient session.
+        return self._nv_drv.client.get_endpoint()
+
+    @property
+    def nova_quota(self):
+        """
+        Returns Nova Quota (a dictionary) for project
+        """
+        try:
+            quota = self._nv_drv.quotas.get(self.project_id)
+        except Exception as e:
+            self.log.exception("Get Nova quota operation failed. Exception: %s", str(e))
+            raise
+        return quota.to_dict()
+    
+    def extensions_list(self):
+        """
+        Returns a list of available nova extensions.
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single NOVA extension
+        """
+        try:
+            extensions = self._nv_drv.list_extensions.show_all()
+        except Exception as e:
+            self.log.exception("List extension operation failed. Exception: %s", str(e))
+            raise
+        return [ ext.to_dict() for ext in extensions ]
+            
+    
+    def _get_nova_connection(self):
+        """
+        Returns instance of object novaclient.client.Client
+        Use for DEBUG ONLY
+        """
+        return self._nv_drv
+
+    def _flavor_extra_spec_get(self, flavor):
+        """
+        Get extra_specs associated with a flavor
+        Arguments:
+           flavor: Object of novaclient.v2.flavors.Flavor
+
+        Returns:
+           A dictionary of extra_specs (key-value pairs)
+
+        Raises:
+           Propagates any exception raised by flavor.get_keys()
+        """
+        try:
+            extra_specs = flavor.get_keys()
+        except Exception as e:
+            self.log.exception("Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s",
+                               flavor.id, str(e))
+            raise
+        return extra_specs
+    
+    def _flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters
+        """
+        try:
+            flavor = self._nv_drv.flavors.get(flavor_id)
+        except Exception as e:
+            self.log.exception("Did not find flavor with flavor_id : %s. Exception: %s",flavor_id, str(e))
+            raise
+        response = flavor.to_dict()
+        response['extra_specs'] = self._flavor_extra_spec_get(flavor)
+        return response
+        
+        try:
+            extra_specs = flavor.get_keys()
+        except Exception as e:
+            self.log.exception("Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s",
+                               flavor_id, str(e))
+            raise
+
+        response = flavor.to_dict()
+        assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
+        response['extra_specs'] = extra_specs
+        return response
+
+    def flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of flavor_id
+
+        Returns:
+        dictionary of flavor parameters (including 'extra_specs')
+        """
+        return self._flavor_get(flavor_id)
+
+    def flavor_find(self, **kwargs):
+        """
+        Returns list of all flavors (dictionary) matching the filters provided in kwargs 
+
+        Arguments:
+          A dictionary in following keys
+             {
+                "vcpus": Number of vcpus required
+                "ram"  : Memory in MB
+                "disk" : Secondary storage in GB
+             }
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
+        """
+        try:
+            flavor_list = self._nv_drv.flavors.findall(**kwargs)
+        except Exception as e:
+            self.log.exception("Find Flavor operation failed. Exception: %s",str(e))
+            raise
+        
+        # Merge EPA attributes into each matching flavor dict.
+        flavor_info = list()
+        for f in flavor_list:
+            flavor = f.to_dict()
+            flavor['extra_specs'] = self._flavor_extra_spec_get(f)
+            flavor_info.append(flavor)
+            
+        return flavor_info
+    
+    def flavor_list(self):
+        """
+        Returns list of all flavors (dictionary per flavor)
+
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
+        """
+        flavors = []
+        flavor_info = []
+        
+        try:
+            flavors = self._nv_drv.flavors.list()
+        except Exception as e:
+            self.log.exception("List Flavor operation failed. Exception: %s",str(e))
+            raise
+        # One extra GET per flavor to pick up extra_specs (see flavor_get).
+        if flavors:
+            flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
+        return flavor_info
+
+    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
+        """
+        Create a new flavor
+
+        Arguments:
+           name   (string):  Name of the new flavor
+           ram    (int)   :  Memory in MB
+           vcpus  (int)   :  Number of VCPUs
+           disk   (int)   :  Secondary storage size in GB
+           extra_specs (dictionary): EPA attributes dictionary
+
+        Returns:
+           flavor_id (string): UUID of flavor created
+        """
+        try:
+            flavor = self._nv_drv.flavors.create(name         = name,
+                                                 ram          = ram,
+                                                 vcpus        = vcpu,
+                                                 disk         = disk,
+                                                 flavorid     = 'auto',
+                                                 ephemeral    = 0,
+                                                 swap         = 0,
+                                                 rxtx_factor  = 1.0,
+                                                 is_public    = True)
+        except Exception as e:
+            self.log.exception("Create Flavor operation failed. Exception: %s",str(e))
+            raise
+
+        if extra_specs:
+            try:
+                flavor.set_keys(extra_specs)
+            except Exception as e:
+                self.log.exception("Set Key operation failed for flavor: %s. Exception: %s",
+                                   flavor.id, str(e))
+                raise
+        return flavor.id
+
+    def flavor_delete(self, flavor_id):
+        """
+        Deletes a flavor identified by flavor_id
+
+        Arguments:
+           flavor_id (string):  UUID of flavor to be deleted
+
+        Returns: None
+        """
+        # Existence pre-check; _flavor_get raises if the flavor is missing.
+        # NOTE(review): asserts are stripped under `python -O`.
+        assert flavor_id == self._flavor_get(flavor_id)['id']
+        try:
+            self._nv_drv.flavors.delete(flavor_id)
+        except Exception as e:
+            self.log.exception("Delete flavor operation failed for flavor: %s. Exception: %s",
+                               flavor_id, str(e))
+            raise
+
+
+    def server_list(self):
+        """
+        Returns a list of available VMs for the project
+
+        Arguments: None
+
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes associated
+           with individual VM
+        """
+        servers     = []
+        server_info = []
+        try:
+            servers     = self._nv_drv.servers.list()
+        except Exception as e:
+            self.log.exception("List Server operation failed. Exception: %s", str(e))
+            raise
+        server_info = [ server.to_dict() for server in servers]
+        return server_info
+
+    def _nova_server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by service_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+
+        Raises:
+          Propagates any exception raised by the nova client (e.g. not found)
+        """
+        try:
+            server = self._nv_drv.servers.get(server = server_id)
+        except Exception as e:
+            self.log.exception("Get Server operation failed for server_id: %s. Exception: %s",
+                               server_id, str(e))
+            raise
+        else:
+            return server.to_dict()
+
+    def server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by service_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        """
+        return self._nova_server_get(server_id)
+
+    def server_create(self, **kwargs):
+        """
+        Creates a new VM/server instance
+
+        Arguments:
+          A dictionary of following key-value pairs
+         {
+           server_name(string)        : Name of the VM/Server
+           flavor_id  (string)        : UUID of the flavor to be used for VM
+           image_id   (string)        : UUID of the image to be used VM/Server instance,
+                                             This could be None if volumes (with images) are being used
+           network_list(List)         : A List of network_ids. A port will be created in these networks
+           port_list (List)           : A List of port-ids. These ports will be added to VM.
+           metadata   (dict)          : A dictionary of arbitrary key-value pairs associated with VM/server
+           userdata   (string)        : A script which shall be executed during first boot of the VM
+           availability_zone (string) : A name of the availability zone where instance should be launched
+           scheduler_hints (string)   : Openstack scheduler_hints to be passed to nova scheduler
+         }
+        Returns:
+          server_id (string): UUID of the VM/server created
+
+        """
+        nics = []
+        if 'network_list' in kwargs:
+            for network_id in kwargs['network_list']:
+                nics.append({'net-id': network_id})
+
+        if 'port_list' in kwargs:
+            for port_id in kwargs['port_list']:
+                nics.append({'port-id': port_id})
+
+        try:
+            server = self._nv_drv.servers.create(
+                kwargs['name'],
+                kwargs['image_id'],
+                kwargs['flavor_id'],
+                meta                 = kwargs['metadata'] if 'metadata' in kwargs else None,
+                files                = kwargs['files'] if 'files' in kwargs else None,
+                reservation_id       = None,
+                min_count            = None,
+                max_count            = None,
+                userdata             = kwargs['userdata'] if 'userdata' in kwargs else None,
+                security_groups      = kwargs['security_groups'] if 'security_groups' in kwargs else None,
+                availability_zone    = kwargs['availability_zone'] if 'availability_zone' in kwargs else None,
+                block_device_mapping_v2 = kwargs['block_device_mapping_v2'] if 'block_device_mapping_v2' in kwargs else None,
+                nics                 = nics,
+                scheduler_hints      = kwargs['scheduler_hints'] if 'scheduler_hints' in kwargs else None,
+                config_drive         = kwargs['config_drive'] if 'config_drive' in kwargs else None
+            )
+            
+        except Exception as e:
+            self.log.exception("Create Server operation failed. Exception: %s", str(e))
+            raise
+        return server.to_dict()['id']
+
+    def server_delete(self, server_id):
+        """
+        Deletes a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be deleted
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.delete(server_id)
+        except Exception as e:
+            self.log.exception("Delete server operation failed for server_id: %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_start(self, server_id):
+        """
+        Starts a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be started
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.start(server_id)
+        except Exception as e:
+            self.log.exception("Start Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_stop(self, server_id):
+        """
+        Stops a server identified by server_id.
+
+        Arguments:
+           server_id (string): UUID of the server to be stopped
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.stop(server_id)
+        except Exception as e:
+            self.log.exception("Stop Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_pause(self, server_id):
+        """
+        Pauses a server identified by server_id.
+
+        Arguments:
+           server_id (string): UUID of the server to be paused
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.pause(server_id)
+        except Exception as e:
+            self.log.exception("Pause Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_unpause(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be unpaused
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.unpause(server_id)
+        except Exception as e:
+            self.log.exception("Resume Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+
+    def server_suspend(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be suspended
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.suspend(server_id)
+        except Exception as e:
+            self.log.exception("Suspend Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+
+
+    def server_resume(self, server_id):
+        """
+        Resumes a suspended server.
+
+        Arguments:
+           server_id (string): UUID of the server to be resumed
+
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.resume(server_id)
+        except Exception as e:
+            self.log.exception("Resume Server operation failed for server_id : %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_reboot(self, server_id, reboot_type):
+        """
+        Arguments:
+           server_id (string) : UUID of the server to be rebooted
+           reboot_type(string):
+                         'SOFT': Soft Reboot
+                         'HARD': Hard Reboot
+        Returns: None
+        """
+        try:
+            self._nv_drv.servers.reboot(server_id, reboot_type)
+        except Exception as e:
+            self.log.exception("Reboot Server operation failed for server_id: %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+    def server_console(self, server_id, console_type = 'novnc'):
+        """
+        Arguments:
+           server_id (string) : UUID of the server for which a console is requested
+           console_type(string):
+                               'novnc',
+                               'xvpvnc'
+        Returns:
+          A dictionary object response for console information
+        """
+        try:
+            console_info = self._nv_drv.servers.get_vnc_console(server_id, console_type)
+        except Exception as e:
+            self.log.exception("Server Get-Console operation failed for server_id: %s. Exception: %s",
+                               server_id, str(e))
+            raise
+        return console_info
+
+    def server_rebuild(self, server_id, image_id):
+        """
+        Arguments:
+           server_id (string) : UUID of the server to be rebuilt
+           image_id (string)  : UUID of the image to use
+        Returns: None
+        """
+
+        try:
+            self._nv_drv.servers.rebuild(server_id, image_id)
+        except Exception as e:
+            self.log.exception("Rebuild Server operation failed for server_id: %s. Exception: %s",
+                               server_id, str(e))
+            raise
+
+
+    def server_add_port(self, server_id, port_id):
+        """
+        Attaches an existing neutron port to a server.
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be attached
+
+        Returns: None
+        """
+        # net_id/fixed_ip are None because an existing port is attached.
+        try:
+            self._nv_drv.servers.interface_attach(server_id,
+                                            port_id,
+                                            net_id = None,
+                                            fixed_ip = None)
+        except Exception as e:
+            self.log.exception("Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s",
+                               server_id, port_id, str(e))
+            raise
+
+    def server_delete_port(self, server_id, port_id):
+        """
+        Detaches a neutron port from a server.
+
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be deleted
+        Returns: None
+
+        """
+        try:
+            self._nv_drv.servers.interface_detach(server_id, port_id)
+        except Exception as e:
+            self.log.exception("Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s",
+                               server_id, port_id, str(e))
+            raise
+
+    def floating_ip_list(self):
+        """
+        Arguments:
+            None
+        Returns:
+            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+
+        NOTE(review): the nova floating-ip proxy APIs are deprecated in newer
+        OpenStack releases in favour of neutron — confirm against the target
+        deployment.
+        """
+        try:
+            ip_list = self._nv_drv.floating_ips.list()
+        except Exception as e:
+            self.log.exception("Floating IP List operation failed. Exception: %s", str(e))
+            raise
+
+        return ip_list
+
+    def floating_ip_create(self, pool):
+        """
+        Arguments:
+           pool (string): Name of the pool (optional)
+        Returns:
+           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+        try:
+            floating_ip = self._nv_drv.floating_ips.create(pool)
+        except Exception as e:
+            self.log.exception("Floating IP Create operation failed. Exception: %s", str(e))
+            raise
+
+        return floating_ip
+
+    def floating_ip_delete(self, floating_ip):
+        """
+        Arguments:
+           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Returns:
+           None
+        """
+        try:
+            floating_ip = self._nv_drv.floating_ips.delete(floating_ip)
+        except Exception as e:
+            self.log.exception("Floating IP Delete operation failed. Exception: %s", str(e))
+            raise
+
+    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
+        Returns:
+           None
+        """
+        try:
+            self._nv_drv.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
+        except Exception as e:
+            self.log.exception("Assign Floating IP operation failed. Exception: %s", str(e))
+            raise
+
+    def floating_ip_release(self, server_id, floating_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+        Returns:
+           None
+        """
+        try:
+            self._nv_drv.servers.remove_floating_ip(server_id, floating_ip)
+        except Exception as e:
+            self.log.exception("Release Floating IP operation failed. Exception: %s", str(e))
+            raise
+
+    def volume_list(self, server_id):
+        """
+          List of volumes attached to the server
+  
+          Arguments:
+              server_id (string): UUID of the server
+          Returns:
+             List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
+        """
+        try:
+            volumes = self._nv_drv.volumes.get_server_volumes(server_id=server_id)
+        except Exception as e:
+            self.log.exception("Get volume information failed. Exception: %s", str(e))
+            raise
+
+        volume_info = [v.to_dict() for v in volumes]
+        return volume_info
+
+
+    def group_list(self):
+        """
+        List of Server Affinity and Anti-Affinity Groups
+
+        Arguments:
+            None
+        Returns:
+           List of dictionary objects where dictionary is representation of class (novaclient.v2.server_groups.ServerGroup)
+        """
+        try:
+            group_list = self._nv_drv.server_groups.list()
+        except Exception as e:
+            self.log.exception("Server Group List operation failed. Exception: %s", str(e))
+            raise
+
+        group_info = [ group.to_dict() for group in group_list ]
+        return group_info
+
+        
+    def security_group_list(self):
+        """
+        List of Security Group
+        Arguments:
+        None
+        Returns:
+        List of dictionary objects representating novaclient.v2.security_groups.SecurityGroup class
+        """
+        try:
+            sec_groups = self._nv_drv.security_groups.list()
+        except Exception as e:
+            self.log.exception("Security Group List operation failed. Exception: %s", str(e))
+            raise
+        sec_info = [ sec_group.to_dict() for sec_group in sec_groups]
+        return sec_info
+    
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
index 191a06f..6ee2cb6 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
@@ -16,1764 +16,314 @@
 #   limitations under the License.
 #
 
-import json
 import logging
-import ipaddress
 
-from keystoneclient import v3 as ksclientv3
-from keystoneclient.v2_0 import client as ksclientv2
-from novaclient import client as nova_client
-from neutronclient.neutron import client as ntclient
-from glanceclient.v2 import client as glclient
-from ceilometerclient import client as ceilo_client
-from cinderclient.v2 import client as cinder_client
+from . import session as sess_drv
+from . import keystone as ks_drv
+from . import nova as nv_drv
+from . import neutron as nt_drv
+from . import glance as gl_drv
+from . import ceilometer as ce_drv
+from . import cinder as ci_drv
+from . import utils as drv_utils
 
 # Exceptions
-import novaclient.exceptions as NovaException
 import keystoneclient.exceptions as KeystoneExceptions
-import neutronclient.common.exceptions as NeutronException
-import glanceclient.exc as GlanceException
-import cinderclient.exceptions as CinderException
 
-logger = logging.getLogger('rwcal.openstack.drv')
-logger.setLevel(logging.DEBUG)
 
 class ValidationError(Exception):
     pass
 
 
-class KeystoneDriver(object):
+class DriverUtilities(object):
     """
-    Driver base-class for keystoneclient APIs
+    Class with utility methods
     """
-    def __init__(self, ksclient):
+    def __init__(self, driver):
         """
-        Constructor for KeystoneDriver base class
-        Arguments: None
-        Returns: None
-        """
-        self.ksclient = ksclient
-
-    def get_username(self):
-        """
-        Returns the username associated with keystoneclient connection
-        """
-        return self._username
-
-    def get_password(self):
-        """
-        Returns the password associated with keystoneclient connection
-        """
-        return self._password
-
-    def get_tenant_name(self):
-        """
-        Returns the tenant name associated with keystoneclient connection
-        """
-        return self._tenant_name
-
-    def get_user_domain_name(self):
-        """
-        Returns None as this field does not exist for v2.
-        """
-        return None;
-
-    def get_project_domain_name(self):
-        """
-        Returns None as this field does not exist for v2.
-        """
-        return None;
-
-    def _get_keystone_connection(self):
-        """
-        Returns object of class python-keystoneclient class
-        """
-        if not hasattr(self, '_keystone_connection'):
-            self._keystone_connection = self.ksclient(**self._get_keystone_credentials())
-        return self._keystone_connection
-
-    def is_auth_token_valid(self, token_expiry, time_fmt):
-        """
-        Performs validity on auth_token
+        Constructor of DriverUtilities class
         Arguments:
-          token_expiry (string): Expiry time for token
-          time_fmt (string)    : Format for expiry string in auth_ref
-
-        Returns:
-        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
+          driver: Object of OpenstackDriver
         """
-        import time
-        import datetime
-        import dateutil.parser
-        try:
-            now = datetime.datetime.timetuple(datetime.datetime.utcnow())
-            expires_at = dateutil.parser.parse(token_expiry)
-            t_now = time.mktime(now)
-            t_expiry = time.mktime(expires_at.timetuple())
-
-            if (t_expiry <= t_now) or ((t_expiry - t_now) < 300 ):
-                ### Token has expired or about to expire (5 minute)
-                delattr(self, '_keystone_connection')
-                return False
-            else:
-                return True
-        except Exception as e:
-            logger.error("Received except %s during auth_token validity check" %str(e))
-            logger.info("Can not validate the auth_token. Assuming invalid")
-            return False
-
-
-    def get_service_endpoint(self, service_type, endpoint_type):
-        """
-        Returns requested type of endpoint for requested service type
-        Arguments:
-          service_type (string): Service Type (e.g. computev3, image, network)
-          endpoint_type(string): Endpoint Type (e.g. publicURL,adminURL,internalURL)
-        Returns:
-          service_endpoint(string): Service endpoint string
-        """
-        endpoint_kwargs   = {'service_type'  : service_type,
-                             'endpoint_type' : endpoint_type}
-        try:
-            ksconn = self._get_keystone_connection()
-            service_endpoint  = ksconn.service_catalog.url_for(**endpoint_kwargs)
-        except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
-            raise
-        except Exception as e:
-            logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
-            raise
-        return service_endpoint
-
-
-    def get_raw_token(self):
-        """
-        Returns a valid raw_auth_token string
-
-        Returns (string): raw_auth_token string
-        """
-        ksconn = self._get_keystone_connection()
-        try:
-            raw_token = ksconn.get_raw_token_from_identity_service(auth_url = self._auth_url,
-                                                                   token    = self.get_auth_token())
-        except KeystoneExceptions.AuthorizationFailure as e:
-            logger.error("OpenstackDriver: get_raw_token_from_identity_service Failure. Exception: %s" %(str(e)))
-            return None
-
-        except Exception as e:
-            logger.error("OpenstackDriver: Could not retrieve raw_token. Exception: %s" %(str(e)))
-
-        return raw_token
-
-    def get_tenant_id(self):
-        """
-        Returns tenant_id for the project/tenant. Tenant name is provided during
-        class instantiation
-
-        Returns (string): Tenant ID
-        """
-        ksconn = self._get_keystone_connection()
-        return ksconn.tenant_id
-
-    def get_security_mode(self):
-        """
-        Returns certificate_validation policy in case of SSL/TLS connection.
-        This policy is provided during class instantiation
-
-        Returns (boolean):
-        The boolean returned are designed to match the python-client class instantiation ("insecure") value.
-        for nova/neutron/glance/keystone clients
-
-        True: No certificate validation required -- Insecure mode
-        False: Certificate validation required -- Secure mode
-        """
-        return self._insecure
-
-    def tenant_list(self):
-        """
-        Returns list of tenants
-        """
-        pass
-
-    def tenant_create(self, name):
-        """
-        Create a new tenant
-        """
-        pass
-
-    def tenant_delete(self, tenant_id):
-        """
-        Deletes a tenant identified by tenant_id
-        """
-        pass
-
-    def roles_list(self):
-        pass
-
-    def roles_create(self):
-        pass
-
-    def roles_delete(self):
-        pass
-
-class KeystoneDriverV2(KeystoneDriver):
-    """
-    Driver class for keystoneclient V2 APIs
-    """
-    def __init__(self, username, password, auth_url,tenant_name, insecure, region):
-        """
-        Constructor for KeystoneDriverV3 class
-        Arguments:
-        username (string)  : Username
-        password (string)  : Password
-        auth_url (string)  : Authentication URL
-        tenant_name(string): Tenant Name
-        region (string)    : Region name
-        Returns: None
-        """
-        self._username    = username
-        self._password    = password
-        self._auth_url    = auth_url
-        self._tenant_name = tenant_name
-        self._insecure    = insecure
-        self._region      = region
-        super(KeystoneDriverV2, self).__init__(ksclientv2.Client)
-
-    def _get_keystone_credentials(self):
-        """
-        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
-        """
-        creds                 = {}
-        #creds['user_domain'] = self._domain_name
-        creds['username']     = self._username
-        creds['password']     = self._password
-        creds['auth_url']     = self._auth_url
-        creds['tenant_name']  = self._tenant_name
-        creds['insecure']     = self.get_security_mode()
-        creds['region_name']  = self._region
-        return creds
-
-    def get_auth_token(self):
-        """
-        Returns a valid auth_token
-
-        Returns (string): auth_token string
-        """
-        ksconn = self._get_keystone_connection()
-        return ksconn.auth_token
-
-    def is_auth_token_valid(self):
-        """
-        Performs validity on auth_token
-        Arguments:
-
-        Returns:
-        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
-        """
-        ksconn = self._get_keystone_connection()
-        result = super(KeystoneDriverV2, self).is_auth_token_valid(ksconn.auth_ref['token']['expires'],
-                                                                   "%Y-%m-%dT%H:%M:%SZ")
-        return result
-
-
-class KeystoneDriverV3(KeystoneDriver):
-    """
-    Driver class for keystoneclient V3 APIs
-    """
-    def __init__(self, username,
-                 password,
-                 auth_url,
-                 tenant_name,
-                 insecure,
-                 user_domain_name = None,
-                 project_domain_name = None,
-                 region = None):
-        """
-        Constructor for KeystoneDriverV3 class
-        Arguments:
-        username (string)  : Username
-        password (string)  : Password
-        auth_url (string)  : Authentication URL
-        tenant_name(string): Tenant Name
-        user_domain_name (string) : User domain name
-        project_domain_name (string): Project domain name
-        region (string)    : Region name
-        Returns: None
-        """
-        self._username             = username
-        self._password             = password
-        self._auth_url             = auth_url
-        self._tenant_name          = tenant_name
-        self._insecure             = insecure
-        self._user_domain_name     = user_domain_name
-        self._project_domain_name  = project_domain_name
-        self._region               = region
-        super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
-
-    def _get_keystone_credentials(self):
-        """
-        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
-        """
-        creds                        = {}
-        creds['username']            = self._username
-        creds['password']            = self._password
-        creds['auth_url']            = self._auth_url
-        creds['project_name']        = self._tenant_name
-        creds['insecure']            = self._insecure
-        creds['user_domain_name']    = self._user_domain_name
-        creds['project_domain_name'] = self._project_domain_name
-        creds['region_name']         = self._region
-        return creds
-
-    def get_user_domain_name(self):
-        """
-        Returns the domain_name of the associated OpenStack user account
-        """
-        return self._user_domain_name;
-
-    def get_project_domain_name(self):
-        """
-        Returns the domain_name of the associated OpenStack project
-        """
-        return self._project_domain_name;
-
-    def get_auth_token(self):
-        """
-        Returns a valid auth_token
-
-        Returns (string): auth_token string
-        """
-        ksconn = self._get_keystone_connection()
-        return ksconn.auth_ref['auth_token']
-
-    def is_auth_token_valid(self):
-        """
-        Performs validity on auth_token
-        Arguments:
-
-        Returns:
-        True/False (Boolean):  (auth_token is valid or auth_token is invalid)
-        """
-        ksconn = self._get_keystone_connection()
-        result = super(KeystoneDriverV3, self).is_auth_token_valid(ksconn.auth_ref['expires_at'],
-                                                                   "%Y-%m-%dT%H:%M:%S.%fZ")
-        return result
-
-class NovaDriver(object):
-    """
-    Driver for openstack nova_client
-    """
-    def __init__(self, ks_drv, service_name, version):
-        """
-        Constructor for NovaDriver
-        Arguments: KeystoneDriver class object
-        """
-        self.ks_drv = ks_drv
-        self._service_name = service_name
-        self._version = version
-
-    def _get_nova_credentials(self):
-        """
-        Returns a dictionary of kwargs required to instantiate python-novaclient class
-        """
-        creds               = {}
-        creds['version']    = self._version
-        creds['bypass_url'] = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
-        creds['username']   = self.ks_drv.get_username()
-        creds['project_id'] = self.ks_drv.get_tenant_name()
-        creds['auth_token'] = self.ks_drv.get_auth_token()
-        creds['insecure']   = self.ks_drv.get_security_mode()
-        #creds['user_domain_name'] = self.ks_drv.get_user_domain_name()
-        #creds['project_domain_name'] = self.ks_drv.get_project_domain_name()
-
-        return creds
-
-    def _get_nova_connection(self):
-        """
-        Returns an object of class python-novaclient
-        """
-        if not hasattr(self, '_nova_connection'):
-            self._nova_connection = nova_client.Client(**self._get_nova_credentials())
-        else:
-            # Reinitialize if auth_token is no longer valid
-            if not self.ks_drv.is_auth_token_valid():
-                self._nova_connection = nova_client.Client(**self._get_nova_credentials())
-        return self._nova_connection
-
-    def _flavor_get(self, flavor_id):
-        """
-        Get flavor by flavor_id
-        Arguments:
-           flavor_id(string): UUID of flavor_id
-
-        Returns:
-        dictionary of flavor parameters
-        """
-        nvconn = self._get_nova_connection()
-        try:
-            flavor = nvconn.flavors.get(flavor_id)
-        except Exception as e:
-            logger.info("OpenstackDriver: Did not find flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
-            raise
-
-        try:
-            extra_specs = flavor.get_keys()
-        except Exception as e:
-            logger.info("OpenstackDriver: Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
-            raise
-
-        response = flavor.to_dict()
-        assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
-        response['extra_specs'] = extra_specs
-        return response
-
-    def flavor_get(self, flavor_id):
-        """
-        Get flavor by flavor_id
-        Arguments:
-           flavor_id(string): UUID of flavor_id
-
-        Returns:
-        dictionary of flavor parameters
-        """
-        return self._flavor_get(flavor_id)
-
-    def flavor_list(self):
-        """
-        Returns list of all flavors (dictionary per flavor)
-
-        Arguments:
-           None
-        Returns:
-           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
-        """
-        flavors = []
-        flavor_info = []
-        nvconn =  self._get_nova_connection()
-        try:
-            flavors = nvconn.flavors.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: List Flavor operation failed. Exception: %s"%(str(e)))
-            raise
-        if flavors:
-            flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
-        return flavor_info
-
-    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
-        """
-        Create a new flavor
-
-        Arguments:
-           name   (string):  Name of the new flavor
-           ram    (int)   :  Memory in MB
-           vcpus  (int)   :  Number of VCPUs
-           disk   (int)   :  Secondary storage size in GB
-           extra_specs (dictionary): EPA attributes dictionary
-
-        Returns:
-           flavor_id (string): UUID of flavor created
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            flavor = nvconn.flavors.create(name        = name,
-                                           ram         = ram,
-                                           vcpus       = vcpu,
-                                           disk        = disk,
-                                           flavorid    = 'auto',
-                                           ephemeral   = 0,
-                                           swap        = 0,
-                                           rxtx_factor = 1.0,
-                                           is_public    = True)
-        except Exception as e:
-            logger.error("OpenstackDriver: Create Flavor operation failed. Exception: %s"%(str(e)))
-            raise
-
-        if extra_specs:
-            try:
-                flavor.set_keys(extra_specs)
-            except Exception as e:
-                logger.error("OpenstackDriver: Set Key operation failed for flavor: %s. Exception: %s" %(flavor.id, str(e)))
-                raise
-        return flavor.id
-
-    def flavor_delete(self, flavor_id):
-        """
-        Deletes a flavor identified by flavor_id
-
-        Arguments:
-           flavor_id (string):  UUID of flavor to be deleted
-
-        Returns: None
-        """
-        assert flavor_id == self._flavor_get(flavor_id)['id']
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.flavors.delete(flavor_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Delete flavor operation failed for flavor: %s. Exception: %s" %(flavor_id, str(e)))
-            raise
-
-
-    def server_list(self):
-        """
-        Returns a list of available VMs for the project
-
-        Arguments: None
-
-        Returns:
-           A list of dictionaries. Each dictionary contains attributes associated
-           with individual VM
-        """
-        servers     = []
-        server_info = []
-        nvconn      = self._get_nova_connection()
-        try:
-            servers     = nvconn.servers.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: List Server operation failed. Exception: %s" %(str(e)))
-            raise
-        server_info = [ server.to_dict() for server in servers]
-        return server_info
-
-    def _nova_server_get(self, server_id):
-        """
-        Returns a dictionary of attributes associated with VM identified by service_id
-
-        Arguments:
-          server_id (string): UUID of the VM/server for which information is requested
-
-        Returns:
-          A dictionary object with attributes associated with VM identified by server_id
-        """
-        nvconn = self._get_nova_connection()
-        try:
-            server = nvconn.servers.get(server = server_id)
-        except Exception as e:
-            logger.info("OpenstackDriver: Get Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
-            raise
-        else:
-            return server.to_dict()
-
-    def server_get(self, server_id):
-        """
-        Returns a dictionary of attributes associated with VM identified by service_id
-
-        Arguments:
-          server_id (string): UUID of the VM/server for which information is requested
-
-        Returns:
-          A dictionary object with attributes associated with VM identified by server_id
-        """
-        return self._nova_server_get(server_id)
-
-    def server_create(self, **kwargs):
-        """
-        Creates a new VM/server instance
-
-        Arguments:
-          A dictionary of following key-value pairs
-         {
-           server_name(string)        : Name of the VM/Server
-           flavor_id  (string)        : UUID of the flavor to be used for VM
-           image_id   (string)        : UUID of the image to be used VM/Server instance,
-                                             This could be None if volumes (with images) are being used
-           network_list(List)         : A List of network_ids. A port will be created in these networks
-           port_list (List)           : A List of port-ids. These ports will be added to VM.
-           metadata   (dict)          : A dictionary of arbitrary key-value pairs associated with VM/server
-           userdata   (string)        : A script which shall be executed during first boot of the VM
-           availability_zone (string) : A name of the availability zone where instance should be launched
-           scheduler_hints (string)   : Openstack scheduler_hints to be passed to nova scheduler
-         }
-        Returns:
-          server_id (string): UUID of the VM/server created
-
-        """
-        nics = []
-        if 'network_list' in kwargs:
-            for network_id in kwargs['network_list']:
-                nics.append({'net-id': network_id})
-
-        if 'port_list' in kwargs:
-            for port_id in kwargs['port_list']:
-                nics.append({'port-id': port_id})
-
-        nvconn = self._get_nova_connection()
-
-
-        try:
-            server = nvconn.servers.create(kwargs['name'],
-                                           kwargs['image_id'],
-                                           kwargs['flavor_id'],
-                                           meta                 = kwargs['metadata'],
-                                           files                = kwargs['files'],
-                                           reservation_id       = None,
-                                           min_count            = None,
-                                           max_count            = None,
-                                           userdata             = kwargs['userdata'],
-                                           security_groups      = kwargs['security_groups'],
-                                           availability_zone    = kwargs['availability_zone'],
-                                           block_device_mapping_v2 = kwargs['block_device_mapping_v2'],
-                                           nics                 = nics,
-                                           scheduler_hints      = kwargs['scheduler_hints'],
-                                           config_drive         = kwargs['config_drive'])
-        except Exception as e:
-            logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
-            raise
-        return server.to_dict()['id']
-
-    def server_delete(self, server_id):
-        """
-        Deletes a server identified by server_id
-
-        Arguments:
-           server_id (string): UUID of the server to be deleted
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.delete(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Delete server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_start(self, server_id):
-        """
-        Starts a server identified by server_id
-
-        Arguments:
-           server_id (string): UUID of the server to be started
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.start(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Start Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_stop(self, server_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server to be stopped
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.stop(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Stop Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_pause(self, server_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server to be paused
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.pause(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Pause Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_unpause(self, server_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server to be unpaused
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.unpause(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-
-    def server_suspend(self, server_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server to be suspended
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.suspend(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Suspend Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-
-
-    def server_resume(self, server_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server to be resumed
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.resume(server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_reboot(self, server_id, reboot_type):
-        """
-        Arguments:
-           server_id (string) : UUID of the server to be rebooted
-           reboot_type(string):
-                         'SOFT': Soft Reboot
-                         'HARD': Hard Reboot
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.reboot(server_id, reboot_type)
-        except Exception as e:
-            logger.error("OpenstackDriver: Reboot Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-    def server_console(self, server_id, console_type = 'novnc'):
-        """
-        Arguments:
-           server_id (string) : UUID of the server to be rebooted
-           console_type(string):
-                               'novnc',
-                               'xvpvnc'
-        Returns:
-          A dictionary object response for console information
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            console_info = nvconn.servers.get_vnc_console(server_id, console_type)
-        except Exception as e:
-            logger.error("OpenstackDriver: Server Get-Console operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
-            raise
-        return console_info
-
-    def server_rebuild(self, server_id, image_id):
-        """
-        Arguments:
-           server_id (string) : UUID of the server to be rebooted
-           image_id (string)  : UUID of the image to use
-        Returns: None
-        """
-
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.rebuild(server_id, image_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Rebuild Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
-            raise
-
-
-    def server_add_port(self, server_id, port_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server
-           port_id   (string): UUID of the port to be attached
-
-        Returns: None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.interface_attach(server_id,
-                                            port_id,
-                                            net_id = None,
-                                            fixed_ip = None)
-        except Exception as e:
-            logger.error("OpenstackDriver: Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
-            raise
-
-    def server_delete_port(self, server_id, port_id):
-        """
-        Arguments:
-           server_id (string): UUID of the server
-           port_id   (string): UUID of the port to be deleted
-        Returns: None
-
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.interface_detach(server_id, port_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
-            raise
-
-    def floating_ip_list(self):
-        """
-        Arguments:
-            None
-        Returns:
-            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            ip_list = nvconn.floating_ips.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: Floating IP List operation failed. Exception: %s" %str(e))
-            raise
-
-        return ip_list
-
-    def floating_ip_create(self, pool):
-        """
-        Arguments:
-           pool (string): Name of the pool (optional)
-        Returns:
-           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            floating_ip = nvconn.floating_ips.create(pool)
-        except Exception as e:
-            logger.error("OpenstackDriver: Floating IP Create operation failed. Exception: %s"  %str(e))
-            raise
-
-        return floating_ip
-
-    def floating_ip_delete(self, floating_ip):
-        """
-        Arguments:
-           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
-        Returns:
-           None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            floating_ip = nvconn.floating_ips.delete(floating_ip)
-        except Exception as e:
-            logger.error("OpenstackDriver: Floating IP Delete operation failed. Exception: %s"  %str(e))
-            raise
-
-    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
-        """
-        Arguments:
-           server_id (string)  : UUID of the server
-           floating_ip (string): IP address string for floating-ip
-           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
-        Returns:
-           None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
-        except Exception as e:
-            logger.error("OpenstackDriver: Assign Floating IP operation failed. Exception: %s"  %str(e))
-            raise
-
-    def floating_ip_release(self, server_id, floating_ip):
-        """
-        Arguments:
-           server_id (string)  : UUID of the server
-           floating_ip (string): IP address string for floating-ip
-        Returns:
-           None
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            nvconn.servers.remove_floating_ip(server_id, floating_ip)
-        except Exception as e:
-            logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s"  %str(e))
-            raise
-
-    def volume_list(self, server_id):
-        """
-          List of volumes attached to the server
-  
-          Arguments:
-              None
-          Returns:
-             List of dictionary objects where dictionary is representation of class (novaclient.v2.volumes.Volume)
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            volumes = nvconn.volumes.get_server_volumes(server_id=server_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Get volume information failed. Exception: %s"  %str(e))
-            raise
-
-        volume_info = [v.to_dict() for v in volumes]
-        return volume_info
-
-
-    def group_list(self):
-        """
-        List of Server Affinity and Anti-Affinity Groups
-
-        Arguments:
-            None
-        Returns:
-           List of dictionary objects where dictionary is representation of class (novaclient.v2.server_groups.ServerGroup)
-        """
-        nvconn =  self._get_nova_connection()
-        try:
-            group_list = nvconn.server_groups.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: Server Group List operation failed. Exception: %s"  %str(e))
-            raise
-
-        group_info = [ group.to_dict() for group in group_list ]
-        return group_info
-
-
-
-class NovaDriverV2(NovaDriver):
-    """
-    Driver class for novaclient V2 APIs
-    """
-    def __init__(self, ks_drv):
-        """
-        Constructor for NovaDriver
-        Arguments: KeystoneDriver class object
-        """
-        super(NovaDriverV2, self).__init__(ks_drv, 'compute', '2.0')
-
-class NovaDriverV21(NovaDriver):
-    """
-    Driver class for novaclient V2 APIs
-    """
-    def __init__(self, ks_drv):
-        """
-        Constructor for NovaDriver
-        Arguments: KeystoneDriver class object
-        """
-        super(NovaDriverV21, self).__init__(ks_drv, 'compute', '2.1')
-
-class GlanceDriver(object):
-    """
-    Driver for openstack glance-client
-    """
-    def __init__(self, ks_drv, service_name, version):
-        """
-        Constructor for GlanceDriver
-        Arguments: KeystoneDriver class object
-        """
-        self.ks_drv = ks_drv
-        self._service_name = service_name
-        self._version = version
-
-    def _get_glance_credentials(self):
-        """
-        Returns a dictionary of kwargs required to instantiate python-glanceclient class
-
-        Arguments: None
-
-        Returns:
-           A dictionary object of arguments
-        """
-        creds             = {}
-        creds['version']  = self._version
-        creds['endpoint'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
-        creds['token']    = self.ks_drv.get_auth_token()
-        creds['insecure'] = self.ks_drv.get_security_mode()
-        return creds
-
-    def _get_glance_connection(self):
-        """
-        Returns a object of class python-glanceclient
-        """
-        if not hasattr(self, '_glance_connection'):
-            self._glance_connection = glclient.Client(**self._get_glance_credentials())
-        else:
-            # Reinitialize if auth_token is no longer valid
-            if not self.ks_drv.is_auth_token_valid():
-                self._glance_connection = glclient.Client(**self._get_glance_credentials())
-        return self._glance_connection
-
-    def image_list(self):
-        """
-        Returns list of dictionaries. Each dictionary contains attributes associated with
-        image
-
-        Arguments: None
-
-        Returns: List of dictionaries.
-        """
-        glconn = self._get_glance_connection()
-        images = []
-        try:
-            image_info = glconn.images.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: List Image operation failed. Exception: %s" %(str(e)))
-            raise
-        images = [ img for img in image_info ]
-        return images
-
-    def image_create(self, **kwargs):
-        """
-        Creates an image
-        Arguments:
-           A dictionary of kwargs with following keys
-           {
-              'name'(string)         : Name of the image
-              'location'(string)     : URL (http://....) where image is located
-              'disk_format'(string)  : Disk format
-                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
-              'container_format'(string): Container format
-                                       Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
-              'tags'                 : A list of user tags
-              'checksum'             : The image md5 checksum
-           }
-        Returns:
-           image_id (string)  : UUID of the image
-
-        """
-        glconn = self._get_glance_connection()
-        try:
-            image = glconn.images.create(**kwargs)
-        except Exception as e:
-            logger.error("OpenstackDriver: Create Image operation failed. Exception: %s" %(str(e)))
-            raise
-
-        return image.id
-
-    def image_upload(self, image_id, fd):
-        """
-        Upload the image
-
-        Arguments:
-            image_id: UUID of the image
-            fd      : File descriptor for the image file
-        Returns: None
-        """
-        glconn = self._get_glance_connection()
-        try:
-            glconn.images.upload(image_id, fd)
-        except Exception as e:
-            logger.error("OpenstackDriver: Image upload operation failed. Exception: %s" %(str(e)))
-            raise
-
-    def image_add_location(self, image_id, location, metadata):
-        """
-        Add image URL location
-
-        Arguments:
-           image_id : UUID of the image
-           location : http URL for the image
-
-        Returns: None
-        """
-        glconn = self._get_glance_connection()
-        try:
-            image = glconn.images.add_location(image_id, location, metadata)
-        except Exception as e:
-            logger.error("OpenstackDriver: Image location add operation failed. Exception: %s" %(str(e)))
-            raise
-
-    def image_update(self):
-        pass
-
-    def image_delete(self, image_id):
-        """
-        Delete an image
-
-        Arguments:
-           image_id: UUID of the image
-
-        Returns: None
-
-        """
-        assert image_id == self._image_get(image_id)['id']
-        glconn = self._get_glance_connection()
-        try:
-            glconn.images.delete(image_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Delete Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
-            raise
-
-
-    def _image_get(self, image_id):
-        """
-        Returns a dictionary object of VM image attributes
-
-        Arguments:
-           image_id (string): UUID of the image
-
-        Returns:
-           A dictionary of the image attributes
-        """
-        glconn = self._get_glance_connection()
-        try:
-            image = glconn.images.get(image_id)
-        except GlanceException.HTTPBadRequest:
-            # RIFT-14241: The get image request occasionally returns the below message.  Retry in case of bad request exception.
-            # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
-            logger.warning("OpenstackDriver: Got bad request response during get_image request.  Retrying.")
-            image = glconn.images.get(image_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Get Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
-            raise
-
-        return image
-
-    def image_get(self, image_id):
-        """
-        Returns a dictionary object of VM image attributes
-
-        Arguments:
-           image_id (string): UUID of the image
-
-        Returns:
-           A dictionary of the image attributes
-        """
-        return self._image_get(image_id)
-
-class GlanceDriverV2(GlanceDriver):
-    """
-    Driver for openstack glance-client V2
-    """
-    def __init__(self, ks_drv):
-        super(GlanceDriverV2, self).__init__(ks_drv, 'image', 2)
-
-class NeutronDriver(object):
-    """
-    Driver for openstack neutron neutron-client
-    """
-    def __init__(self, ks_drv, service_name, version):
-        """
-        Constructor for NeutronDriver
-        Arguments: KeystoneDriver class object
-        """
-        self.ks_drv = ks_drv
-        self._service_name = service_name
-        self._version = version
-
-    def _get_neutron_credentials(self):
-        """
-        Returns a dictionary of kwargs required to instantiate python-neutronclient class
-
-        Returns:
-          Dictionary of kwargs
-        """
-        creds                 = {}
-        creds['api_version']  = self._version
-        creds['endpoint_url'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
-        creds['token']        = self.ks_drv.get_auth_token()
-        creds['tenant_name']  = self.ks_drv.get_tenant_name()
-        creds['insecure']     = self.ks_drv.get_security_mode()
-        return creds
-
-    def _get_neutron_connection(self):
-        """
-        Returns an object of class python-neutronclient
-        """
-        if not hasattr(self, '_neutron_connection'):
-            self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
-        else:
-            # Reinitialize if auth_token is no longer valid
-            if not self.ks_drv.is_auth_token_valid():
-                self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
-        return self._neutron_connection
-
-    def network_list(self):
-        """
-        Returns list of dictionaries. Each dictionary contains the attributes for a network
-        under project
-
-        Arguments: None
-
-        Returns:
-          A list of dictionaries
-        """
-        networks = []
-        ntconn   = self._get_neutron_connection()
-        try:
-            networks = ntconn.list_networks()
-        except Exception as e:
-            logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
-            raise
-        return networks['networks']
-
-    def network_create(self, **kwargs):
-        """
-        Creates a new network for the project
-
-        Arguments:
-          A dictionary with following key-values
-        {
-          name (string)              : Name of the network
-          admin_state_up(Boolean)    : True/False (Defaults: True)
-          external_router(Boolean)   : Connectivity with external router. True/False (Defaults: False)
-          shared(Boolean)            : Shared among tenants. True/False (Defaults: False)
-          physical_network(string)   : The physical network where this network object is implemented (optional).
-          network_type               : The type of physical network that maps to this network resource (optional).
-                                       Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
-          segmentation_id            : An isolated segment on the physical network. The network_type attribute
-                                       defines the segmentation model. For example, if the network_type value
-                                       is vlan, this ID is a vlan identifier. If the network_type value is gre,
-                                       this ID is a gre key.
-        }
-        """
-        params = {'network':
-                  {'name'                 : kwargs['name'],
-                   'admin_state_up'       : kwargs['admin_state_up'],
-                   'tenant_id'            : self.ks_drv.get_tenant_id(),
-                   'shared'               : kwargs['shared'],
-                   #'port_security_enabled': port_security_enabled,
-                   'router:external'      : kwargs['external_router']}}
-
-        if 'physical_network' in kwargs:
-            params['network']['provider:physical_network'] = kwargs['physical_network']
-        if 'network_type' in kwargs:
-            params['network']['provider:network_type'] = kwargs['network_type']
-        if 'segmentation_id' in kwargs:
-            params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
-
-        ntconn = self._get_neutron_connection()
-        try:
-            logger.debug("Calling neutron create_network() with params: %s", str(params))
-            net = ntconn.create_network(params)
-        except Exception as e:
-            logger.error("OpenstackDriver: Create Network operation failed. Exception: %s" %(str(e)))
-            raise
-        logger.debug("Got create_network response from neutron connection: %s", str(net))
-        network_id = net['network']['id']
-        if not network_id:
-            raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
-
-        return network_id
-
-    def network_delete(self, network_id):
-        """
-        Deletes a network identified by network_id
-
-        Arguments:
-          network_id (string): UUID of the network
-
-        Returns: None
-        """
-        assert network_id == self._network_get(network_id)['id']
-        ntconn = self._get_neutron_connection()
-        try:
-            ntconn.delete_network(network_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Delete Network operation failed. Exception: %s" %(str(e)))
-            raise
-
-    def _network_get(self, network_id):
-        """
-        Returns a dictionary object describing the attributes of the network
-
-        Arguments:
-           network_id (string): UUID of the network
-
-        Returns:
-           A dictionary object of the network attributes
-        """
-        ntconn = self._get_neutron_connection()
-        network = ntconn.list_networks(id = network_id)['networks']
-        if not network:
-            raise NeutronException.NotFound("Network with id %s not found"%(network_id))
-
-        return network[0]
-
-    def network_get(self, network_id):
-        """
-        Returns a dictionary object describing the attributes of the network
-
-        Arguments:
-           network_id (string): UUID of the network
-
-        Returns:
-           A dictionary object of the network attributes
-        """
-        return self._network_get(network_id)
-
-    def subnet_create(self, **kwargs):
-        """
-        Creates a subnet on the network
-
-        Arguments:
-        A dictionary with following key value pairs
-        {
-          network_id(string)  : UUID of the network where subnet needs to be created
-          subnet_cidr(string) : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
-          ip_version (integer): 4 for IPv4 and 6 for IPv6
+        self.flavor_utils = drv_utils.FlavorUtils(driver)
+        self.network_utils = drv_utils.NetworkUtils(driver)
+        self.image_utils = drv_utils.ImageUtils(driver)
+        self.compute_utils = drv_utils.ComputeUtils(driver)
         
-        }
-
-        Returns:
-           subnet_id (string): UUID of the created subnet
-        """
-        params = {}
-        params['network_id'] = kwargs['network_id']
-        params['ip_version'] = kwargs['ip_version']
-
-        # if params['ip_version'] == 6:
-        #     assert 0, "IPv6 is not supported"
-        
-        if 'subnetpool_id' in kwargs:
-            params['subnetpool_id'] = kwargs['subnetpool_id']
-        else:
-            params['cidr'] = kwargs['cidr']
-
-        if 'gateway_ip' in kwargs:
-            params['gateway_ip'] = kwargs['gateway_ip']
-        else:
-            params['gateway_ip'] = None
-
-        if 'dhcp_params' in kwargs:
-            params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
-            if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
-                end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
-                params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
-                                                'end' : end_address} ]
-                
-        if 'dns_server' in kwargs:
-            params['dns_nameservers'] = []
-            for server in kwargs['dns_server']:
-                params['dns_nameservers'].append(server)
-
-        ntconn = self._get_neutron_connection()
-        try:
-            subnet = ntconn.create_subnet({'subnets': [params]})
-        except Exception as e:
-            logger.error("OpenstackDriver: Create Subnet operation failed. Exception: %s" %(str(e)))
-            raise
-
-        return subnet['subnets'][0]['id']
-
-    def subnet_list(self):
-        """
-        Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
-
-        Arguments: None
-
-        Returns:
-           A dictionary of the objects of subnet attributes
-        """
-        ntconn = self._get_neutron_connection()
-        try:
-            subnets = ntconn.list_subnets()['subnets']
-        except Exception as e:
-            logger.error("OpenstackDriver: List Subnet operation failed. Exception: %s" %(str(e)))
-            raise
-        return subnets
-
-    def _subnet_get(self, subnet_id):
-        """
-        Returns a dictionary object describing the attributes of a subnet.
-
-        Arguments:
-           subnet_id (string): UUID of the subnet
-
-        Returns:
-           A dictionary object of the subnet attributes
-        """
-        ntconn = self._get_neutron_connection()
-        subnets = ntconn.list_subnets(id=subnet_id)
-        if not subnets['subnets']:
-            logger.error("OpenstackDriver: Get subnet operation failed for subnet_id: %s" %(subnet_id))
-            #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
-            return {'cidr': ''}
-        else:
-            return subnets['subnets'][0]
-
-    def subnet_get(self, subnet_id):
-        """
-        Returns a dictionary object describing the attributes of a subnet.
-
-        Arguments:
-           subnet_id (string): UUID of the subnet
-
-        Returns:
-           A dictionary object of the subnet attributes
-        """
-        return self._subnet_get(subnet_id)
-
-    def subnet_delete(self, subnet_id):
-        """
-        Deletes a subnet identified by subnet_id
-
-        Arguments:
-           subnet_id (string): UUID of the subnet to be deleted
-
-        Returns: None
-        """
-        ntconn = self._get_neutron_connection()
-        assert subnet_id == self._subnet_get(self,subnet_id)
-        try:
-            ntconn.delete_subnet(subnet_id)
-        except Exception as e:
-            logger.error("OpenstackDriver: Delete Subnet operation failed for subnet_id : %s. Exception: %s" %(subnet_id, str(e)))
-            raise
-
-    def port_list(self, **kwargs):
-        """
-        Returns a list of dictionaries. Each dictionary contains attributes describing the port
-
-        Arguments:
-            kwargs (dictionary): A dictionary for filters for port_list operation
-
-        Returns:
-           A dictionary of the objects of port attributes
-
-        """
-        ports  = []
-        ntconn = self._get_neutron_connection()
-
-        kwargs['tenant_id'] = self.ks_drv.get_tenant_id()
-
-        try:
-            ports  = ntconn.list_ports(**kwargs)
-        except Exception as e:
-            logger.info("OpenstackDriver: List Port operation failed. Exception: %s" %(str(e)))
-            raise
-        return ports['ports']
-
-    def port_create(self, **kwargs):
-        """
-        Create a port in network
-
-        Arguments:
-           A dictionary of following
-           {
-              name (string)      : Name of the port
-              network_id(string) : UUID of the network_id identifying the network to which port belongs
-              subnet_id(string)  : UUID of the subnet_id from which IP-address will be assigned to port
-              vnic_type(string)  : Possible values are "normal", "direct", "macvtap"
-           }
-        Returns:
-           port_id (string)   : UUID of the port
-        """
-        params = {
-            "port": {
-                "admin_state_up"    : kwargs['admin_state_up'],
-                "name"              : kwargs['name'],
-                "network_id"        : kwargs['network_id'],
-                "fixed_ips"         : [ {"subnet_id": kwargs['subnet_id']}],
-                "binding:vnic_type" : kwargs['port_type']}}
-        if 'port_security_enabled' in kwargs:
-            params["port"]["port_security_enabled"] = kwargs['port_security_enabled']
-
-        ntconn = self._get_neutron_connection()
-        try:
-            port  = ntconn.create_port(params)
-        except Exception as e:
-            logger.error("OpenstackDriver: Port Create operation failed. Exception: %s" %(str(e)))
-            raise
-        return port['port']['id']
-
-    def _port_get(self, port_id):
-        """
-        Returns a dictionary object describing the attributes of the port
-
-        Arguments:
-           port_id (string): UUID of the port
-
-        Returns:
-           A dictionary object of the port attributes
-        """
-        ntconn = self._get_neutron_connection()
-        port   = ntconn.list_ports(id=port_id)['ports']
-        if not port:
-            raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
-        return port[0]
-
-    def port_get(self, port_id):
-        """
-        Returns a dictionary object describing the attributes of the port
-
-        Arguments:
-           port_id (string): UUID of the port
-
-        Returns:
-           A dictionary object of the port attributes
-        """
-        return self._port_get(port_id)
-
-    def port_delete(self, port_id):
-        """
-        Deletes a port identified by port_id
-
-        Arguments:
-           port_id (string) : UUID of the port
-
-        Returns: None
-        """
-        assert port_id == self._port_get(port_id)['id']
-        ntconn = self._get_neutron_connection()
-        try:
-            ntconn.delete_port(port_id)
-        except Exception as e:
-            logger.error("Port Delete operation failed for port_id : %s. Exception: %s" %(port_id, str(e)))
-            raise
-
-    def security_group_list(self):
-        """
-        Returns a list of dictionaries. Each dictionary contains attributes describing the security group
-
-        Arguments:
-           None
-
-        Returns:
-           A dictionary of the objects of security group attributes
-        """
-        ntconn = self._get_neutron_connection()
-        try:
-            group_list = ntconn.list_security_groups(tenant_id=self.ks_drv.get_tenant_id())
-        except Exception as e:
-            logger.error("List Security group operation, Exception: %s" %(str(e)))
-            raise
-
-        if 'security_groups' in group_list:
-            return group_list['security_groups']
-        else:
-            return []
-
-    def subnetpool_list(self, **kwargs):
-        """
-        Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
-
-        Arguments:
-           None
-
-        Returns:
-           A dictionary of the objects of subnet prefix pool
-        """
-        ntconn = self._get_neutron_connection()
-        try:
-            pool_list = ntconn.list_subnetpools(**kwargs)
-        except Exception as e:
-            logger.error("List SubnetPool operation, Exception: %s" %(str(e)))
-            raise
-
-        if 'subnetpools' in pool_list:
-            return pool_list['subnetpools']
-        else:
-            return []
-        
-class NeutronDriverV2(NeutronDriver):
-    """
-    Driver for openstack neutron neutron-client v2
-    """
-    def __init__(self, ks_drv):
-        """
-        Constructor for NeutronDriver
-        Arguments: KeystoneDriver class object
-        """
-        super(NeutronDriverV2, self).__init__(ks_drv, 'network', '2.0')
-
-
-
-class CeilometerDriver(object):
-    """
-    Driver for openstack ceilometer client
-    """
-
-    def __init__(self, ks_drv, service_name, version):
-        """
-        Constructor for CeilometerDriver
-        Arguments: KeystoneDriver class object
-        """
-        self.ks_drv = ks_drv
-        self._service_name = service_name
-        self._version = version
-        self._client = None
+    @property
+    def flavor(self):
+        return self.flavor_utils
 
     @property
-    def version(self):
-        """The version of the ceilometer client used by the driver"""
-        return self._version
-
+    def compute(self):
+        return self.compute_utils
+
     @property
-    def client(self):
-        """The instance of ceilometer client used by the driver"""
-        if self._client is None or not self.ks_drv.is_auth_token_valid():
-            self._client = ceilo_client.Client(**self.credentials)
-
-        return self._client
-
+    def network(self):
+        return self.network_utils
+
     @property
-    def auth_token(self):
-        """The authorization token for the ceilometer client"""
-        try:
-            return self.ks_drv.get_auth_token()
-        except KeystoneExceptions.EndpointNotFound as e:
-            logger.error("OpenstackDriver: unable to get authorization token for ceilometer. Exception: %s" %(str(e)))
-            raise
+    def image(self):
+        return self.image_utils
 
-    @property
-    def security_mode(self):
-        """The security mode for the ceilometer client"""
-        try:
-            return self.ks_drv.get_security_mode()
-        except KeystoneExceptions.EndpointNotFound as e:
-            logger.error("OpenstackDriver: unable to get security mode for ceilometer. Exception: %s" %(str(e)))
-            raise
-
-    @property
-    def endpoint(self):
-        """The service endpoint for the ceilometer client"""
-        try:
-            return self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
-        except KeystoneExceptions.EndpointNotFound as e:
-            logger.error("OpenstackDriver: unable to get endpoint for ceilometer. Exception: %s" %(str(e)))
-            raise
-
-    @property
-    def credentials(self):
-        """A dictionary of credentials for the ceilometer client"""
-        return dict(
-                version=self.version,
-                endpoint=self.endpoint,
-                token=self.auth_token,
-                insecure=self.security_mode,
-                )
-
-    @property
-    def meters(self):
-        """A list of the available meters"""
-        try:
-            return self.client.meters.list()
-        except Exception as e:
-            logger.error("OpenstackDriver: List meters operation failed. Exception: %s" %(str(e)))
-            raise
-
-    @property
-    def alarms(self):
-        """The ceilometer client alarms manager"""
-        return self.client.alarms
-
-    def query_samples(self, vim_instance_id, counter_name, limit=1):
-        """Returns a list of samples
-
-        Arguments:
-            vim_instance_id - the ID of the VIM that the samples are from
-            counter_name    - the counter that the samples will come from
-            limit           - a limit on the number of samples to return
-                              (default: 1)
-
-        Returns:
-            A list of samples
-
-        """
-        try:
-            filter = json.dumps({
-                "and": [
-                    {"=": {"resource": vim_instance_id}},
-                    {"=": {"counter_name": counter_name}}
-                    ]
-                })
-            result = self.client.query_samples.query(filter=filter, limit=limit)
-            return result[-limit:]
-
-        except Exception as e:
-            logger.exception(e)
-
-        return []
-
-
-class CeilometerDriverV2(CeilometerDriver):
-    """
-    Driver for openstack ceilometer ceilometer-client
-    """
-    def __init__(self, ks_drv):
-        """
-        Constructor for CeilometerDriver
-        Arguments: CeilometerDriver class object
-        """
-        super(CeilometerDriverV2, self).__init__(ks_drv, 'metering', '2')
-
+
 class OpenstackDriver(object):
     """
     Driver for openstack nova, neutron, glance, keystone, swift, cinder services
     """
-    def __init__(self, username,
-                 password,
-                 auth_url,
-                 tenant_name,
-                 mgmt_network = None,
-                 cert_validate = False,
-                 user_domain_name = None,
-                 project_domain_name = None,
-                 region = None):
+    def __init__(self, logger = None, **kwargs):
         """
         OpenstackDriver Driver constructor
         Arguments:
-          username (string)                   : Username for project/tenant.
-          password (string)                   : Password
-          auth_url (string)                   : Keystone Authentication URL.
-          tenant_name (string)                : Openstack project name
-          mgmt_network(string, optional)      : Management network name. Each VM created with this cloud-account will
-                                                have a default interface into management network.
-          cert_validate (boolean, optional)   : In case of SSL/TLS connection if certificate validation is required or not.
-          user_domain_name                    : Domain name for user
-          project_domain_name                 : Domain name for project
-          region                              : Region name
+           logger: (instance of logging.Logger)
+           kwargs:  A dictionary of 
+            {
+              username (string)                   : Username for project/tenant.
+              password (string)                   : Password
+              auth_url (string)                   : Keystone Authentication URL.
+              project  (string)                   : Openstack project name
+              mgmt_network(string, optional)      : Management network name. Each VM created with this cloud-account will
+                                                    have a default interface into management network.
+              cert_validate (boolean, optional)   : In case of SSL/TLS connection if certificate validation is required or not.
+              user_domain                         : Domain name for user
+              project_domain                      : Domain name for project
+              region                              : Region name
+            }
         """
-        insecure = not cert_validate
-        if auth_url.find('/v3') != -1:
-            self.ks_drv        = KeystoneDriverV3(username,
-                                                  password,
-                                                  auth_url,
-                                                  tenant_name,
-                                                  insecure,
-                                                  user_domain_name,
-                                                  project_domain_name,
-                                                  region)
-            self.glance_drv    = GlanceDriverV2(self.ks_drv)
-            self.nova_drv      = NovaDriverV21(self.ks_drv)
-            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
-            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
-            self.cinder_drv     = CinderDriverV2(self.ks_drv)
-        elif auth_url.find('/v2') != -1:
-            
-            self.ks_drv        = KeystoneDriverV2(username,
-                                                  password,
-                                                  auth_url,
-                                                  tenant_name,
-                                                  insecure,
-                                                  region)
-            self.glance_drv    = GlanceDriverV2(self.ks_drv)
-            self.nova_drv      = NovaDriverV2(self.ks_drv)
-            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
-            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
-            self.cinder_drv     = CinderDriverV2(self.ks_drv)
+
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.driver')
+            self.log.setLevel(logging.DEBUG)
         else:
-            logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
-            raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+            self.log = logger
 
-        self._mgmt_network_id = None
-        if mgmt_network != None:
-            self._mgmt_network = mgmt_network
+        args =  dict(auth_url            = kwargs['auth_url'],
+                     username            = kwargs['username'],
+                     password            = kwargs['password'],
+                     project_name        = kwargs['project'],
+                     project_domain_name = kwargs['project_domain'] if 'project_domain' in kwargs else None,
+                     user_domain_name    = kwargs['user_domain'] if 'user_domain' in kwargs else None,)
 
-            networks = []
-            try:
-                ntconn   = self.neutron_drv._get_neutron_connection()
-                networks = ntconn.list_networks()
-            except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure) as e:
-                raise
-            except Exception as e:
-                logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
-                raise
+        cert_validate = kwargs['cert_validate'] if 'cert_validate' in kwargs else False
+        region = kwargs['region_name'] if 'region_name' in kwargs else False  # NOTE(review): default is False, not None — confirm downstream drivers treat both as "no region"
+        mgmt_network = kwargs['mgmt_network'] if 'mgmt_network' in kwargs else None
+        
+        discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'], logger = self.log)
+        (major, minor) = discover.get_version()
 
-            network_list = [ network for network in networks['networks'] if network['name'] == mgmt_network ]
+        self.sess_drv = sess_drv.SessionDriver(auth_method = 'password',
+                                               version = str(major),
+                                               cert_validate = cert_validate,
+                                               logger = self.log,
+                                               **args)
 
-            if not network_list:
-                raise NeutronException.NotFound("Could not find network %s" %(mgmt_network))
-            self._mgmt_network_id = network_list[0]['id']
+        self.ks_drv = ks_drv.KeystoneDriver(str(major),
+                                            self.sess_drv,
+                                            logger = self.log)
+        
+        self.nova_drv = nv_drv.NovaDriver(self.sess_drv,
+                                          region_name = region,
+                                          logger = self.log)
+        
+        self.neutron_drv = nt_drv.NeutronDriver(self.sess_drv,
+                                                region_name = region,
+                                                logger = self.log)
+        
+        self.glance_drv = gl_drv.GlanceDriver(self.sess_drv,
+                                              region_name = region,
+                                              logger = self.log)
+       
+        try: 
+           self.cinder_drv = ci_drv.CinderDriver(self.sess_drv,
+                                              region_name = region,
+                                              logger = self.log)
+        except Exception as e:
+           self.cinder_drv = None  # cinder is optional; treat volume service as unavailable if driver init fails
+        
+        self.ceilo_drv = ce_drv.CeilometerDriver(self.sess_drv,
+                                                 region_name = region,
+                                                 logger = self.log)
+        
+        self.utils = DriverUtilities(self)
+        
+        self._mgmt_network = mgmt_network
+        
+        self._cache = dict(neutron = dict(),
+                           nova = dict(),
+                           cinder = dict(),
+                           glance = dict())
+        self.build_resource_cache()
+
+    @property
+    def nova_cache(self):
+        return self._cache['nova']
+
+    @property
+    def neutron_cache(self):
+        return self._cache['neutron']
+    
+    @property
+    def glance_cache(self):
+        return self._cache['glance']
+
+    @property
+    def cinder_cache(self):
+        return self._cache['cinder']
+    
+    def build_resource_cache(self):
+        self.build_network_resource_cache()
+        self.build_nova_resource_cache()
+        self.build_cinder_resource_cache()
+        self.build_glance_resource_cache()
+
+    def _cache_populate(self, method, datatype, *args, **kwargs):
+        try:
+            rsp = method(*args, **kwargs)
+        except Exception as e:
+            self.log.exception("Exception %s occurred during execution of %s",
+                               str(e), method)
+            return datatype
+        else:
+            return rsp
+        
+    def _build_nova_security_group_list(self):
+        self.log.info("Building Nova security group cache")
+        self.nova_cache['security_groups'] = self._cache_populate(self.nova_drv.security_group_list,
+                                                                  list())
+        return self.nova_cache['security_groups']
+    
+    def _build_nova_affinity_group_list(self):
+        self.log.info("Building Nova affinity/anti-affinity group cache")
+        self.nova_cache['affinity_groups'] = self._cache_populate(self.nova_server_group_list,
+                                                                  list())              
+        return self.nova_cache['affinity_groups']
+    
+    def _build_neutron_security_group_list(self):
+        self.log.info("Discovering neutron security group")
+        self.neutron_cache['security_groups'] = self._cache_populate(self.neutron_security_group_list,
+                                                                     list())
+        return self.neutron_cache['security_groups']
+
+    def _build_neutron_subnet_prefix_list(self):
+        self.log.info("Discovering subnet prefix pools")
+        self.neutron_cache['subnet_pool'] = self._cache_populate(self.neutron_subnetpool_list,
+                                                                 list())
+        return self.neutron_cache['subnet_pool']
+
+    def _get_neutron_mgmt_network(self):
+        if self._mgmt_network:
+            self.log.info("Discovering management network %s", self._mgmt_network)
+            network_list = self._cache_populate(self.neutron_drv.network_get,
+                                                None,
+                                                **{'network_name':self._mgmt_network})
+            if network_list:
+                self.neutron_cache['mgmt_net'] = network_list['id']
+            else:
+                raise KeyError("Could not find management network %s" % self._mgmt_network)
+
+            
+    def _build_glance_image_list(self):
+        self.log.info("Discovering images")
+        self.glance_cache['images'] = self._cache_populate(self.glance_image_list,
+                                                           list())
+
+        return self.glance_cache['images']
+    
+    def _build_cinder_volume_list(self):
+        self.log.info("Discovering volumes")
+        # volumes are fetched once via _cache_populate below; an extra unguarded call here would double the API hit and bypass its exception handling
+        self.cinder_cache['volumes'] = self._cache_populate(self.cinder_volume_list,
+                                                           list())
+        return self.cinder_cache['volumes']
+                                                                 
+    def build_nova_resource_cache(self):
+        self.log.info("Building nova resource cache")
+        self._build_nova_security_group_list()
+        self._build_nova_affinity_group_list()
+        
+            
+    def build_network_resource_cache(self):
+        self.log.info("Building network resource cache")
+        self._get_neutron_mgmt_network()
+        self._build_neutron_security_group_list()
+        self._build_neutron_subnet_prefix_list()
+
+    def build_cinder_resource_cache(self):
+        self.log.info("Building cinder resource cache")
+        if self.cinder_drv is not None:
+            self._build_cinder_volume_list()
+
+    def build_glance_resource_cache(self):
+        self.log.info("Building glance resource cache")
+        self._build_glance_image_list()
+
+        
+    @property
+    def _nova_affinity_group(self):
+        if 'affinity_groups' in self.nova_cache:
+            return self.nova_cache['affinity_groups']
+        else:
+            return self._build_nova_affinity_group_list()
+
+    @property
+    def _nova_security_groups(self):
+        if 'security_groups' in self.nova_cache:
+            return self.nova_cache['security_groups']
+        else:
+            return self._build_nova_security_group_list()
+        
+    @property
+    def mgmt_network(self):
+        return self._mgmt_network
+    
+    @property
+    def _mgmt_network_id(self):
+        if 'mgmt_net' in self.neutron_cache:
+            return self.neutron_cache['mgmt_net']
+        else:
+            return list()
+
+    @property
+    def _neutron_security_groups(self):
+        if 'security_groups' in self.neutron_cache:
+            return self.neutron_cache['security_groups']
+        else:
+            return self._build_neutron_security_group_list()
+
+    @property
+    def _neutron_subnet_prefix_pool(self):
+        if 'subnet_pool' in self.neutron_cache:
+            return self.neutron_cache['subnet_pool']
+        else:
+            return self._build_neutron_subnet_prefix_list()
+        
+    @property
+    def _glance_image_list(self):
+        if 'images' in self.glance_cache:
+            return self.glance_cache['images']
+        else:
+            return self._build_glance_image_list()
+    
+    @property
+    def _cinder_volume_list(self):
+        if 'volumes' in self.cinder_cache:
+            return self.cinder_cache['volumes']
+        else:
+            return self._build_cinder_volume_list()
 
     def validate_account_creds(self):
         try:
-            ksconn = self.ks_drv._get_keystone_connection()
+            self.sess_drv.invalidate_auth_token()
+            self.sess_drv.auth_token
+            self.build_resource_cache()
         except KeystoneExceptions.AuthorizationFailure as e:
-            logger.error("OpenstackDriver: Unable to authenticate or validate the existing credentials. Exception: %s" %(str(e)))
+            self.log.error("Unable to authenticate or validate the existing credentials. Exception: %s", str(e))
             raise ValidationError("Invalid Credentials: "+ str(e))
         except Exception as e:
-            logger.error("OpenstackDriver: Could not connect to Openstack. Exception: %s" %(str(e)))
+            self.log.error("Could not connect to Openstack. Exception: %s", str(e))
             raise ValidationError("Connection Error: "+ str(e))
 
-    def get_mgmt_network_id(self):
-        return self._mgmt_network_id
-
+    
     def glance_image_create(self, **kwargs):
         if not 'disk_format' in kwargs:
             kwargs['disk_format'] = 'qcow2'
@@ -1791,6 +341,9 @@
     def glance_image_add_location(self, image_id, location):
         self.glance_drv.image_add_location(image_id, location)
 
+    def glance_image_update(self, image_id, remove_props = None, **kwargs):
+        self.glance_drv.image_update(image_id, remove_props=remove_props, **kwargs)
+
     def glance_image_delete(self, image_id):
         self.glance_drv.image_delete(image_id)
 
@@ -1800,17 +353,18 @@
     def glance_image_get(self, image_id):
         return self.glance_drv.image_get(image_id)
 
-
     def nova_flavor_list(self):
         return self.nova_drv.flavor_list()
 
-    def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs):
-        extra_specs = epa_specs if epa_specs else {}
+    def nova_flavor_find(self, **kwargs):
+        return self.nova_drv.flavor_find(**kwargs)
+    
+    def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs = dict()):
         return self.nova_drv.flavor_create(name,
                                            ram         = ram,
                                            vcpu        = vcpus,
                                            disk        = disk,
-                                           extra_specs = extra_specs)
+                                           extra_specs = epa_specs)
 
     def nova_flavor_delete(self, flavor_id):
         self.nova_drv.flavor_delete(flavor_id)
@@ -1819,34 +373,8 @@
         return self.nova_drv.flavor_get(flavor_id)
 
     def nova_server_create(self, **kwargs):
-        def _verify_image(image_id):
-            image = self.glance_drv.image_get(image_id)
-            if image['status'] != 'active':
-                raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
-
-        assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
-
-        if kwargs['block_device_mapping_v2'] is not None:
-            for block_map in kwargs['block_device_mapping_v2']:
-                if 'uuid' in block_map:
-                    _verify_image(block_map['uuid'])
-        else:
-            _verify_image(kwargs['image_id'])
-
-        # if 'network_list' in kwargs:
-        #     kwargs['network_list'].append(self._mgmt_network_id)
-        # else:
-        #     kwargs['network_list'] = [self._mgmt_network_id]
-
         if 'security_groups' not in kwargs:
-            nvconn = self.nova_drv._get_nova_connection()
-            sec_groups = nvconn.security_groups.list()
-            if sec_groups:
-                ## Should we add VM in all availability security_groups ???
-                kwargs['security_groups'] = [x.name for x in sec_groups]
-            else:
-                kwargs['security_groups'] = None
-
+            kwargs['security_groups'] = [ s['name'] for s in self._nova_security_groups ]
         return self.nova_drv.server_create(**kwargs)
 
     def nova_server_add_port(self, server_id, port_id):
@@ -1904,7 +432,7 @@
         return self.neutron_drv.network_list()
 
     def neutron_network_get(self, network_id):
-        return self.neutron_drv.network_get(network_id)
+        return self.neutron_drv.network_get(network_id=network_id)
 
     def neutron_network_create(self, **kwargs):
         return self.neutron_drv.network_create(**kwargs)
@@ -1913,7 +441,7 @@
         self.neutron_drv.network_delete(network_id)
 
     def neutron_subnet_list(self):
-        return self.neutron_drv.subnet_list()
+        return self.neutron_drv.subnet_list(**{})
 
     def neutron_subnet_get(self, subnet_id):
         return self.neutron_drv.subnet_get(subnet_id)
@@ -1933,7 +461,7 @@
             return pool_list[0]
         else:
             return None
-        
+
     def neutron_port_list(self, **kwargs):
         return self.neutron_drv.port_list(**kwargs)
 
@@ -1941,25 +469,21 @@
         return self.neutron_drv.port_get(port_id)
 
     def neutron_port_create(self, **kwargs):
-        subnets = [subnet for subnet in self.neutron_drv.subnet_list() if subnet['network_id'] == kwargs['network_id']]
-        assert len(subnets) == 1
-        kwargs['subnet_id'] = subnets[0]['id']
-        if not 'admin_state_up' in kwargs:
-            kwargs['admin_state_up'] = True
-        port_id =  self.neutron_drv.port_create(**kwargs)
-
+        port_id =  self.neutron_drv.port_create([kwargs])[0]
         if 'vm_id' in kwargs:
             self.nova_server_add_port(kwargs['vm_id'], port_id)
         return port_id
 
+    def neutron_multi_port_create(self, ports):
+        return self.neutron_drv.port_create(ports)
+        
     def neutron_security_group_list(self):
-        return self.neutron_drv.security_group_list()
+        return self.neutron_drv.security_group_list(**{})
 
     def neutron_security_group_by_name(self, group_name):
-        group_list = self.neutron_drv.security_group_list()
-        groups = [group for group in group_list if group['name'] == group_name]
-        if groups:
-            return groups[0]
+        group_list = self.neutron_drv.security_group_list(**{'name': group_name})
+        if group_list:
+            return group_list[0]
         else:
             return None
 
@@ -1982,47 +506,7 @@
             A dict of NFVI metrics
 
         """
-        def query_latest_sample(counter_name):
-            try:
-                filter = json.dumps({
-                    "and": [
-                        {"=": {"resource": vim_id}},
-                        {"=": {"counter_name": counter_name}}
-                        ]
-                    })
-                orderby = json.dumps([{"timestamp": "DESC"}])
-                result = self.ceilo_drv.client.query_samples.query(
-                        filter=filter,
-                        orderby=orderby,
-                        limit=1,
-                        )
-                return result[0]
-
-            except IndexError:
-                pass
-
-            except Exception as e:
-                logger.error("Got exception while querying ceilometer, exception details:%s " %str(e))
-
-            return None
-
-        memory_usage = query_latest_sample("memory.usage")
-        disk_usage = query_latest_sample("disk.usage")
-        cpu_util = query_latest_sample("cpu_util")
-
-        metrics = dict()
-
-        if memory_usage is not None:
-            memory_usage.volume = 1e6 * memory_usage.volume
-            metrics["memory_usage"] = memory_usage.to_dict()
-
-        if disk_usage is not None:
-            metrics["disk_usage"] = disk_usage.to_dict()
-
-        if cpu_util is not None:
-            metrics["cpu_util"] = cpu_util.to_dict()
-
-        return metrics
+        return self.ceilo_drv.nfvi_metrics(vim_id)
 
     def ceilo_alarm_list(self):
         """Returns a list of ceilometer alarms"""
@@ -2077,22 +561,20 @@
         alarm_actions = actions.get('alarm') if actions is not None else None
         insufficient_data_actions = actions.get('insufficient_data') if actions is not None else None
 
-        return self.ceilo_drv.client.alarms.create(
-                name=name,
-                meter_name=meter,
-                statistic=statistic,
-                comparison_operator=operation,
-                threshold=threshold,
-                period=period,
-                evaluation_periods=evaluations,
-                severity=severity,
-                repeat_actions=repeat,
-                enabled=enabled,
-                ok_actions=ok_actions,
-                alarm_actions=alarm_actions,
-                insufficient_data_actions=insufficient_data_actions,
-                **kwargs
-                )
+        return self.ceilo_drv.client.alarms.create(name=name,
+                                                   meter_name=meter,
+                                                   statistic=statistic,
+                                                   comparison_operator=operation,
+                                                   threshold=threshold,
+                                                   period=period,
+                                                   evaluation_periods=evaluations,
+                                                   severity=severity,
+                                                   repeat_actions=repeat,
+                                                   enabled=enabled,
+                                                   ok_actions=ok_actions,
+                                                   alarm_actions=alarm_actions,
+                                                   insufficient_data_actions=insufficient_data_actions,
+                                                   **kwargs)
 
     def ceilo_alarm_update(self, alarm_id, **kwargs):
         """Updates an existing alarm
@@ -2121,121 +603,3 @@
           
               
           
-class CinderDriver(object):
-      """
-      Driver for openstack cinder-client
-      """
-      def __init__(self, ks_drv, service_name, version):
-          """
-          Constructor for CinderDriver
-          Arguments: KeystoneDriver class object
-          """
-          self.ks_drv = ks_drv
-          self._service_name = service_name
-          self._version = version
-  
-      def _get_cinder_credentials(self):
-          """
-          Returns a dictionary of kwargs required to instantiate python-cinderclient class
-  
-          Arguments: None
-  
-          Returns:
-             A dictionary object of arguments
-          """
-          creds             = {}
-          creds['version']  = self._version 
-          creds['username']   = self.ks_drv.get_username() 
-          creds['api_key']   = self.ks_drv.get_password() 
-          creds['auth_url'] = self.ks_drv.get_service_endpoint("identity", "publicURL") 
-          creds['project_id'] = self.ks_drv.get_tenant_name() 
-          creds['insecure']   = self.ks_drv.get_security_mode()
-  
-          return creds
-
-      def _get_cinder_connection(self):
-          """
-          Returns a object of class python-cinderclient
-          """
-          if not hasattr(self, '_cinder_connection'):
-              self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
-          else:
-              # Reinitialize if auth_token is no longer valid
-              if not self.ks_drv.is_auth_token_valid():
-                  self._cinder_connection = cinder_client.Client(**self._get_cinder_credentials())
-          return self._cinder_connection
-  
-      def volume_list(self):
-          """
-          Returns list of dictionaries. Each dictionary contains attributes associated with
-          volumes
-  
-          Arguments: None
-  
-          Returns: List of dictionaries.
-          """
-          cinderconn = self._get_cinder_connection()
-          volumes = []
-          try:
-              volume_info = cinderconn.volumes.list()
-          except Exception as e:
-              logger.error("OpenstackDriver: List volumes operation failed. Exception: %s" %(str(e)))
-              raise
-          volumes = [ volume for volume in volume_info ]
-          return volumes
-  
-      def volume_get(self, volume_id):
-          """
-          Get details volume
-  
-          Arguments: None
-  
-          Returns: List of dictionaries.
-          """
-          cinderconn = self._get_cinder_connection()
-          try:
-              vol = cinderconn.volumes.get(volume_id)
-          except Exception as e:
-              logger.error("OpenstackDriver: Get volume operation failed. Exception: %s" %(str(e)))
-              raise
-          return vol
-
-      def volume_set_metadata(self, volume_id, metadata):
-          """
-          Set metadata for volume
-          Metadata is a dictionary of key-value pairs
-  
-          Arguments: None
-  
-          Returns: List of dictionaries.
-          """
-          cinderconn = self._get_cinder_connection()
-          try:
-              cinderconn.volumes.set_metadata(volume_id, metadata)
-          except Exception as e:
-              logger.error("OpenstackDriver: Set metadata operation failed. Exception: %s" %(str(e)))
-              raise
-  
-      def volume_delete_metadata(self, volume_id, metadata):
-          """
-          Delete metadata for volume
-          Metadata is a dictionary of key-value pairs
-  
-          Arguments: None
-  
-          Returns: List of dictionaries.
-          """
-          cinderconn = self._get_cinder_connection()
-          try:
-              cinderconn.volumes.delete_metadata(volume_id, metadata)
-          except Exception as e:
-              logger.error("OpenstackDriver: Delete metadata operation failed. Exception: %s" %(str(e)))
-              raise
-  
-class CinderDriverV2(CinderDriver):
-      """
-      Driver for openstack cinder-client V2
-      """
-      def __init__(self, ks_drv):
-          super(CinderDriverV2, self).__init__(ks_drv, 'volumev2', 2)
-  
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py
deleted file mode 100644
index eda3ccb..0000000
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py
+++ /dev/null
@@ -1,552 +0,0 @@
-#!/usr/bin/env python3
-
-# 
-#   Copyright 2016 RIFT.IO Inc
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-import re
-
-class OpenstackGuestEPAUtils(object):
-    """
-    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
-    """
-    def __init__(self):
-        self._mano_to_espec_cpu_pinning_policy = {
-            'DEDICATED' : 'dedicated',
-            'SHARED'    : 'shared',
-            'ANY'       : 'any',
-        }
-
-        self._espec_to_mano_cpu_pinning_policy = {
-            'dedicated' : 'DEDICATED',
-            'shared'    : 'SHARED',
-            'any'       : 'ANY',
-        }
-        
-        self._mano_to_espec_mempage_size = {
-            'LARGE'        : 'large', 
-            'SMALL'        : 'small',
-            'SIZE_2MB'     :  2048,
-            'SIZE_1GB'     :  1048576,
-            'PREFER_LARGE' : 'large',
-        }
-
-        self._espec_to_mano_mempage_size = {
-            'large'        : 'LARGE', 
-            'small'        : 'SMALL',
-             2048          : 'SIZE_2MB',
-             1048576       : 'SIZE_1GB',
-            'large'        : 'PREFER_LARGE',
-        }
-
-        self._mano_to_espec_cpu_thread_pinning_policy = {
-            'AVOID'    : 'avoid',
-            'SEPARATE' : 'separate',
-            'ISOLATE'  : 'isolate',
-            'PREFER'   : 'prefer',
-        }
-
-        self._espec_to_mano_cpu_thread_pinning_policy = {
-            'avoid'    : 'AVOID',
-            'separate' : 'SEPARATE',
-            'isolate'  : 'ISOLATE',
-            'prefer'   : 'PREFER',
-        }
-
-        self._espec_to_mano_numa_memory_policy = {
-            'strict'   : 'STRICT',
-            'preferred': 'PREFERRED'
-        }
-
-        self._mano_to_espec_numa_memory_policy = {
-            'STRICT'   : 'strict',
-            'PREFERRED': 'preferred'
-        }
-
-    def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
-        if cpu_pinning_policy in self._mano_to_espec_cpu_pinning_policy:
-            return self._mano_to_espec_cpu_pinning_policy[cpu_pinning_policy]
-        else:
-            return None
-
-    def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
-        if cpu_pinning_policy in self._espec_to_mano_cpu_pinning_policy:
-            return self._espec_to_mano_cpu_pinning_policy[cpu_pinning_policy]
-        else:
-            return None
-
-    def mano_to_extra_spec_mempage_size(self, mempage_size):
-        if mempage_size in self._mano_to_espec_mempage_size:
-            return self._mano_to_espec_mempage_size[mempage_size]
-        else:
-            return None
-        
-    def extra_spec_to_mano_mempage_size(self, mempage_size):
-        if mempage_size in self._espec_to_mano_mempage_size:
-            return self._espec_to_mano_mempage_size[mempage_size]
-        else:
-            return None
-
-    def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
-        if cpu_thread_pinning_policy in self._mano_to_espec_cpu_thread_pinning_policy:
-            return self._mano_to_espec_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
-        else:
-            return None
-
-    def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
-        if cpu_thread_pinning_policy in self._espec_to_mano_cpu_thread_pinning_policy:
-            return self._espec_to_mano_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
-        else:
-            return None
-
-    def mano_to_extra_spec_trusted_execution(self, trusted_execution):
-        if trusted_execution:
-            return 'trusted'
-        else:
-            return 'untrusted'
-
-    def extra_spec_to_mano_trusted_execution(self, trusted_execution):
-        if trusted_execution == 'trusted':
-            return True
-        elif trusted_execution == 'untrusted':
-            return False
-        else:
-            return None
-        
-    def mano_to_extra_spec_numa_node_count(self, numa_node_count):
-        return numa_node_count
-
-    def extra_specs_to_mano_numa_node_count(self, numa_node_count):
-        return int(numa_node_count)
-    
-    def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
-        if numa_memory_policy in self._mano_to_espec_numa_memory_policy:
-            return self._mano_to_espec_numa_memory_policy[numa_memory_policy]
-        else:
-            return None
-
-    def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
-        if numa_memory_policy in self._espec_to_mano_numa_memory_policy:
-            return self._espec_to_mano_numa_memory_policy[numa_memory_policy]
-        else:
-            return None
-        
-                                                          
-    
-    
-class OpenstackHostEPAUtils(object):
-    """
-    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
-    """
-    def __init__(self):
-        self._mano_to_espec_cpumodel = {
-            "PREFER_WESTMERE"     : "Westmere",
-            "REQUIRE_WESTMERE"    : "Westmere",
-            "PREFER_SANDYBRIDGE"  : "SandyBridge",
-            "REQUIRE_SANDYBRIDGE" : "SandyBridge",
-            "PREFER_IVYBRIDGE"    : "IvyBridge",
-            "REQUIRE_IVYBRIDGE"   : "IvyBridge",
-            "PREFER_HASWELL"      : "Haswell",
-            "REQUIRE_HASWELL"     : "Haswell",
-            "PREFER_BROADWELL"    : "Broadwell",
-            "REQUIRE_BROADWELL"   : "Broadwell",
-            "PREFER_NEHALEM"      : "Nehalem",
-            "REQUIRE_NEHALEM"     : "Nehalem",
-            "PREFER_PENRYN"       : "Penryn",
-            "REQUIRE_PENRYN"      : "Penryn",
-            "PREFER_CONROE"       : "Conroe",
-            "REQUIRE_CONROE"      : "Conroe",
-            "PREFER_CORE2DUO"     : "Core2Duo",
-            "REQUIRE_CORE2DUO"    : "Core2Duo",
-        }
-
-        self._espec_to_mano_cpumodel = {
-            "Westmere"     : "REQUIRE_WESTMERE",
-            "SandyBridge"  : "REQUIRE_SANDYBRIDGE",
-            "IvyBridge"    : "REQUIRE_IVYBRIDGE",
-            "Haswell"      : "REQUIRE_HASWELL",
-            "Broadwell"    : "REQUIRE_BROADWELL",
-            "Nehalem"      : "REQUIRE_NEHALEM",
-            "Penryn"       : "REQUIRE_PENRYN",
-            "Conroe"       : "REQUIRE_CONROE",
-            "Core2Duo"     : "REQUIRE_CORE2DUO",
-        }
-
-        self._mano_to_espec_cpuarch = {
-            "PREFER_X86"     : "x86",
-            "REQUIRE_X86"    : "x86",
-            "PREFER_X86_64"  : "x86_64",
-            "REQUIRE_X86_64" : "x86_64",
-            "PREFER_I686"    : "i686",
-            "REQUIRE_I686"   : "i686",
-            "PREFER_IA64"    : "ia64",
-            "REQUIRE_IA64"   : "ia64",
-            "PREFER_ARMV7"   : "ARMv7",
-            "REQUIRE_ARMV7"  : "ARMv7",
-            "PREFER_ARMV8"   : "ARMv8-A",
-            "REQUIRE_ARMV8"  : "ARMv8-A",
-        }
-
-        self._espec_to_mano_cpuarch = {
-            "x86"     : "REQUIRE_X86",
-            "x86_64"  : "REQUIRE_X86_64",
-            "i686"    : "REQUIRE_I686",
-            "ia64"    : "REQUIRE_IA64",
-            "ARMv7-A" : "REQUIRE_ARMV7",
-            "ARMv8-A" : "REQUIRE_ARMV8",
-        }
-
-        self._mano_to_espec_cpuvendor = {
-            "PREFER_INTEL"  : "Intel",
-            "REQUIRE_INTEL" : "Intel",
-            "PREFER_AMD"    : "AMD",
-            "REQUIRE_AMD"   : "AMD",
-        }
-
-        self._espec_to_mano_cpuvendor = {
-            "Intel" : "REQUIRE_INTEL",
-            "AMD"   : "REQUIRE_AMD",
-        }
-
-        self._mano_to_espec_cpufeatures = {
-            "PREFER_AES"       : "aes",
-            "REQUIRE_AES"      : "aes",
-            "REQUIRE_VME"      : "vme",
-            "PREFER_VME"       : "vme",
-            "REQUIRE_DE"       : "de",
-            "PREFER_DE"        : "de",
-            "REQUIRE_PSE"      : "pse",
-            "PREFER_PSE"       : "pse",
-            "REQUIRE_TSC"      : "tsc",
-            "PREFER_TSC"       : "tsc",
-            "REQUIRE_MSR"      : "msr",
-            "PREFER_MSR"       : "msr",
-            "REQUIRE_PAE"      : "pae",
-            "PREFER_PAE"       : "pae",
-            "REQUIRE_MCE"      : "mce",
-            "PREFER_MCE"       : "mce",
-            "REQUIRE_CX8"      : "cx8",
-            "PREFER_CX8"       : "cx8",
-            "REQUIRE_APIC"     : "apic",
-            "PREFER_APIC"      : "apic",
-            "REQUIRE_SEP"      : "sep",
-            "PREFER_SEP"       : "sep",
-            "REQUIRE_MTRR"     : "mtrr",
-            "PREFER_MTRR"      : "mtrr",
-            "REQUIRE_PGE"      : "pge",
-            "PREFER_PGE"       : "pge",
-            "REQUIRE_MCA"      : "mca",
-            "PREFER_MCA"       : "mca",
-            "REQUIRE_CMOV"     : "cmov",
-            "PREFER_CMOV"      : "cmov",
-            "REQUIRE_PAT"      : "pat",
-            "PREFER_PAT"       : "pat",
-            "REQUIRE_PSE36"    : "pse36",
-            "PREFER_PSE36"     : "pse36",
-            "REQUIRE_CLFLUSH"  : "clflush",
-            "PREFER_CLFLUSH"   : "clflush",
-            "REQUIRE_DTS"      : "dts",
-            "PREFER_DTS"       : "dts",
-            "REQUIRE_ACPI"     : "acpi",
-            "PREFER_ACPI"      : "acpi",
-            "REQUIRE_MMX"      : "mmx",
-            "PREFER_MMX"       : "mmx",
-            "REQUIRE_FXSR"     : "fxsr",
-            "PREFER_FXSR"      : "fxsr",
-            "REQUIRE_SSE"      : "sse",
-            "PREFER_SSE"       : "sse",
-            "REQUIRE_SSE2"     : "sse2",
-            "PREFER_SSE2"      : "sse2",
-            "REQUIRE_SS"       : "ss",
-            "PREFER_SS"        : "ss",
-            "REQUIRE_HT"       : "ht",
-            "PREFER_HT"        : "ht",
-            "REQUIRE_TM"       : "tm",
-            "PREFER_TM"        : "tm",
-            "REQUIRE_IA64"     : "ia64",
-            "PREFER_IA64"      : "ia64",
-            "REQUIRE_PBE"      : "pbe",
-            "PREFER_PBE"       : "pbe",
-            "REQUIRE_RDTSCP"   : "rdtscp",
-            "PREFER_RDTSCP"    : "rdtscp",
-            "REQUIRE_PNI"      : "pni",
-            "PREFER_PNI"       : "pni",
-            "REQUIRE_PCLMULQDQ": "pclmulqdq",
-            "PREFER_PCLMULQDQ" : "pclmulqdq",
-            "REQUIRE_DTES64"   : "dtes64",
-            "PREFER_DTES64"    : "dtes64",
-            "REQUIRE_MONITOR"  : "monitor",
-            "PREFER_MONITOR"   : "monitor",
-            "REQUIRE_DS_CPL"   : "ds_cpl",
-            "PREFER_DS_CPL"    : "ds_cpl",
-            "REQUIRE_VMX"      : "vmx",
-            "PREFER_VMX"       : "vmx",
-            "REQUIRE_SMX"      : "smx",
-            "PREFER_SMX"       : "smx",
-            "REQUIRE_EST"      : "est",
-            "PREFER_EST"       : "est",
-            "REQUIRE_TM2"      : "tm2",
-            "PREFER_TM2"       : "tm2",
-            "REQUIRE_SSSE3"    : "ssse3",
-            "PREFER_SSSE3"     : "ssse3",
-            "REQUIRE_CID"      : "cid",
-            "PREFER_CID"       : "cid",
-            "REQUIRE_FMA"      : "fma",
-            "PREFER_FMA"       : "fma",
-            "REQUIRE_CX16"     : "cx16",
-            "PREFER_CX16"      : "cx16",
-            "REQUIRE_XTPR"     : "xtpr",
-            "PREFER_XTPR"      : "xtpr",
-            "REQUIRE_PDCM"     : "pdcm",
-            "PREFER_PDCM"      : "pdcm",
-            "REQUIRE_PCID"     : "pcid",
-            "PREFER_PCID"      : "pcid",
-            "REQUIRE_DCA"      : "dca",
-            "PREFER_DCA"       : "dca",
-            "REQUIRE_SSE4_1"   : "sse4_1",
-            "PREFER_SSE4_1"    : "sse4_1",
-            "REQUIRE_SSE4_2"   : "sse4_2",
-            "PREFER_SSE4_2"    : "sse4_2",
-            "REQUIRE_X2APIC"   : "x2apic",
-            "PREFER_X2APIC"    : "x2apic",
-            "REQUIRE_MOVBE"    : "movbe",
-            "PREFER_MOVBE"     : "movbe",
-            "REQUIRE_POPCNT"   : "popcnt",
-            "PREFER_POPCNT"    : "popcnt",
-            "REQUIRE_TSC_DEADLINE_TIMER"   : "tsc_deadline_timer",
-            "PREFER_TSC_DEADLINE_TIMER"    : "tsc_deadline_timer",
-            "REQUIRE_XSAVE"    : "xsave",
-            "PREFER_XSAVE"     : "xsave",
-            "REQUIRE_AVX"      : "avx",
-            "PREFER_AVX"       : "avx",
-            "REQUIRE_F16C"     : "f16c",
-            "PREFER_F16C"      : "f16c",
-            "REQUIRE_RDRAND"   : "rdrand",
-            "PREFER_RDRAND"    : "rdrand",
-            "REQUIRE_FSGSBASE" : "fsgsbase",
-            "PREFER_FSGSBASE"  : "fsgsbase",
-            "REQUIRE_BMI1"     : "bmi1",
-            "PREFER_BMI1"      : "bmi1",
-            "REQUIRE_HLE"      : "hle",
-            "PREFER_HLE"       : "hle",
-            "REQUIRE_AVX2"     : "avx2",
-            "PREFER_AVX2"      : "avx2",
-            "REQUIRE_SMEP"     : "smep",
-            "PREFER_SMEP"      : "smep",
-            "REQUIRE_BMI2"     : "bmi2",
-            "PREFER_BMI2"      : "bmi2",
-            "REQUIRE_ERMS"     : "erms",
-            "PREFER_ERMS"      : "erms",
-            "REQUIRE_INVPCID"  : "invpcid",
-            "PREFER_INVPCID"   : "invpcid",
-            "REQUIRE_RTM"      : "rtm",
-            "PREFER_RTM"       : "rtm",
-            "REQUIRE_MPX"      : "mpx",
-            "PREFER_MPX"       : "mpx",
-            "REQUIRE_RDSEED"   : "rdseed",
-            "PREFER_RDSEED"    : "rdseed",
-            "REQUIRE_ADX"      : "adx",
-            "PREFER_ADX"       : "adx",
-            "REQUIRE_SMAP"     : "smap",
-            "PREFER_SMAP"      : "smap",
-        }
-
-        self._espec_to_mano_cpufeatures = {
-            "aes"      : "REQUIRE_AES",
-            "vme"      : "REQUIRE_VME",
-            "de"       : "REQUIRE_DE",
-            "pse"      : "REQUIRE_PSE",
-            "tsc"      : "REQUIRE_TSC",
-            "msr"      : "REQUIRE_MSR",
-            "pae"      : "REQUIRE_PAE",
-            "mce"      : "REQUIRE_MCE",
-            "cx8"      : "REQUIRE_CX8",
-            "apic"     : "REQUIRE_APIC",
-            "sep"      : "REQUIRE_SEP",
-            "mtrr"     : "REQUIRE_MTRR",
-            "pge"      : "REQUIRE_PGE",
-            "mca"      : "REQUIRE_MCA",
-            "cmov"     : "REQUIRE_CMOV",
-            "pat"      : "REQUIRE_PAT",
-            "pse36"    : "REQUIRE_PSE36",
-            "clflush"  : "REQUIRE_CLFLUSH",
-            "dts"      : "REQUIRE_DTS",
-            "acpi"     : "REQUIRE_ACPI",
-            "mmx"      : "REQUIRE_MMX",
-            "fxsr"     : "REQUIRE_FXSR",
-            "sse"      : "REQUIRE_SSE",
-            "sse2"     : "REQUIRE_SSE2",
-            "ss"       : "REQUIRE_SS",
-            "ht"       : "REQUIRE_HT",
-            "tm"       : "REQUIRE_TM",
-            "ia64"     : "REQUIRE_IA64",
-            "pbe"      : "REQUIRE_PBE",
-            "rdtscp"   : "REQUIRE_RDTSCP",
-            "pni"      : "REQUIRE_PNI",
-            "pclmulqdq": "REQUIRE_PCLMULQDQ",
-            "dtes64"   : "REQUIRE_DTES64",
-            "monitor"  : "REQUIRE_MONITOR",
-            "ds_cpl"   : "REQUIRE_DS_CPL",
-            "vmx"      : "REQUIRE_VMX",
-            "smx"      : "REQUIRE_SMX",
-            "est"      : "REQUIRE_EST",
-            "tm2"      : "REQUIRE_TM2",
-            "ssse3"    : "REQUIRE_SSSE3",
-            "cid"      : "REQUIRE_CID",
-            "fma"      : "REQUIRE_FMA",
-            "cx16"     : "REQUIRE_CX16",
-            "xtpr"     : "REQUIRE_XTPR",
-            "pdcm"     : "REQUIRE_PDCM",
-            "pcid"     : "REQUIRE_PCID",
-            "dca"      : "REQUIRE_DCA",
-            "sse4_1"   : "REQUIRE_SSE4_1",
-            "sse4_2"   : "REQUIRE_SSE4_2",
-            "x2apic"   : "REQUIRE_X2APIC",
-            "movbe"    : "REQUIRE_MOVBE",
-            "popcnt"   : "REQUIRE_POPCNT",
-            "tsc_deadline_timer"   : "REQUIRE_TSC_DEADLINE_TIMER",
-            "xsave"    : "REQUIRE_XSAVE",
-            "avx"      : "REQUIRE_AVX",
-            "f16c"     : "REQUIRE_F16C",
-            "rdrand"   : "REQUIRE_RDRAND",
-            "fsgsbase" : "REQUIRE_FSGSBASE",
-            "bmi1"     : "REQUIRE_BMI1",
-            "hle"      : "REQUIRE_HLE",
-            "avx2"     : "REQUIRE_AVX2",
-            "smep"     : "REQUIRE_SMEP",
-            "bmi2"     : "REQUIRE_BMI2",
-            "erms"     : "REQUIRE_ERMS",
-            "invpcid"  : "REQUIRE_INVPCID",
-            "rtm"      : "REQUIRE_RTM",
-            "mpx"      : "REQUIRE_MPX",
-            "rdseed"   : "REQUIRE_RDSEED",
-            "adx"      : "REQUIRE_ADX",
-            "smap"     : "REQUIRE_SMAP",
-        }
-
-    def mano_to_extra_spec_cpu_model(self, cpu_model):
-        if cpu_model in self._mano_to_espec_cpumodel:
-            return self._mano_to_espec_cpumodel[cpu_model]
-        else:
-            return None
-            
-    def extra_specs_to_mano_cpu_model(self, cpu_model):
-        if cpu_model in self._espec_to_mano_cpumodel:
-            return self._espec_to_mano_cpumodel[cpu_model]
-        else:
-            return None
-        
-    def mano_to_extra_spec_cpu_arch(self, cpu_arch):
-        if cpu_arch in self._mano_to_espec_cpuarch:
-            return self._mano_to_espec_cpuarch[cpu_arch]
-        else:
-            return None
-        
-    def extra_specs_to_mano_cpu_arch(self, cpu_arch):
-        if cpu_arch in self._espec_to_mano_cpuarch:
-            return self._espec_to_mano_cpuarch[cpu_arch]
-        else:
-            return None
-    
-    def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
-        if cpu_vendor in self._mano_to_espec_cpuvendor:
-            return self._mano_to_espec_cpuvendor[cpu_vendor]
-        else:
-            return None
-
-    def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
-        if cpu_vendor in self._espec_to_mano_cpuvendor:
-            return self._espec_to_mano_cpuvendor[cpu_vendor]
-        else:
-            return None
-    
-    def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
-        return cpu_sockets
-
-    def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
-        return int(cpu_sockets)
-    
-    def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
-        return cpu_core_count
-
-    def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
-        return int(cpu_core_count)
-    
-    def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
-        return core_thread_count
-
-    def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
-        return int(core_thread_count)
-
-    def mano_to_extra_spec_cpu_features(self, features):
-        cpu_features = []
-        epa_feature_str = None
-        for f in features:
-            if f in self._mano_to_espec_cpufeatures:
-                cpu_features.append(self._mano_to_espec_cpufeatures[f])
-                
-        if len(cpu_features) > 1:
-            epa_feature_str =  '<all-in> '+ " ".join(cpu_features)
-        elif len(cpu_features) == 1:
-            epa_feature_str = " ".join(cpu_features)
-
-        return epa_feature_str
-
-    def extra_spec_to_mano_cpu_features(self, features):
-        oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=', 's==', 's!=', 's<', 's<=', 's>', 's>=']
-        cpu_features = []
-        result = None
-        for oper in oper_symbols:
-            regex = '^'+oper+' (.*?)$'
-            result = re.search(regex, features)
-            if result is not None:
-                break
-            
-        if result is not None:
-            feature_list = result.group(1)
-        else:
-            feature_list = features
-
-        for f in feature_list.split():
-            if f in self._espec_to_mano_cpufeatures:
-                cpu_features.append(self._espec_to_mano_cpufeatures[f])
-
-        return cpu_features
-    
-
-class OpenstackExtraSpecUtils(object):
-    """
-    General utility class for flavor Extra Specs processing
-    """
-    def __init__(self):
-        self.host = OpenstackHostEPAUtils()
-        self.guest = OpenstackGuestEPAUtils()
-        self.extra_specs_keywords = [ 'hw:cpu_policy',
-                                      'hw:cpu_threads_policy',
-                                      'hw:mem_page_size',
-                                      'hw:numa_nodes',
-                                      'hw:numa_mempolicy',
-                                      'hw:numa_cpus',
-                                      'hw:numa_mem',
-                                      'trust:trusted_host',
-                                      'pci_passthrough:alias',
-                                      'capabilities:cpu_info:model',
-                                      'capabilities:cpu_info:arch',
-                                      'capabilities:cpu_info:vendor',
-                                      'capabilities:cpu_info:topology:sockets',
-                                      'capabilities:cpu_info:topology:cores',
-                                      'capabilities:cpu_info:topology:threads',
-                                      'capabilities:cpu_info:features',
-                                ]
-        self.extra_specs_regex = re.compile("^"+"|^".join(self.extra_specs_keywords))
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/__init__.py
new file mode 100644
index 0000000..a1bf770
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .portchain_drv import (
+    L2PortChainDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/portchain_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/portchain_drv.py
new file mode 100644
index 0000000..78e805d
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/portchain/portchain_drv.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import json
+import requests
+
+
+class L2PortChainDriver(object):
+    """
+    Driver for openstack neutron neutron-client v2
+    """
+    PORT_PAIRS_URL='/sfc/port_pairs' 
+    PORT_PAIR_GROUPS_URL='/sfc/port_pair_groups' 
+    PORT_CHAINS_URL='/sfc/port_chains' 
+    FLOW_CLASSIFIERS_URL='/sfc/flow_classifiers' 
+
+    def __init__(self, sess_handle, neutron_base_url, logger = None):
+        """
+        Constructor for L2PortChainDriver class
+        Arguments: 
+           sess_handle (instance of class SessionDriver)
+           neutron_base_url  Neutron service endpoint
+           logger (instance of logging.Logger)
+        """
+        if logger is None:
+            self.log = logging.getLogger('rwcal.openstack.portchain')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        self._sess = sess_handle
+        self._neutron_base_url = neutron_base_url
+        
+    @property
+    def neutron_base_url(self): 
+        return self._neutron_base_url
+
+    @property
+    def tenant_id(self):
+        return self._sess.project_id
+
+    @property
+    def auth_token(self):
+        return self._sess.auth_token
+
+    def rest_api_handler(self,url,method,payload=None,refresh_token=True):
+        try:
+            if method == 'GET':
+                result=requests.get(self.neutron_base_url+url,
+                                    headers={"X-Auth-Token":self.auth_token,
+                                             "Content-Type": "application/json" })
+            elif method == 'POST':
+                self.log.debug("POST request being sent for url %s has payload %s",
+                               self.neutron_base_url+url,payload)
+                
+                result=requests.post(self.neutron_base_url+url,
+                                     headers={"X-Auth-Token":self.auth_token,
+                                              "Content-Type": "application/json"},
+                                     data=payload)
+            elif method == 'PUT':
+                result=requests.put(self.neutron_base_url+url,
+                                    headers={"X-Auth-Token":self.auth_token,
+                                             "Content-Type": "application/json"},
+                                    data=payload)
+            elif method == 'DELETE':
+                result=requests.delete(self.neutron_base_url+url,
+                                       headers={"X-Auth-Token": self.auth_token,
+                                                "Content-Type": "application/json"})
+            else:
+                raise("Invalid method name %s",method)
+            
+            result.raise_for_status()
+            
+        except requests.exceptions.HTTPError as e:
+            if result.status_code == 401 and refresh_token:
+                self._sess.invalidate_auth_token()
+                result = self.rest_api_handler(url,method,payload=payload,refresh_token=False)
+            else:
+                self.log.exception(e)
+                raise
+            
+        return result 
+
+    def create_port_pair(self,name,ingress_port,egress_port):
+        """
+        Create port pair
+        """
+        port_pair_dict = {}
+        port_pair = {}
+        port_pair_dict["name"] = name
+        port_pair_dict['tenant_id'] = self.tenant_id
+        port_pair_dict['ingress'] = ingress_port
+        port_pair_dict['egress'] = egress_port
+        port_pair["port_pair"] = port_pair_dict
+        port_pair_json = json.dumps(port_pair)
+
+        try: 
+            result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL, 'POST', port_pair_json)
+            result.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            if (result.status_code == 400 and 'NeutronError' in result.json() 
+                    and result.json()['NeutronError']['type'] == 'PortPairIngressEgressInUse'): 
+                self.log.info("Port pair with same ingress and egress port already exists")
+                result = self.get_port_pair_list()
+                port_pair_list = result.json()['port_pairs']
+                port_pair_ids = [ pp['id'] for pp in port_pair_list if pp['ingress'] == ingress_port and pp['egress'] == egress_port]
+                return port_pair_ids[0]
+            else: 
+                self.log.exception(e)
+                raise
+
+        self.log.debug("Port Pair response received is status code: %s, response: %s",
+                       result.status_code, result.json())
+        return result.json()['port_pair']['id']
+
+    def delete_port_pair(self,port_pair_id):
+        try:
+            result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL+'/{}'.format(port_pair_id), 'DELETE')
+            result.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            if (result.status_code == 409 and 'NeutronError' in result.json() 
+                and result.json()['NeutronError']['type'] == 'PortPairInUse'): 
+                self.log.info("Port pair is in use")
+            else:
+                self.log.exception(e)
+                raise
+        self.log.debug("Delete Port Pair response received is status code: %s", result.status_code)
+        
+    def get_port_pair(self,port_pair_id):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL+'/{}'.format(port_pair_id), 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Pair response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def get_port_pair_list(self):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIRS_URL, 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Pair list response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def create_port_pair_group(self,name,port_pairs):
+        """
+        Create port pair group
+        """
+        port_pair_group_dict = {}
+        port_pair_group_dict["name"] = name
+        port_pair_group_dict['tenant_id'] = self.tenant_id
+        port_pair_group_dict['port_pairs'] = list()
+        port_pair_group_dict['port_pairs'].extend(port_pairs)
+        port_pair_group = {}
+        port_pair_group["port_pair_group"] = port_pair_group_dict
+        port_pair_group_json = json.dumps(port_pair_group)
+
+        try:
+            result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL, 'POST', port_pair_group_json)
+            result.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            if (result.status_code == 409 and 'NeutronError' in result.json() 
+                and result.json()['NeutronError']['type'] == 'PortPairInUse'): 
+                self.log.info("Port pair group with same port pair already exists")
+                result = self.get_port_pair_group_list()
+                port_pair_group_list = result.json()['port_pair_groups']
+                port_pair_group_ids = [ppg['id'] for ppg in port_pair_group_list 
+                                       if ppg['port_pairs'] == port_pairs]
+                return port_pair_group_ids[0]
+            else:
+                self.log.exception(e)
+                raise
+
+        self.log.debug("Create Port Pair group response received is status code: %s, response: %s",
+                     result.status_code,
+                     result.json())
+        return result.json()['port_pair_group']['id']
+
+    def delete_port_pair_group(self,port_pair_group_id):
+        try:
+            result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL+'/{}'.format(port_pair_group_id), 'DELETE')
+            result.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            if (result.status_code == 409 and 'NeutronError' in result.json() 
+                and result.json()['NeutronError']['type'] == 'PortPairGroupInUse'): 
+                self.log.info("Port pair group is in use")
+            else:
+                self.log.exception(e)
+                raise
+        self.log.debug("Delete Port Pair group response received is status code: %s",
+                       result.status_code)
+        
+    def get_port_pair_group(self,port_pair_group_id):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL+'/{}'.format(port_pair_group_id), 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Pair group response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def get_port_pair_group_list(self):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_PAIR_GROUPS_URL, 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Pair group list response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def create_port_chain(self,name,port_pair_groups,flow_classifiers=None):
+        """
+        Create port chain
+        """
+        port_chain_dict = {}
+        port_chain_dict["name"]=name
+        port_chain_dict['tenant_id'] = self.tenant_id
+        port_chain_dict['port_pair_groups'] = list()
+        port_chain_dict['port_pair_groups'].extend(port_pair_groups)
+        if flow_classifiers: 
+            port_chain_dict['flow_classifiers'] = list()
+            port_chain_dict['flow_classifiers'].extend(flow_classifiers)
+        port_chain = {}
+        port_chain["port_chain"] = port_chain_dict
+        port_chain_json = json.dumps(port_chain)
+
+        try:
+            result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL, 'POST', port_chain_json)
+            result.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            if (result.status_code == 409 and 'NeutronError' in result.json() 
+                    and result.json()['NeutronError']['type'] == 'InvalidPortPairGroups'): 
+                self.log.info("Port chain with same port pair group already exists")
+                result = self.get_port_chain_list()
+                port_chain_list = result.json()['port_chains']
+                port_chain_ids = [ pc['id'] for pc in port_chain_list 
+                                   if pc['port_pair_groups'] == port_pair_groups ]
+                return port_chain_ids[0]
+            else: 
+                self.log.exception(e)
+                raise()
+
+        self.log.debug("Create Port chain response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        
+        return result.json()['port_chain']['id']
+
+    def delete_port_chain(self,port_chain_id):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL+'/{}'.format(port_chain_id), 'DELETE')
+        result.raise_for_status()
+        self.log.debug("Delete Port chain response received is status code: %s", result.status_code)
+        
+    def get_port_chain(self,port_chain_id):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL+'/{}'.format(port_chain_id), 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Chain response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def get_port_chain_list(self):
+        result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL, 'GET')
+        result.raise_for_status()
+        self.log.debug("Get Port Chain list response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
+
+    def update_port_chain(self,port_chain_id,port_pair_groups=None,flow_classifiers=None):
+        port_chain_dict = {}
+        if flow_classifiers: 
+            port_chain_dict['flow_classifiers'] = list()
+            port_chain_dict['flow_classifiers'].extend(flow_classifiers)
+        if port_pair_groups:
+            port_chain_dict['port_pair_groups'] = list()
+            port_chain_dict['port_pair_groups'].extend(port_pair_groups)
+        port_chain = {}
+        port_chain["port_chain"] = port_chain_dict
+        port_chain_json = json.dumps(port_chain)
+
+        result = self.rest_api_handler(L2PortChainDriver.PORT_CHAINS_URL+'/{}'.format(port_chain_id), 'PUT', port_chain_json)
+        result.raise_for_status()
+        self.log.debug("Update Port chain response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result.json()['port_chain']['id']
+
+    def create_flow_classifier(self,name,classifier_dict):
+        """
+            Create flow classifier
+        """
+        classifier_fields = [ 'ethertype',
+                              'protocol',
+                              'source_port_range_min',
+                              'source_port_range_max',
+                              'destination_port_range_min',
+                              'destination_port_range_max',
+                              'source_ip_prefix',
+                              'destination_ip_prefix',
+                              'logical_source_port' ]
+        
+        flow_classifier_dict = {}
+        flow_classifier_dict = {k: v for k, v in classifier_dict.items()
+                                if k in classifier_fields}
+        flow_classifier_dict["name"]= name
+        flow_classifier_dict['tenant_id']= self.tenant_id
+
+        #flow_classifier_dict['ethertype']= 'IPv4'
+        #flow_classifier_dict['protocol']= 'TCP'
+        #flow_classifier_dict['source_port_range_min']= 80
+        #flow_classifier_dict['source_port_range_max']= 80
+        #flow_classifier_dict['destination_port_range_min']= 80
+        #flow_classifier_dict['destination_port_range_max']= 80
+        #flow_classifier_dict['source_ip_prefix']= '11.0.6.5/32'
+        #flow_classifier_dict['destination_ip_prefix']= '11.0.6.6/32'
+        #flow_classifier_dict['logical_source_port']= source_neutron_port
+        #flow_classifier_dict['logical_destination_port']= ''
+        flow_classifier = {}
+        flow_classifier["flow_classifier"] = flow_classifier_dict
+        flow_classifier_json = json.dumps(flow_classifier)
+    
+        result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL, 'POST', flow_classifier_json)
+        result.raise_for_status()
+        self.log.debug("Create flow classifier response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result.json()['flow_classifier']['id']
+
+    def delete_flow_classifier(self,flow_classifier_id):
+        result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL+'/{}'.format(flow_classifier_id), 'DELETE')
+        result.raise_for_status()
+        self.log.debug("Delete flow classifier response received is status code: %s",
+                       result.status_code)
+        
+    def get_flow_classifier(self,flow_classifier_id):
+        result = self.rest_api_handler(L2PortChainDriver.FLOW_CLASSIFIERS_URL+'/{}'.format(flow_classifier_id), 'GET')
+        result.raise_for_status()
+        self.log.debug("Get flow classifier response received is status code: %s, response: %s",
+                       result.status_code,
+                       result.json())
+        return result
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
index 06b65fc..7f3800b 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
@@ -354,16 +354,17 @@
         logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
         sys.exit(2)
 
+    kwargs = dict(username = argument.username,
+                  password = argument.password,
+                  auth_url = argument.auth_url,
+                  project =  argument.tenant_name,
+                  mgmt_network = argument.mgmt_network,
+                  cert_validate = False,
+                  user_domain = argument.user_domain,
+                  project_domain = argument.project_domain,
+                  region = argument.region)
 
-    drv = openstack_drv.OpenstackDriver(username            = argument.username,
-                                        password            = argument.password,
-                                        auth_url            = argument.auth_url,
-                                        tenant_name         = argument.tenant_name,
-                                        mgmt_network        = argument.mgmt_network,
-                                        user_domain_name    = argument.user_domain,
-                                        project_domain_name = argument.project_domain,
-                                        region              = argument.region)
-    
+    drv = openstack_drv.OpenstackDriver(logger = logger, **kwargs)
     prepare_vm_after_boot(drv, argument)
     sys.exit(0)
     
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/__init__.py
new file mode 100644
index 0000000..cd80cbe
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/__init__.py
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .session_drv import (
+    SessionDriver,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/auth_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/auth_drv.py
new file mode 100644
index 0000000..3c72588
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/auth_drv.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+from keystoneauth1.identity import v3
+from keystoneauth1.identity import v2
+import logging
+
+
class TokenDriver(object):
    """
    Token-based authentication driver for openstack.

    Placeholder implementation: it exposes the same interface as the other
    auth drivers but does not yet produce a usable auth handle.
    """
    def __init__(self, version, logger=None, **kwargs):
        """
        Constructor for class

        Arguments:
            version (str): Keystone API version (unused by this placeholder)
            logger: optional logging.Logger; a module logger is created if omitted
        """
        if logger is not None:
            self.log = logger
        else:
            self.log = logging.getLogger('rwcal.openstack.keystone.token')
            self.log.setLevel(logging.DEBUG)

    @property
    def auth_handle(self):
        """No real auth handle is available for the token placeholder."""
        return None
+        
class PasswordDriver(object):
    """
    Password-based authentication driver for openstack (Keystone v2/v3).
    """
    def __init__(self, version, logger=None, **kwargs):
        """
        Constructor for class
        Arguments:
        version (str): Keystone API version to use (e.g. '2', '3', '3.1')
        logger (instance of logging.Logger)
        A dictionary of following key-value pairs
        {
          auth_url (string) : Keystone Auth URL
          username (string) : Username for authentication
          password (string) : Password for authentication
          project_name (string) : Name of the project or tenant
          project_domain_name (string) : Name of the project domain (v3 only)
          user_domain_name (string) : Name of the user domain (v3 only)
        }
        Returns:
             None
        Raises:
             ValueError if the requested Keystone major version is unsupported.
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.keystone.password')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger
        # Bug fix: the original code re-assigned `self.log = logger`
        # unconditionally after the if/else, clobbering the fallback logger
        # with None whenever no logger was supplied.

        # Normalize versions such as '3.1' or '2.0' to their major number.
        version = int(float(version))

        if version == 3:
            self.log.info("Using keystone version 3 for authentication at URL: %s", kwargs['auth_url'])
            self._auth = v3.Password(auth_url = kwargs['auth_url'],
                                     username = kwargs['username'],
                                     password = kwargs['password'],
                                     project_name = kwargs['project_name'],
                                     project_domain_name = kwargs['project_domain_name'],
                                     user_domain_name = kwargs['user_domain_name'])
        elif version == 2:
            self.log.info("Using keystone version 2 for authentication at URL: %s", kwargs['auth_url'])
            self._auth = v2.Password(auth_url = kwargs['auth_url'],
                                     username = kwargs['username'],
                                     password = kwargs['password'],
                                     tenant_name = kwargs['project_name'])
        else:
            # Fail fast instead of leaving self._auth unset, which previously
            # surfaced later as an AttributeError when auth_handle was read.
            raise ValueError("Unsupported keystone version: %s" % version)

    @property
    def auth_handle(self):
        """The keystoneauth1 identity plugin built by the constructor."""
        return self._auth
+    
+    
class AuthDriver(object):
    """
    Driver class for handling authentication plugins for openstack
    """
    # Registry of supported authentication methods.
    AuthMethod = dict(
        password=PasswordDriver,
        token=TokenDriver,
    )
    def __init__(self, auth_type, version, logger = None, **kwargs):
        """
        auth_type (string): At this point, only "password" based 
                            authentication is supported.
        version (string): Keystone API version 
        logger (instance of logging.Logger)

        kwargs a dictionary of following key/value pairs, forwarded
        unchanged to the selected auth-method driver
        {
          username (string)  : Username
          password (string)  : Password
          auth_url (string)  : Authentication URL
          project_name (string): Project (tenant) name
          user_domain_name (string) : User domain name
          project_domain_name (string): Project domain name
          region (string)    : Region name
        }

        Raises:
          KeyError if auth_type is not a supported method.
        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.auth')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        self.log.info("Using %s authentication method", auth_type)
        if auth_type not in AuthDriver.AuthMethod:
            self.log.error("Unsupported authentication method %s", auth_type)
            # Bug fix: the original passed ("...%s", auth_type) as a tuple to
            # KeyError, so the method name was never interpolated.
            raise KeyError("Unsupported authentication method %s" % auth_type)
        else:
            self._auth_method = AuthDriver.AuthMethod[auth_type](version, self.log, **kwargs)

    @property
    def auth_handle(self):
        """Auth handle of the underlying authentication-method driver."""
        return self._auth_method.auth_handle
+        
+                                               
+        
+    
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/session_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/session_drv.py
new file mode 100644
index 0000000..751caf8
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/session/session_drv.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import logging
+from .auth_drv import AuthDriver
+from keystoneauth1 import session
+
+
class SessionDriver(object):
    """
    Authentication session class for openstack
    """
    def __init__(self, auth_method, version, cert_validate, logger = None, **kwargs):
        """
        Constructor for class SessionDriver
        auth_method (string): At this point, only "password" based 
                              authentication is supported. See AuthDriver.AuthMethod 
                              for more details
        version (string): Keystone API version 
        cert_validate (boolean): Boolean to indicate if certificate validation is required
        logger (instance of logging.Logger)
        kwargs a dictionary of following key/value pairs
        {
          username (string)  : Username
          password (string)  : Password
          auth_url (string)  : Authentication URL
          tenant_name(string): Tenant Name
          user_domain_name (string) : User domain name
          project_domain_name (string): Project domain name
          region (string)    : Region name
        }

        """
        if logger is None:
            self.log = logging.getLogger('rwcal.openstack.session')
            self.log.setLevel(logging.DEBUG)
        else:
            self.log = logger

        # Cached separately: keystoneauth1 sessions do not expose the
        # original auth_url directly.
        self._auth_url = kwargs['auth_url']

        self._auth = AuthDriver(auth_method, version, logger, **kwargs)
        self._sess = session.Session(auth=self._auth.auth_handle,
                                     verify = cert_validate)
        
    @property
    def session(self):
        # The underlying keystoneauth1 Session, for use by service clients.
        return self._sess
    
    @property
    def auth_token(self):
        """
        Returns a valid Auth-Token
        """
        # NOTE(review): both branches of the inner if/else end in get_token();
        # the only difference is the invalidate() before re-fetching when the
        # current token is close to expiry.
        if not self._sess.auth.get_auth_state():
            return self._sess.get_token()
        else:
            if self.will_expire_after():
                self._sess.invalidate()
                return self._sess.get_token()
            else:
                return self._sess.get_token()
    @property
    def auth_url(self):
        # Keystone auth URL this session was created against.
        return self._auth_url
    
    def invalidate_auth_token(self):
        """
        This method will return a fresh token (in case of HTTP 401 response)
        """
        # Only invalidates the cached token; the next get_token()/auth_token
        # access fetches a fresh one.
        self._sess.invalidate()

    @property
    def auth_header(self):
        # Ready-to-use auth headers (e.g. X-Auth-Token) for raw REST calls.
        return self._sess.auth.get_headers(self._sess)

    @property
    def project_id(self):
        # Project (tenant) id resolved by the auth plugin.
        return self._sess.get_project_id()

    @property
    def user_id(self):
        # User id resolved by the auth plugin.
        return self._sess.get_user_id()
    
    def get_auth_state(self):
        # Serialized auth state; empty/None when not yet authenticated.
        return self._sess.auth.get_auth_state()
        
    def will_expire_after(self, timeout=180):
        # True when the current token expires within `timeout` seconds.
        return self._sess.auth.auth_ref.will_expire_soon(stale_duration=timeout)
+
+    
+    
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/__init__.py
new file mode 100644
index 0000000..07a8b3e
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/__init__.py
@@ -0,0 +1,32 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .flavor import (
+    FlavorUtils,
+)
+
+from .network import (
+    NetworkUtils,
+)
+
+from .image import (
+    ImageUtils,
+)
+
+from .compute import(
+    ComputeUtils,
+)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py
new file mode 100644
index 0000000..be7a969
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/compute.py
@@ -0,0 +1,697 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import uuid
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
class ImageValidateError(Exception):
    """Raised when an image cannot be resolved/validated for VDU creation."""
    pass

class VolumeValidateError(Exception):
    """Raised when a volume reference cannot be resolved/validated."""
    pass

class AffinityGroupError(Exception):
    """Raised when a named affinity/anti-affinity group cannot be found."""
    pass
+
+
+class ComputeUtils(object):
+    """
+    Utility class for compute operations
+    """
    # Top-level EPA-related fields of a VDUInitParams request; used by
    # select_vdu_flavor() to project vdu_params into a flavor description.
    epa_types = ['vm_flavor',
                 'guest_epa',
                 'host_epa',
                 'host_aggregate',
                 'hypervisor_epa',
                 'vswitch_epa']
    def __init__(self, driver):
        """
        Constructor for class
        Arguments:
           driver: object of OpenstackDriver()
        """
        self._driver = driver
        # Share the driver's logger so all utils log to the same place.
        self.log = driver.log
+
    @property
    def driver(self):
        # The OpenstackDriver instance this utility object operates on.
        return self._driver
+
+    def search_vdu_flavor(self, vdu_params):
+        """
+        Function to search a matching flavor for VDU instantiation
+        from already existing flavors
+        
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           flavor_id(string): Flavor id for VDU instantiation
+           None if no flavor could be found
+        """
+        kwargs = { 'vcpus': vdu_params.vm_flavor.vcpu_count,
+                   'ram'  : vdu_params.vm_flavor.memory_mb,
+                   'disk' : vdu_params.vm_flavor.storage_gb,}
+        
+        flavors = self.driver.nova_flavor_find(**kwargs)
+        flavor_list = list()
+        for flv in flavors:
+            flavor_list.append(self.driver.utils.flavor.parse_flavor_info(flv))
+            
+        flavor_id = self.driver.utils.flavor.match_resource_flavor(vdu_params, flavor_list)
+        return flavor_id
+        
+    def select_vdu_flavor(self, vdu_params):
+        """
+        This function attempts to find a pre-existing flavor matching required 
+        parameters for VDU instantiation. If no such flavor is found, a new one
+        is created.
+        
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           flavor_id(string): Flavor id for VDU instantiation
+        """
+        flavor_id = self.search_vdu_flavor(vdu_params)
+        if flavor_id is not None:
+            self.log.info("Found flavor with id: %s matching requirements for VDU: %s",
+                          flavor_id, vdu_params.name)
+            return flavor_id
+
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = str(uuid.uuid4())
+        
+        epa_dict = { k: v for k, v in vdu_params.as_dict().items()
+                     if k in ComputeUtils.epa_types }
+        
+        flavor.from_dict(epa_dict)
+
+        flavor_id = self.driver.nova_flavor_create(name      = flavor.name,
+                                                   ram       = flavor.vm_flavor.memory_mb,
+                                                   vcpus     = flavor.vm_flavor.vcpu_count,
+                                                   disk      = flavor.vm_flavor.storage_gb,
+                                                   epa_specs = self.driver.utils.flavor.get_extra_specs(flavor))
+        return flavor_id
+
+    def make_vdu_flavor_args(self, vdu_params):
+        """
+        Creates flavor related arguments for VDU operation
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           A dictionary {'flavor_id': <flavor-id>}
+        """
+        return {'flavor_id': self.select_vdu_flavor(vdu_params)}
+
+
+    def make_vdu_image_args(self, vdu_params):
+        """
+        Creates image related arguments for VDU operation
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           A dictionary {'image_id': <image-id>}
+
+        """
+        kwargs = dict()
+        if vdu_params.has_field('image_name'):
+            kwargs['image_id'] = self.resolve_image_n_validate(vdu_params.image_name,
+                                                               vdu_params.image_checksum)
+        elif vdu_params.has_field('image_id'):
+            kwargs['image_id'] = vdu_params.image_id
+            
+        return kwargs
+
+    def resolve_image_n_validate(self, image_name, checksum = None):
+        """
+        Resolve the image_name to image-object by matching image_name and checksum
+        
+        Arguments:
+          image_name (string): Name of image
+          checksums  (string): Checksum associated with image
+
+        Raises ImageValidateError in case of Errors
+        """
+        image_info = [ i for i in self.driver._glance_image_list if i['name'] == image_name]
+
+        if not image_info:
+            self.log.error("No image with name: %s found", image_name)
+            raise ImageValidateError("No image with name %s found" %(image_name))
+        
+        for image in image_info:
+            if 'status' not in image or image['status'] != 'active':
+                self.log.error("Image %s not in active state. Current state: %s",
+                               image_name, image['status'])
+                raise ImageValidateError("Image with name %s found in incorrect (%s) state"
+                                         %(image_name, image['status']))
+            if not checksum or checksum == image['checksum']:
+                break
+        else:
+            self.log.info("No image found with matching name: %s and checksum: %s",
+                          image_name, checksum)
+            raise ImageValidateError("No image found with matching name: %s and checksum: %s"
+                                     %(image_name, checksum))
+        return image['id']
+        
+    def resolve_volume_n_validate(self, volume_ref):
+        """
+        Resolve the volume reference
+        
+        Arguments:
+          volume_ref (string): Name of volume reference
+
+        Raises VolumeValidateError in case of Errors
+        """
+        
+        for vol in self.driver._cinder_volume_list:
+            voldict = vol.to_dict()
+            if 'display_name' in voldict and voldict['display_name'] == volume_ref:
+                if 'status' in voldict:
+                    if voldict['status'] == 'available':
+                        return voldict['id']
+                    else:
+                        self.log.error("Volume %s not in available state. Current state: %s",
+                               volume_ref, voldict['status'])
+                        raise VolumeValidateError("Volume with name %s found in incorrect (%s) state"
+                                         %(volume_ref, voldict['status']))
+
+        self.log.info("No volume found with matching name: %s ", volume_ref)
+        raise VolumeValidateError("No volume found with matching name: %s " %(volume_ref))
+        
+    def make_vdu_volume_args(self, volume, vdu_params):
+        """
+        Arguments:
+           volume:   Protobuf GI object RwcalYang.VDUInitParams_Volumes()
+           vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+        
+        Returns:
+           A dictionary required to create volume for VDU
+
+        Raises VolumeValidateError in case of Errors
+        """
+        kwargs = dict()
+
+        if 'boot_priority' in volume:
+            # Rift-only field
+            kwargs['boot_index'] = volume.boot_priority
+        if volume.has_field("image"):
+            # Support image->volume
+            kwargs['source_type'] = "image"
+            kwargs['uuid'] = self.resolve_image_n_validate(volume.image, volume.image_checksum)
+            kwargs['delete_on_termination'] = True
+        elif "volume_ref" in volume:
+            # Support volume-ref->volume (only ref)
+            # Rift-only field
+            kwargs['source_type'] = "volume"
+            kwargs['uuid'] = self.resolve_volume_n_validate(volume.volume_ref)
+            kwargs['delete_on_termination'] = False
+        else:
+            # Support blank->volume
+            kwargs['source_type'] = "blank"
+            kwargs['delete_on_termination'] = True
+        kwargs['device_name'] = volume.name
+        kwargs['destination_type'] = "volume"
+        kwargs['volume_size'] = volume.size
+
+        if volume.has_field('device_type'):
+            if volume.device_type in ['cdrom', 'disk']:
+                kwargs['device_type'] = volume.device_type
+            else:
+                self.log.error("Unsupported device_type <%s> found for volume: %s",
+                               volume.device_type, volume.name)
+                raise VolumeValidateError("Unsupported device_type <%s> found for volume: %s"
+                                          %(volume.device_type, volume.name))
+
+        if volume.has_field('device_bus'):
+            if volume.device_bus in ['ide', 'virtio', 'scsi']:
+                kwargs['disk_bus'] = volume.device_bus
+            else:
+                self.log.error("Unsupported device_type <%s> found for volume: %s",
+                               volume.device_type, volume.name)
+                raise VolumeValidateError("Unsupported device_type <%s> found for volume: %s"
+                                          %(volume.device_type, volume.name))
+
+        return kwargs
+            
+    def make_vdu_storage_args(self, vdu_params):
+        """
+        Creates volume related arguments for VDU operation
+        
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           A dictionary required for volumes creation for VDU instantiation
+        """
+        kwargs = dict()
+        if vdu_params.has_field('volumes'):
+            kwargs['block_device_mapping_v2'] = list()
+            # Ignore top-level image
+            kwargs['image_id']  = ""
+            for volume in vdu_params.volumes:
+                kwargs['block_device_mapping_v2'].append(self.make_vdu_volume_args(volume, vdu_params))
+        return kwargs
+
+    def make_vdu_network_args(self, vdu_params):
+        """
+        Creates VDU network related arguments for VDU operation
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+
+        Returns:
+           A dictionary {'port_list' : [ports], 'network_list': [networks]}
+
+        """
+        kwargs = dict()
+        kwargs['port_list'], kwargs['network_list'] = self.driver.utils.network.setup_vdu_networking(vdu_params)
+        return kwargs
+
+    
    def make_vdu_boot_config_args(self, vdu_params):
        """
        Creates VDU boot config related arguments for VDU operation
        Arguments:
          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()

        Returns:
          A dictionary {
                         'userdata'    :  <cloud-init> , 
                         'config_drive':  True/False, 
                         'files'       :  [ file name ],
                         'metadata'    :  <metadata string>
        }
        """
        kwargs = dict()
        metadata = dict()

        # node_id is surfaced to the guest via nova metadata.
        if vdu_params.has_field('node_id'):
            metadata['rift_node_id'] = vdu_params.node_id
            kwargs['metadata'] = metadata

        # cloud-init userdata; always present (empty string when unset).
        if vdu_params.has_field('vdu_init') and vdu_params.vdu_init.has_field('userdata'):
            kwargs['userdata'] = vdu_params.vdu_init.userdata
        else:
            kwargs['userdata'] = ''

        # Everything below depends on supplemental_boot_data.
        if not vdu_params.has_field('supplemental_boot_data'):
            return kwargs
        
        if vdu_params.supplemental_boot_data.has_field('config_file'):
            files = dict()
            for cf in vdu_params.supplemental_boot_data.config_file:
                files[cf.dest] = cf.source
            kwargs['files'] = files

        if vdu_params.supplemental_boot_data.has_field('boot_data_drive'):
            kwargs['config_drive'] = vdu_params.supplemental_boot_data.boot_data_drive
        else:
            kwargs['config_drive'] = False

        try:
            # Rift model only
            # NOTE(review): best-effort on purpose — models without the
            # custom_meta_data field raise here and are silently skipped.
            # `metadata` may already hold rift_node_id; custom entries are
            # merged into the same dict.
            if vdu_params.supplemental_boot_data.has_field('custom_meta_data'):
                for cm in vdu_params.supplemental_boot_data.custom_meta_data:
                    metadata[cm.name] = cm.value
                    kwargs['metadata'] = metadata
        except Exception as e:
            pass

        return kwargs
+
+    def _select_affinity_group(self, group_name):
+        """
+        Selects the affinity group based on name and return its id
+        Arguments:
+          group_name (string): Name of the Affinity/Anti-Affinity group
+        Returns:
+          Id of the matching group
+
+        Raises exception AffinityGroupError if no matching group is found
+        """
+        groups = [g['id'] for g in self.driver._nova_affinity_group if g['name'] == group_name]
+        if not groups:
+            self.log.error("No affinity/anti-affinity group with name: %s found", group_name)
+            raise AffinityGroupError("No affinity/anti-affinity group with name: %s found" %(group_name))
+        return groups[0]
+
+        
+    def make_vdu_server_placement_args(self, vdu_params):
+        """
+        Function to create kwargs required for nova server placement
+        
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+        
+        Returns:
+         A dictionary { 'availability_zone' : < Zone >, 'scheduler_hints': <group-id> } 
+
+        """
+        kwargs = dict()
+        
+        if vdu_params.has_field('availability_zone') \
+           and vdu_params.availability_zone.has_field('name'):
+            kwargs['availability_zone']  = vdu_params.availability_zone
+
+        if vdu_params.has_field('server_group'):
+            kwargs['scheduler_hints'] = {
+                'group': self._select_affinity_group(vdu_params.server_group)
+            }            
+        return kwargs
+
+    def make_vdu_server_security_args(self, vdu_params, account):
+        """
+        Function to create kwargs required for nova security group
+
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+          account: Protobuf GI object RwcalYang.CloudAccount()
+        
+        Returns:
+          A dictionary {'security_groups' : < group > }
+        """
+        kwargs = dict()
+        if account.openstack.security_groups:
+            kwargs['security_groups'] = account.openstack.security_groups
+        return kwargs
+    
+    
+    def make_vdu_create_args(self, vdu_params, account):
+        """
+        Function to create kwargs required for nova_server_create API
+        
+        Arguments:
+          vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+          account: Protobuf GI object RwcalYang.CloudAccount()
+
+        Returns:
+          A kwargs dictionary for VDU create operation
+        """
+        kwargs = dict()
+        
+        kwargs['name'] = vdu_params.name
+
+        kwargs.update(self.make_vdu_flavor_args(vdu_params))
+        kwargs.update(self.make_vdu_storage_args(vdu_params))
+        kwargs.update(self.make_vdu_image_args(vdu_params))
+        kwargs.update(self.make_vdu_network_args(vdu_params))
+        kwargs.update(self.make_vdu_boot_config_args(vdu_params))
+        kwargs.update(self.make_vdu_server_placement_args(vdu_params))
+        kwargs.update(self.make_vdu_server_security_args(vdu_params, account))
+        return kwargs
+        
+    
+    def _parse_vdu_mgmt_address_info(self, vm_info):
+        """
+        Get management_ip and public_ip for VDU
+        
+        Arguments:
+          vm_info : A dictionary object return by novaclient library listing VM attributes
+        
+        Returns:
+          A tuple of mgmt_ip (string) and public_ip (string)
+        """
+        mgmt_ip = None
+        public_ip = None
+        if 'addresses' in vm_info:
+            for network_name, network_info in vm_info['addresses'].items():
+                if network_info and network_name == self.driver.mgmt_network:
+                    for interface in network_info:
+                        if 'OS-EXT-IPS:type' in interface:
+                            if interface['OS-EXT-IPS:type'] == 'fixed':
+                                mgmt_ip = interface['addr']
+                            elif interface['OS-EXT-IPS:type'] == 'floating':
+                                public_ip = interface['addr']
+        return (mgmt_ip, public_ip)
+
+    def get_vdu_epa_info(self, vm_info):
+        """
+        Get flavor information (including EPA) for VDU
+
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+        Returns:
+          flavor_info: A dictionary object returned by novaclient library listing flavor attributes
+        """
+        if 'flavor' in vm_info and 'id' in vm_info['flavor']:
+            try:
+                flavor_info = self.driver.nova_flavor_get(vm_info['flavor']['id'])
+                return flavor_info
+            except Exception as e:
+                self.log.exception("Exception %s occured during get-flavor", str(e))
+        return dict()
+
+    def _parse_vdu_cp_info(self, vdu_id):
+        """
+        Get connection point information for VDU identified by vdu_id
+        Arguments:
+        vdu_id (string) : VDU Id (vm_info['id']) 
+        Returns:
+        A List of object RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        """
+        cp_list = []
+        # Fill the port information
+        port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})
+        for port in port_list:
+            cp_info = self.driver.utils.network._parse_cp(port)
+            cp = RwcalYang.VDUInfoParams_ConnectionPoints()
+            cp.from_dict(cp_info.as_dict())
+            cp_list.append(cp)
+        return cp_list
+
+    def _parse_vdu_state_info(self, vm_info):
+        """
+        Get VDU state information
+
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+
+        Returns:
+          state (string): State of the VDU
+        """
+        if 'status' in vm_info:
+            if vm_info['status'] == 'ACTIVE':
+                vdu_state = 'active'
+            elif vm_info['status'] == 'ERROR':
+                vdu_state = 'failed'
+            else:
+                vdu_state = 'inactive'
+        else:
+            vdu_state = 'unknown'
+        return vdu_state
+
+    def _parse_vdu_server_group_info(self, vm_info):
+        """
+        Get VDU server group information
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+
+        Returns:
+          server_group_name (string): Name of the server group to which VM belongs, else empty string
+        
+        """
+        server_group = [ v['name']
+                         for v in self.driver.nova_server_group_list()
+                         if vm_info['id'] in v['members'] ]
+        if server_group:
+            return server_group[0]
+        else:
+            return str()
+
+    def _parse_vdu_boot_config_data(self, vm_info):
+        """
+        Parses VDU supplemental boot data
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+
+        Returns:
+          (node_id, supplemental_boot_data) tuple where
+          node_id: value of the 'rift_node_id' metadata key, or None
+          supplemental_boot_data: RwcalYang.VDUInfoParams_SupplementalBootData(),
+          or None when the VM has neither a config drive nor metadata
+        """
+        supplemental_boot_data = None
+        node_id = None
+        if 'config_drive' in vm_info:
+            supplemental_boot_data = RwcalYang.VDUInfoParams_SupplementalBootData()
+            supplemental_boot_data.boot_data_drive = vm_info['config_drive']
+        # Look for any metadata
+        if 'metadata' not in vm_info:
+            return node_id, supplemental_boot_data
+        if supplemental_boot_data is None:
+            supplemental_boot_data = RwcalYang.VDUInfoParams_SupplementalBootData()
+        for key, value in vm_info['metadata'].items():
+            if key == 'rift_node_id':
+                # 'rift_node_id' is returned separately, not as custom meta-data.
+                node_id = value
+            else:
+                try:
+                    # rift only
+                    cm = supplemental_boot_data.custom_meta_data.add()
+                    cm.name = key
+                    cm.value = str(value)
+                except Exception as e:
+                    # NOTE(review): failures are silently dropped; presumably
+                    # custom_meta_data is absent on non-RIFT schemas — confirm.
+                    pass
+        return node_id, supplemental_boot_data
+
+    def _parse_vdu_volume_info(self, vm_info):
+        """
+        Get VDU server group information
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+
+        Returns:
+          List of RwcalYang.VDUInfoParams_Volumes()
+        """
+        volumes = list()
+        
+        try:
+            volume_list = self.driver.nova_volume_list(vm_info['id'])
+        except Exception as e:
+            self.log.exception("Exception %s occured during nova-volume-list", str(e))
+            return volumes
+
+        for v in volume_list:
+            volume = RwcalYang.VDUInfoParams_Volumes()
+            try:
+                volume.name = (v['device']).split('/')[2]
+                volume.volume_id = v['volumeId']
+                details = self.driver.cinder_volume_get(volume.volume_id)
+                try:
+                    # Rift only
+                    for k, v in details.metadata.items():
+                        vd = volume.custom_meta_data.add()
+                        vd.name = k
+                        vd.value = v
+                except Exception as e:
+                    pass
+            except Exception as e:
+                self.log.exception("Exception %s occured during volume list parsing", str(e))
+                continue
+            else:
+                volumes.append(volume)
+        return volumes
+    
+    def _parse_vdu_console_url(self, vm_info):
+        """
+        Get VDU console URL
+        Arguments:
+          vm_info : A dictionary returned by novaclient library listing VM attributes
+
+        Returns:
+          console_url(string): Console URL for VM
+        """
+        console_url = None
+        if self._parse_vdu_state_info(vm_info) == 'active':
+            try:
+                serv_console_url = self.driver.nova_server_console(vm_info['id'])
+                if 'console' in serv_console_url:
+                    console_url = serv_console_url['console']['url']
+                else:
+                    self.log.error("Error fetching console url. This could be an Openstack issue. Console : " + str(serv_console_url))
+
+
+            except Exception as e:
+                self.log.exception("Exception %s occured during volume list parsing", str(e))
+        return console_url
+
+    def parse_cloud_vdu_info(self, vm_info):
+        """
+        Parse vm_info dictionary (return by python-client) and put values in GI object for VDU
+
+        Arguments:
+           vm_info : A dictionary object return by novaclient library listing VM attributes
+
+        Returns:
+           Protobuf GI Object of type RwcalYang.VDUInfoParams()
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        vdu.cloud_type  = 'openstack'
+
+        # Image block may be missing or lack an id — guard before reading it.
+        if 'image' in vm_info and 'id' in vm_info['image']:
+            vdu.image_id = vm_info['image']['id']
+
+        if 'availability_zone' in vm_info:
+            vdu.availability_zone = vm_info['availability_zone']
+
+        # Derived state and management/public addresses.
+        vdu.state = self._parse_vdu_state_info(vm_info)
+        management_ip,public_ip = self._parse_vdu_mgmt_address_info(vm_info)
+
+        if management_ip:
+            vdu.management_ip = management_ip
+
+        if public_ip:
+            vdu.public_ip = public_ip
+
+        if 'flavor' in vm_info and 'id' in vm_info['flavor']:
+            vdu.flavor_id = vm_info['flavor']['id']
+            # Expand the flavor into its EPA components.
+            flavor_info = self.get_vdu_epa_info(vm_info)
+            vm_flavor = self.driver.utils.flavor.parse_vm_flavor_epa_info(flavor_info)
+            guest_epa = self.driver.utils.flavor.parse_guest_epa_info(flavor_info)
+            host_epa = self.driver.utils.flavor.parse_host_epa_info(flavor_info)
+            host_aggregates = self.driver.utils.flavor.parse_host_aggregate_epa_info(flavor_info)
+
+            vdu.vm_flavor.from_dict(vm_flavor.as_dict())
+            vdu.guest_epa.from_dict(guest_epa.as_dict())
+            vdu.host_epa.from_dict(host_epa.as_dict())
+            for aggr in host_aggregates:
+                ha = vdu.host_aggregate.add()
+                ha.from_dict(aggr.as_dict())
+
+        node_id, boot_data = self._parse_vdu_boot_config_data(vm_info)
+        if node_id:
+            vdu.node_id = node_id
+        if boot_data:
+            vdu.supplemental_boot_data = boot_data
+
+        cp_list = self._parse_vdu_cp_info(vdu.vdu_id)
+        for cp in cp_list:
+            vdu.connection_points.append(cp)
+
+        vdu.server_group.name = self._parse_vdu_server_group_info(vm_info)
+
+        for v in self._parse_vdu_volume_info(vm_info):
+            vdu.volumes.append(v)
+
+        # NOTE(review): _parse_vdu_console_url() can return None; confirm the
+        # GI field accepts a None assignment.
+        vdu.console_url = self._parse_vdu_console_url(vm_info)
+        return vdu
+
+
+    def perform_vdu_network_cleanup(self, vdu_id):
+        """
+        This function cleans up networking resources related to VDU
+        Arguments:
+           vdu_id(string): VDU id
+        Returns:
+           None
+        """
+        ### Get list of floating_ips associated with this instance and delete them
+        floating_ips = [ f for f in self.driver.nova_floating_ip_list() if f.instance_id == vdu_id ]
+        for f in floating_ips:
+            self.driver.nova_floating_ip_delete(f)
+
+        ### Get list of port on VM and delete them.
+        port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})
+
+        # Only ports whose device_owner marks them as plain (or orphaned)
+        # compute ports are deleted; router/dhcp ports are left alone.
+        for port in port_list:
+            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+                self.driver.neutron_port_delete(port['id'])
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/flavor.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/flavor.py
new file mode 100644
index 0000000..3199775
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/flavor.py
@@ -0,0 +1,1308 @@
+#!/usr/bin/python
+
+#
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import gi
+
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
+class GuestEPAUtils(object):
+    """
+    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        self._mano_to_espec_cpu_pinning_policy = {
+            'DEDICATED' : 'dedicated',
+            'SHARED'    : 'shared',
+            'ANY'       : 'any',
+        }
+
+        self._espec_to_mano_cpu_pinning_policy = {
+            'dedicated' : 'DEDICATED',
+            'shared'    : 'SHARED',
+            'any'       : 'ANY',
+        }
+        
+        self._mano_to_espec_mempage_size = {
+            'LARGE'        : 'large', 
+            'SMALL'        : 'small',
+            'SIZE_2MB'     :  2048,
+            'SIZE_1GB'     :  1048576,
+            'PREFER_LARGE' : 'large',
+        }
+
+        self._espec_to_mano_mempage_size = {
+            'large'        : 'LARGE', 
+            'small'        : 'SMALL',
+             2048          : 'SIZE_2MB',
+             1048576       : 'SIZE_1GB',
+            'large'        : 'PREFER_LARGE',
+        }
+
+        self._mano_to_espec_cpu_thread_pinning_policy = {
+            'AVOID'    : 'avoid',
+            'SEPARATE' : 'separate',
+            'ISOLATE'  : 'isolate',
+            'PREFER'   : 'prefer',
+        }
+
+        self._espec_to_mano_cpu_thread_pinning_policy = {
+            'avoid'    : 'AVOID',
+            'separate' : 'SEPARATE',
+            'isolate'  : 'ISOLATE',
+            'prefer'   : 'PREFER',
+        }
+
+        self._espec_to_mano_numa_memory_policy = {
+            'strict'   : 'STRICT',
+            'preferred': 'PREFERRED'
+        }
+
+        self._mano_to_espec_numa_memory_policy = {
+            'STRICT'   : 'strict',
+            'PREFERRED': 'preferred'
+        }
+
+    def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
+        if cpu_pinning_policy in self._mano_to_espec_cpu_pinning_policy:
+            return self._mano_to_espec_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
+        if cpu_pinning_policy in self._espec_to_mano_cpu_pinning_policy:
+            return self._espec_to_mano_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_mempage_size(self, mempage_size):
+        if mempage_size in self._mano_to_espec_mempage_size:
+            return self._mano_to_espec_mempage_size[mempage_size]
+        else:
+            return None
+        
+    def extra_spec_to_mano_mempage_size(self, mempage_size):
+        if mempage_size in self._espec_to_mano_mempage_size:
+            return self._espec_to_mano_mempage_size[mempage_size]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        if cpu_thread_pinning_policy in self._mano_to_espec_cpu_thread_pinning_policy:
+            return self._mano_to_espec_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        if cpu_thread_pinning_policy in self._espec_to_mano_cpu_thread_pinning_policy:
+            return self._espec_to_mano_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_trusted_execution(self, trusted_execution):
+        if trusted_execution:
+            return 'trusted'
+        else:
+            return 'untrusted'
+
+    def extra_spec_to_mano_trusted_execution(self, trusted_execution):
+        if trusted_execution == 'trusted':
+            return True
+        elif trusted_execution == 'untrusted':
+            return False
+        else:
+            return None
+        
+    def mano_to_extra_spec_numa_node_count(self, numa_node_count):
+        return numa_node_count
+
+    def extra_specs_to_mano_numa_node_count(self, numa_node_count):
+        return int(numa_node_count)
+    
+    def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
+        if numa_memory_policy in self._mano_to_espec_numa_memory_policy:
+            return self._mano_to_espec_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+
+    def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
+        if numa_memory_policy in self._espec_to_mano_numa_memory_policy:
+            return self._espec_to_mano_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+        
+                                                          
+    
+    
+class HostEPAUtils(object):
+    """
+    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        self._mano_to_espec_cpumodel = {
+            "PREFER_WESTMERE"     : "Westmere",
+            "REQUIRE_WESTMERE"    : "Westmere",
+            "PREFER_SANDYBRIDGE"  : "SandyBridge",
+            "REQUIRE_SANDYBRIDGE" : "SandyBridge",
+            "PREFER_IVYBRIDGE"    : "IvyBridge",
+            "REQUIRE_IVYBRIDGE"   : "IvyBridge",
+            "PREFER_HASWELL"      : "Haswell",
+            "REQUIRE_HASWELL"     : "Haswell",
+            "PREFER_BROADWELL"    : "Broadwell",
+            "REQUIRE_BROADWELL"   : "Broadwell",
+            "PREFER_NEHALEM"      : "Nehalem",
+            "REQUIRE_NEHALEM"     : "Nehalem",
+            "PREFER_PENRYN"       : "Penryn",
+            "REQUIRE_PENRYN"      : "Penryn",
+            "PREFER_CONROE"       : "Conroe",
+            "REQUIRE_CONROE"      : "Conroe",
+            "PREFER_CORE2DUO"     : "Core2Duo",
+            "REQUIRE_CORE2DUO"    : "Core2Duo",
+        }
+
+        self._espec_to_mano_cpumodel = {
+            "Westmere"     : "REQUIRE_WESTMERE",
+            "SandyBridge"  : "REQUIRE_SANDYBRIDGE",
+            "IvyBridge"    : "REQUIRE_IVYBRIDGE",
+            "Haswell"      : "REQUIRE_HASWELL",
+            "Broadwell"    : "REQUIRE_BROADWELL",
+            "Nehalem"      : "REQUIRE_NEHALEM",
+            "Penryn"       : "REQUIRE_PENRYN",
+            "Conroe"       : "REQUIRE_CONROE",
+            "Core2Duo"     : "REQUIRE_CORE2DUO",
+        }
+
+        self._mano_to_espec_cpuarch = {
+            "PREFER_X86"     : "x86",
+            "REQUIRE_X86"    : "x86",
+            "PREFER_X86_64"  : "x86_64",
+            "REQUIRE_X86_64" : "x86_64",
+            "PREFER_I686"    : "i686",
+            "REQUIRE_I686"   : "i686",
+            "PREFER_IA64"    : "ia64",
+            "REQUIRE_IA64"   : "ia64",
+            "PREFER_ARMV7"   : "ARMv7",
+            "REQUIRE_ARMV7"  : "ARMv7",
+            "PREFER_ARMV8"   : "ARMv8-A",
+            "REQUIRE_ARMV8"  : "ARMv8-A",
+        }
+
+        self._espec_to_mano_cpuarch = {
+            "x86"     : "REQUIRE_X86",
+            "x86_64"  : "REQUIRE_X86_64",
+            "i686"    : "REQUIRE_I686",
+            "ia64"    : "REQUIRE_IA64",
+            "ARMv7-A" : "REQUIRE_ARMV7",
+            "ARMv8-A" : "REQUIRE_ARMV8",
+        }
+
+        self._mano_to_espec_cpuvendor = {
+            "PREFER_INTEL"  : "Intel",
+            "REQUIRE_INTEL" : "Intel",
+            "PREFER_AMD"    : "AMD",
+            "REQUIRE_AMD"   : "AMD",
+        }
+
+        self._espec_to_mano_cpuvendor = {
+            "Intel" : "REQUIRE_INTEL",
+            "AMD"   : "REQUIRE_AMD",
+        }
+
+        self._mano_to_espec_cpufeatures = {
+            "PREFER_AES"       : "aes",
+            "REQUIRE_AES"      : "aes",
+            "REQUIRE_VME"      : "vme",
+            "PREFER_VME"       : "vme",
+            "REQUIRE_DE"       : "de",
+            "PREFER_DE"        : "de",
+            "REQUIRE_PSE"      : "pse",
+            "PREFER_PSE"       : "pse",
+            "REQUIRE_TSC"      : "tsc",
+            "PREFER_TSC"       : "tsc",
+            "REQUIRE_MSR"      : "msr",
+            "PREFER_MSR"       : "msr",
+            "REQUIRE_PAE"      : "pae",
+            "PREFER_PAE"       : "pae",
+            "REQUIRE_MCE"      : "mce",
+            "PREFER_MCE"       : "mce",
+            "REQUIRE_CX8"      : "cx8",
+            "PREFER_CX8"       : "cx8",
+            "REQUIRE_APIC"     : "apic",
+            "PREFER_APIC"      : "apic",
+            "REQUIRE_SEP"      : "sep",
+            "PREFER_SEP"       : "sep",
+            "REQUIRE_MTRR"     : "mtrr",
+            "PREFER_MTRR"      : "mtrr",
+            "REQUIRE_PGE"      : "pge",
+            "PREFER_PGE"       : "pge",
+            "REQUIRE_MCA"      : "mca",
+            "PREFER_MCA"       : "mca",
+            "REQUIRE_CMOV"     : "cmov",
+            "PREFER_CMOV"      : "cmov",
+            "REQUIRE_PAT"      : "pat",
+            "PREFER_PAT"       : "pat",
+            "REQUIRE_PSE36"    : "pse36",
+            "PREFER_PSE36"     : "pse36",
+            "REQUIRE_CLFLUSH"  : "clflush",
+            "PREFER_CLFLUSH"   : "clflush",
+            "REQUIRE_DTS"      : "dts",
+            "PREFER_DTS"       : "dts",
+            "REQUIRE_ACPI"     : "acpi",
+            "PREFER_ACPI"      : "acpi",
+            "REQUIRE_MMX"      : "mmx",
+            "PREFER_MMX"       : "mmx",
+            "REQUIRE_FXSR"     : "fxsr",
+            "PREFER_FXSR"      : "fxsr",
+            "REQUIRE_SSE"      : "sse",
+            "PREFER_SSE"       : "sse",
+            "REQUIRE_SSE2"     : "sse2",
+            "PREFER_SSE2"      : "sse2",
+            "REQUIRE_SS"       : "ss",
+            "PREFER_SS"        : "ss",
+            "REQUIRE_HT"       : "ht",
+            "PREFER_HT"        : "ht",
+            "REQUIRE_TM"       : "tm",
+            "PREFER_TM"        : "tm",
+            "REQUIRE_IA64"     : "ia64",
+            "PREFER_IA64"      : "ia64",
+            "REQUIRE_PBE"      : "pbe",
+            "PREFER_PBE"       : "pbe",
+            "REQUIRE_RDTSCP"   : "rdtscp",
+            "PREFER_RDTSCP"    : "rdtscp",
+            "REQUIRE_PNI"      : "pni",
+            "PREFER_PNI"       : "pni",
+            "REQUIRE_PCLMULQDQ": "pclmulqdq",
+            "PREFER_PCLMULQDQ" : "pclmulqdq",
+            "REQUIRE_DTES64"   : "dtes64",
+            "PREFER_DTES64"    : "dtes64",
+            "REQUIRE_MONITOR"  : "monitor",
+            "PREFER_MONITOR"   : "monitor",
+            "REQUIRE_DS_CPL"   : "ds_cpl",
+            "PREFER_DS_CPL"    : "ds_cpl",
+            "REQUIRE_VMX"      : "vmx",
+            "PREFER_VMX"       : "vmx",
+            "REQUIRE_SMX"      : "smx",
+            "PREFER_SMX"       : "smx",
+            "REQUIRE_EST"      : "est",
+            "PREFER_EST"       : "est",
+            "REQUIRE_TM2"      : "tm2",
+            "PREFER_TM2"       : "tm2",
+            "REQUIRE_SSSE3"    : "ssse3",
+            "PREFER_SSSE3"     : "ssse3",
+            "REQUIRE_CID"      : "cid",
+            "PREFER_CID"       : "cid",
+            "REQUIRE_FMA"      : "fma",
+            "PREFER_FMA"       : "fma",
+            "REQUIRE_CX16"     : "cx16",
+            "PREFER_CX16"      : "cx16",
+            "REQUIRE_XTPR"     : "xtpr",
+            "PREFER_XTPR"      : "xtpr",
+            "REQUIRE_PDCM"     : "pdcm",
+            "PREFER_PDCM"      : "pdcm",
+            "REQUIRE_PCID"     : "pcid",
+            "PREFER_PCID"      : "pcid",
+            "REQUIRE_DCA"      : "dca",
+            "PREFER_DCA"       : "dca",
+            "REQUIRE_SSE4_1"   : "sse4_1",
+            "PREFER_SSE4_1"    : "sse4_1",
+            "REQUIRE_SSE4_2"   : "sse4_2",
+            "PREFER_SSE4_2"    : "sse4_2",
+            "REQUIRE_X2APIC"   : "x2apic",
+            "PREFER_X2APIC"    : "x2apic",
+            "REQUIRE_MOVBE"    : "movbe",
+            "PREFER_MOVBE"     : "movbe",
+            "REQUIRE_POPCNT"   : "popcnt",
+            "PREFER_POPCNT"    : "popcnt",
+            "REQUIRE_TSC_DEADLINE_TIMER"   : "tsc_deadline_timer",
+            "PREFER_TSC_DEADLINE_TIMER"    : "tsc_deadline_timer",
+            "REQUIRE_XSAVE"    : "xsave",
+            "PREFER_XSAVE"     : "xsave",
+            "REQUIRE_AVX"      : "avx",
+            "PREFER_AVX"       : "avx",
+            "REQUIRE_F16C"     : "f16c",
+            "PREFER_F16C"      : "f16c",
+            "REQUIRE_RDRAND"   : "rdrand",
+            "PREFER_RDRAND"    : "rdrand",
+            "REQUIRE_FSGSBASE" : "fsgsbase",
+            "PREFER_FSGSBASE"  : "fsgsbase",
+            "REQUIRE_BMI1"     : "bmi1",
+            "PREFER_BMI1"      : "bmi1",
+            "REQUIRE_HLE"      : "hle",
+            "PREFER_HLE"       : "hle",
+            "REQUIRE_AVX2"     : "avx2",
+            "PREFER_AVX2"      : "avx2",
+            "REQUIRE_SMEP"     : "smep",
+            "PREFER_SMEP"      : "smep",
+            "REQUIRE_BMI2"     : "bmi2",
+            "PREFER_BMI2"      : "bmi2",
+            "REQUIRE_ERMS"     : "erms",
+            "PREFER_ERMS"      : "erms",
+            "REQUIRE_INVPCID"  : "invpcid",
+            "PREFER_INVPCID"   : "invpcid",
+            "REQUIRE_RTM"      : "rtm",
+            "PREFER_RTM"       : "rtm",
+            "REQUIRE_MPX"      : "mpx",
+            "PREFER_MPX"       : "mpx",
+            "REQUIRE_RDSEED"   : "rdseed",
+            "PREFER_RDSEED"    : "rdseed",
+            "REQUIRE_ADX"      : "adx",
+            "PREFER_ADX"       : "adx",
+            "REQUIRE_SMAP"     : "smap",
+            "PREFER_SMAP"      : "smap",
+        }
+
+        self._espec_to_mano_cpufeatures = {
+            "aes"      : "REQUIRE_AES",
+            "vme"      : "REQUIRE_VME",
+            "de"       : "REQUIRE_DE",
+            "pse"      : "REQUIRE_PSE",
+            "tsc"      : "REQUIRE_TSC",
+            "msr"      : "REQUIRE_MSR",
+            "pae"      : "REQUIRE_PAE",
+            "mce"      : "REQUIRE_MCE",
+            "cx8"      : "REQUIRE_CX8",
+            "apic"     : "REQUIRE_APIC",
+            "sep"      : "REQUIRE_SEP",
+            "mtrr"     : "REQUIRE_MTRR",
+            "pge"      : "REQUIRE_PGE",
+            "mca"      : "REQUIRE_MCA",
+            "cmov"     : "REQUIRE_CMOV",
+            "pat"      : "REQUIRE_PAT",
+            "pse36"    : "REQUIRE_PSE36",
+            "clflush"  : "REQUIRE_CLFLUSH",
+            "dts"      : "REQUIRE_DTS",
+            "acpi"     : "REQUIRE_ACPI",
+            "mmx"      : "REQUIRE_MMX",
+            "fxsr"     : "REQUIRE_FXSR",
+            "sse"      : "REQUIRE_SSE",
+            "sse2"     : "REQUIRE_SSE2",
+            "ss"       : "REQUIRE_SS",
+            "ht"       : "REQUIRE_HT",
+            "tm"       : "REQUIRE_TM",
+            "ia64"     : "REQUIRE_IA64",
+            "pbe"      : "REQUIRE_PBE",
+            "rdtscp"   : "REQUIRE_RDTSCP",
+            "pni"      : "REQUIRE_PNI",
+            "pclmulqdq": "REQUIRE_PCLMULQDQ",
+            "dtes64"   : "REQUIRE_DTES64",
+            "monitor"  : "REQUIRE_MONITOR",
+            "ds_cpl"   : "REQUIRE_DS_CPL",
+            "vmx"      : "REQUIRE_VMX",
+            "smx"      : "REQUIRE_SMX",
+            "est"      : "REQUIRE_EST",
+            "tm2"      : "REQUIRE_TM2",
+            "ssse3"    : "REQUIRE_SSSE3",
+            "cid"      : "REQUIRE_CID",
+            "fma"      : "REQUIRE_FMA",
+            "cx16"     : "REQUIRE_CX16",
+            "xtpr"     : "REQUIRE_XTPR",
+            "pdcm"     : "REQUIRE_PDCM",
+            "pcid"     : "REQUIRE_PCID",
+            "dca"      : "REQUIRE_DCA",
+            "sse4_1"   : "REQUIRE_SSE4_1",
+            "sse4_2"   : "REQUIRE_SSE4_2",
+            "x2apic"   : "REQUIRE_X2APIC",
+            "movbe"    : "REQUIRE_MOVBE",
+            "popcnt"   : "REQUIRE_POPCNT",
+            "tsc_deadline_timer"   : "REQUIRE_TSC_DEADLINE_TIMER",
+            "xsave"    : "REQUIRE_XSAVE",
+            "avx"      : "REQUIRE_AVX",
+            "f16c"     : "REQUIRE_F16C",
+            "rdrand"   : "REQUIRE_RDRAND",
+            "fsgsbase" : "REQUIRE_FSGSBASE",
+            "bmi1"     : "REQUIRE_BMI1",
+            "hle"      : "REQUIRE_HLE",
+            "avx2"     : "REQUIRE_AVX2",
+            "smep"     : "REQUIRE_SMEP",
+            "bmi2"     : "REQUIRE_BMI2",
+            "erms"     : "REQUIRE_ERMS",
+            "invpcid"  : "REQUIRE_INVPCID",
+            "rtm"      : "REQUIRE_RTM",
+            "mpx"      : "REQUIRE_MPX",
+            "rdseed"   : "REQUIRE_RDSEED",
+            "adx"      : "REQUIRE_ADX",
+            "smap"     : "REQUIRE_SMAP",
+        }
+
+    def mano_to_extra_spec_cpu_model(self, cpu_model):
+        if cpu_model in self._mano_to_espec_cpumodel:
+            return self._mano_to_espec_cpumodel[cpu_model]
+        else:
+            return None
+            
+    def extra_specs_to_mano_cpu_model(self, cpu_model):
+        if cpu_model in self._espec_to_mano_cpumodel:
+            return self._espec_to_mano_cpumodel[cpu_model]
+        else:
+            return None
+        
+    def mano_to_extra_spec_cpu_arch(self, cpu_arch):
+        if cpu_arch in self._mano_to_espec_cpuarch:
+            return self._mano_to_espec_cpuarch[cpu_arch]
+        else:
+            return None
+        
+    def extra_specs_to_mano_cpu_arch(self, cpu_arch):
+        if cpu_arch in self._espec_to_mano_cpuarch:
+            return self._espec_to_mano_cpuarch[cpu_arch]
+        else:
+            return None
+    
+    def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
+        if cpu_vendor in self._mano_to_espec_cpuvendor:
+            return self._mano_to_espec_cpuvendor[cpu_vendor]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
+        if cpu_vendor in self._espec_to_mano_cpuvendor:
+            return self._espec_to_mano_cpuvendor[cpu_vendor]
+        else:
+            return None
+    
+    def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
+        return cpu_sockets
+
+    def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
+        return int(cpu_sockets)
+    
+    def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
+        return cpu_core_count
+
+    def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
+        return int(cpu_core_count)
+    
+    def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
+        return core_thread_count
+
+    def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
+        return int(core_thread_count)
+
+    def mano_to_extra_spec_cpu_features(self, features):
+        cpu_features = []
+        epa_feature_str = None
+        for f in features:
+            if f in self._mano_to_espec_cpufeatures:
+                cpu_features.append(self._mano_to_espec_cpufeatures[f])
+                
+        if len(cpu_features) > 1:
+            epa_feature_str =  '<all-in> '+ " ".join(cpu_features)
+        elif len(cpu_features) == 1:
+            epa_feature_str = " ".join(cpu_features)
+
+        return epa_feature_str
+
+    def extra_spec_to_mano_cpu_features(self, features):
+        oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=', 's==', 's!=', 's<', 's<=', 's>', 's>=']
+        cpu_features = []
+        result = None
+        for oper in oper_symbols:
+            regex = '^'+oper+' (.*?)$'
+            result = re.search(regex, features)
+            if result is not None:
+                break
+            
+        if result is not None:
+            feature_list = result.group(1)
+        else:
+            feature_list = features
+
+        for f in feature_list.split():
+            if f in self._espec_to_mano_cpufeatures:
+                cpu_features.append(self._espec_to_mano_cpufeatures[f])
+
+        return cpu_features
+    
+
+class ExtraSpecUtils(object):
+    """
+    General utility class for flavor Extra Specs processing
+    """
+    def __init__(self):
+        # Converters for host- and guest-EPA attribute <-> extra-spec values.
+        self.host = HostEPAUtils()
+        self.guest = GuestEPAUtils()
+        # Extra-spec keys this CAL treats as EPA-related; any other key found
+        # in a flavor's extra_specs is ignored by the EPA parsers.
+        self.extra_specs_keywords = [ 'hw:cpu_policy',
+                                      'hw:cpu_threads_policy',
+                                      'hw:mem_page_size',
+                                      'hw:numa_nodes',
+                                      'hw:numa_mempolicy',
+                                      'hw:numa_cpus',
+                                      'hw:numa_mem',
+                                      'trust:trusted_host',
+                                      'pci_passthrough:alias',
+                                      'capabilities:cpu_info:model',
+                                      'capabilities:cpu_info:arch',
+                                      'capabilities:cpu_info:vendor',
+                                      'capabilities:cpu_info:topology:sockets',
+                                      'capabilities:cpu_info:topology:cores',
+                                      'capabilities:cpu_info:topology:threads',
+                                      'capabilities:cpu_info:features',
+                                ]
+        # Prefix match: accepts any key *starting with* one of the keywords
+        # above (e.g. the per-node variants 'hw:numa_cpus.0', 'hw:numa_mem.0').
+        self.extra_specs_regex = re.compile("^"+"|^".join(self.extra_specs_keywords))
+
+
+
+class FlavorUtils(object):
+    """
+    Utility class for handling the flavor 
+    """
+    def __init__(self, driver):
+        """
+        Constructor for class
+        Arguments:
+          driver: object of OpenstackDriver()
+        """
+        # EPA <-> extra-spec conversion helpers.
+        self._epa = ExtraSpecUtils()
+        self._driver = driver
+        # Share the driver's logger so flavor messages land in the same log.
+        self.log = driver.log
+        
+    @property
+    def driver(self):
+        # Read-only access to the owning OpenstackDriver instance.
+        return self._driver
+    
+    def _get_guest_epa_specs(self, guest_epa):
+        """
+        Returns EPA Specs dictionary for guest_epa attributes
+        """
+        epa_specs = dict()
+        if guest_epa.has_field('mempage_size'):
+            mempage_size = self._epa.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
+            if mempage_size is not None:
+                epa_specs['hw:mem_page_size'] = mempage_size
+
+        if guest_epa.has_field('cpu_pinning_policy'):
+            cpu_pinning_policy = self._epa.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
+            if cpu_pinning_policy is not None:
+                epa_specs['hw:cpu_policy'] = cpu_pinning_policy
+
+        if guest_epa.has_field('cpu_thread_pinning_policy'):
+            cpu_thread_pinning_policy = self._epa.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
+            if cpu_thread_pinning_policy is None:
+                epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
+
+        if guest_epa.has_field('trusted_execution'):
+            trusted_execution = self._epa.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
+            if trusted_execution is not None:
+                epa_specs['trust:trusted_host'] = trusted_execution
+
+        if guest_epa.has_field('numa_node_policy'):
+            if guest_epa.numa_node_policy.has_field('node_cnt'):
+                numa_node_count = self._epa.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
+                if numa_node_count is not None:
+                    epa_specs['hw:numa_nodes'] = numa_node_count
+
+            if guest_epa.numa_node_policy.has_field('mem_policy'):
+                numa_memory_policy = self._epa.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
+                if numa_memory_policy is not None:
+                    epa_specs['hw:numa_mempolicy'] = numa_memory_policy
+
+            if guest_epa.numa_node_policy.has_field('node'):
+                for node in guest_epa.numa_node_policy.node:
+                    if node.has_field('vcpu') and node.vcpu:
+                        epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j.id) for j in node.vcpu])
+                    if node.memory_mb:
+                        epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
+
+        if guest_epa.has_field('pcie_device'):
+            pci_devices = []
+            for device in guest_epa.pcie_device:
+                pci_devices.append(device.device_id +':'+str(device.count))
+            epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
+
+        return epa_specs
+
+    def _get_host_epa_specs(self,host_epa):
+        """
+        Returns EPA Specs dictionary for host_epa attributes
+        """
+        epa_specs = dict()
+
+        if host_epa.has_field('cpu_model'):
+            cpu_model = self._epa.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
+            if cpu_model is not None:
+                epa_specs['capabilities:cpu_info:model'] = cpu_model
+
+        if host_epa.has_field('cpu_arch'):
+            cpu_arch = self._epa.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
+            if cpu_arch is not None:
+                epa_specs['capabilities:cpu_info:arch'] = cpu_arch
+
+        if host_epa.has_field('cpu_vendor'):
+            cpu_vendor = self._epa.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
+            if cpu_vendor is not None:
+                epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
+
+        if host_epa.has_field('cpu_socket_count'):
+            cpu_socket_count = self._epa.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
+            if cpu_socket_count is not None:
+                epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
+
+        if host_epa.has_field('cpu_core_count'):
+            cpu_core_count = self._epa.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
+            if cpu_core_count is not None:
+                epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
+
+        if host_epa.has_field('cpu_core_thread_count'):
+            cpu_core_thread_count = self._epa.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
+            if cpu_core_thread_count is not None:
+                epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
+
+        if host_epa.has_field('cpu_feature'):
+            cpu_features = []
+            espec_cpu_features = []
+            for feature in host_epa.cpu_feature:
+                cpu_features.append(feature.feature)
+            espec_cpu_features = self._epa.host.mano_to_extra_spec_cpu_features(cpu_features)
+            if espec_cpu_features is not None:
+                epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
+        return epa_specs
+
+    def _get_hypervisor_epa_specs(self,guest_epa):
+        """
+        Returns EPA Specs dictionary for hypervisor_epa attributes
+        """
+        hypervisor_epa = dict()
+        return hypervisor_epa
+
+    def _get_vswitch_epa_specs(self, guest_epa):
+        """
+        Returns EPA Specs dictionary for vswitch_epa attributes
+        """
+        vswitch_epa = dict()
+        return vswitch_epa
+
+    def _get_host_aggregate_epa_specs(self, host_aggregate):
+        """
+        Returns EPA Specs dictionary for host aggregates
+        """
+        epa_specs = dict()
+        for aggregate in host_aggregate:
+            epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
+
+        return epa_specs
+    
+    def get_extra_specs(self, flavor):
+        """
+        Returns epa_specs dictionary based on flavor information
+        Arguments
+           flavor -- Protobuf GI object for flavor_info (RwcalYang.FlavorInfoItem())
+        Returns:
+           A dictionary of extra_specs in format understood by novaclient library
+        """
+        epa_specs = dict()
+        if flavor.has_field('guest_epa'):
+            guest_epa = self._get_guest_epa_specs(flavor.guest_epa)
+            epa_specs.update(guest_epa)
+        if flavor.has_field('host_epa'):
+            host_epa = self._get_host_epa_specs(flavor.host_epa)
+            epa_specs.update(host_epa)
+        if flavor.has_field('hypervisor_epa'):
+            hypervisor_epa = self._get_hypervisor_epa_specs(flavor.hypervisor_epa)
+            epa_specs.update(hypervisor_epa)
+        if flavor.has_field('vswitch_epa'):
+            vswitch_epa = self._get_vswitch_epa_specs(flavor.vswitch_epa)
+            epa_specs.update(vswitch_epa)
+        if flavor.has_field('host_aggregate'):
+            host_aggregate = self._get_host_aggregate_epa_specs(flavor.host_aggregate)
+            epa_specs.update(host_aggregate)
+        return epa_specs
+
+
+    def parse_vm_flavor_epa_info(self, flavor_info):
+        """
+        Parse the flavor_info dictionary (returned by python-client) for vm_flavor
+
+        Arguments:
+           flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+        Returns:
+               vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+        """
+        vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+
+        if 'vcpus' in flavor_info and flavor_info['vcpus']:
+            vm_flavor.vcpu_count = flavor_info['vcpus']
+
+        if 'ram' in flavor_info and flavor_info['ram']:
+            vm_flavor.memory_mb  = flavor_info['ram']
+
+        if 'disk' in flavor_info and flavor_info['disk']:
+            vm_flavor.storage_gb  = flavor_info['disk']
+
+        return vm_flavor
+    
+    def parse_guest_epa_info(self, flavor_info):
+        """
+        Parse the flavor_info dictionary (returned by python-client) for guest_epa
+
+        Arguments:
+           flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+        Returns:
+           guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
+        """
+        guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
+        for attr in flavor_info['extra_specs']:
+            if attr == 'hw:cpu_policy':
+                cpu_pinning_policy = self._epa.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
+                if cpu_pinning_policy is not None:
+                    guest_epa.cpu_pinning_policy = cpu_pinning_policy
+
+            elif attr == 'hw:cpu_threads_policy':
+                cpu_thread_pinning_policy = self._epa.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
+                if cpu_thread_pinning_policy is not None:
+                    guest_epa.cpu_thread_pinning_policy = cpu_thread_pinning_policy
+
+            elif attr == 'hw:mem_page_size':
+                mempage_size = self._epa.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
+                if mempage_size is not None:
+                    guest_epa.mempage_size = mempage_size
+
+            elif attr == 'hw:numa_nodes':
+                numa_node_count = self._epa.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
+                if numa_node_count is not None:
+                    guest_epa.numa_node_policy.node_cnt = numa_node_count
+
+            elif attr.startswith('hw:numa_cpus.'):
+                # Key format is 'hw:numa_cpus.<node_id>' with a CSV vcpu list.
+                # Nodes may be created here or by the hw:numa_mem. branch below,
+                # whichever key is iterated first — so look up before adding.
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = guest_epa.numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                for x in flavor_info['extra_specs'][attr].split(','):
+                   numa_node_vcpu = numa_node.vcpu.add()
+                   numa_node_vcpu.id = int(x)
+
+            elif attr.startswith('hw:numa_mem.'):
+                # Key format is 'hw:numa_mem.<node_id>' with memory in MB.
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = guest_epa.numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                numa_node.memory_mb =  int(flavor_info['extra_specs'][attr])
+
+            elif attr == 'hw:numa_mempolicy':
+                numa_memory_policy = self._epa.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
+                if numa_memory_policy is not None:
+                    guest_epa.numa_node_policy.mem_policy = numa_memory_policy
+
+            elif attr == 'trust:trusted_host':
+                trusted_execution = self._epa.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
+                if trusted_execution is not None:
+                    guest_epa.trusted_execution = trusted_execution
+
+            elif attr == 'pci_passthrough:alias':
+                # Value format is '<alias>:<count>[,<alias>:<count>...]'.
+                device_types = flavor_info['extra_specs']['pci_passthrough:alias']
+                for device in device_types.split(','):
+                    dev = guest_epa.pcie_device.add()
+                    dev.device_id = device.split(':')[0]
+                    dev.count = int(device.split(':')[1])
+        return guest_epa
+
+    def parse_host_epa_info(self, flavor_info):
+        """
+        Parse the flavor_info dictionary (returned by python-client) for host_epa
+
+        Arguments:
+           flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+        Returns:
+           host_epa  = RwcalYang.FlavorInfoItem_HostEpa()
+        """
+        host_epa  = RwcalYang.FlavorInfoItem_HostEpa()
+        for attr in flavor_info['extra_specs']:
+            if attr == 'capabilities:cpu_info:model':
+                cpu_model = self._epa.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
+                if cpu_model is not None:
+                    host_epa.cpu_model = cpu_model
+
+            elif attr == 'capabilities:cpu_info:arch':
+                cpu_arch = self._epa.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
+                if cpu_arch is not None:
+                    host_epa.cpu_arch = cpu_arch
+
+            elif attr == 'capabilities:cpu_info:vendor':
+                cpu_vendor = self._epa.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
+                if cpu_vendor is not None:
+                    host_epa.cpu_vendor = cpu_vendor
+
+            elif attr == 'capabilities:cpu_info:topology:sockets':
+                cpu_sockets = self._epa.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
+                if cpu_sockets is not None:
+                    host_epa.cpu_socket_count = cpu_sockets
+
+            elif attr == 'capabilities:cpu_info:topology:cores':
+                cpu_cores = self._epa.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
+                if cpu_cores is not None:
+                    host_epa.cpu_core_count = cpu_cores
+
+            elif attr == 'capabilities:cpu_info:topology:threads':
+                cpu_threads = self._epa.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
+                if cpu_threads is not None:
+                    host_epa.cpu_core_thread_count = cpu_threads
+
+            elif attr == 'capabilities:cpu_info:features':
+                cpu_features = self._epa.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
+                if cpu_features is not None:
+                    for feature in cpu_features:
+                        host_epa.cpu_feature.append(feature)
+        return host_epa
+    
+    def parse_host_aggregate_epa_info(self, flavor_info):
+        """
+        Parse the flavor_info dictionary (returned by python-client) for host_aggregate
+
+        Arguments:
+           flavor_info: A dictionary object return by novaclient library listing flavor attributes
+
+        Returns:
+           A list of objects host_aggregate of type RwcalYang.FlavorInfoItem_HostAggregate()
+        """
+        host_aggregates = list()
+        for attr in flavor_info['extra_specs']:
+            if attr.startswith('aggregate_instance_extra_specs:'):
+                aggregate = RwcalYang.FlavorInfoItem_HostAggregate()
+                aggregate.metadata_key = ":".join(attr.split(':')[1::])
+                aggregate.metadata_value = flavor_info['extra_specs'][attr]
+                host_aggregates.append(aggregate)
+        return host_aggregates
+    
+        
+    def parse_flavor_info(self, flavor_info):
+        """
+        Parse the flavor_info dictionary and put value in RIFT GI object for flavor
+        Arguments:
+           flavor_info: A dictionary object returned by novaclient library listing flavor attributes
+
+        Returns: 
+           Protobuf GI Object of type RwcalYang.FlavorInfoItem()
+
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        if 'name' in flavor_info and flavor_info['name']:
+            flavor.name  = flavor_info['name']
+        if 'id' in flavor_info and flavor_info['id']:
+            flavor.id  = flavor_info['id']
+
+        ### If extra_specs in flavor_info
+        if 'extra_specs' in flavor_info:
+            # Delegate each EPA section to its dedicated parser.
+            flavor.vm_flavor = self.parse_vm_flavor_epa_info(flavor_info)
+            flavor.guest_epa = self.parse_guest_epa_info(flavor_info)
+            flavor.host_epa = self.parse_host_epa_info(flavor_info)
+            # host_aggregate is a repeated field: copy each parsed entry in
+            # via dict round-trip since direct assignment is not supported.
+            for aggr in self.parse_host_aggregate_epa_info(flavor_info):
+                ha = flavor.host_aggregate.add()
+                ha.from_dict(aggr.as_dict())
+        return flavor
+
+    def _match_vm_flavor(self, required, available):
+        self.log.info("Matching VM Flavor attributes")
+        if available.vcpu_count != required.vcpu_count:
+            self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                           required.vcpu_count,
+                           available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                           required.memory_mb,
+                           available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+                           required.storage_gb,
+                           available.storage_gb)
+            return False
+        self.log.debug("VM Flavor match found")
+        return True
+
+    def _match_guest_epa(self, required, available):
+        """
+        Match guest EPA attributes of a required spec against an available flavor.
+
+        The match is symmetric per field: a field required but absent in the
+        flavor fails, and a field present in the flavor but not required also
+        fails (the flavor is over-specified for this request).
+
+        Arguments:
+           required: guest_epa GI object from the request
+           available: guest_epa GI object parsed from an existing flavor
+
+        Returns:
+           True on a full match, False otherwise (reason is logged at debug).
+        """
+        self.log.info("Matching Guest EPA attributes")
+        # --- pcie_device: every required (device_id, count) pair must exist ---
+        if required.has_field('pcie_device'):
+            self.log.debug("Matching pcie_device")
+            if available.has_field('pcie_device') == False:
+                self.log.debug("Matching pcie_device failed. Not available in flavor")
+                return False
+            else:
+                for dev in required.pcie_device:
+                    if not [ d for d in available.pcie_device
+                            if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
+                        self.log.debug("Matching pcie_device failed. Required: %s, Available: %s",
+                                       required.pcie_device, available.pcie_device)
+                        return False
+        elif available.has_field('pcie_device'):
+            self.log.debug("Rejecting available flavor because pcie_device not required but available")
+            return False
+
+
+        # --- mempage_size: exact equality required ---
+        if required.has_field('mempage_size'):
+            self.log.debug("Matching mempage_size")
+            if available.has_field('mempage_size') == False:
+                self.log.debug("Matching mempage_size failed. Not available in flavor")
+                return False
+            else:
+                if required.mempage_size != available.mempage_size:
+                    self.log.debug("Matching mempage_size failed. Required: %s, Available: %s",
+                                   required.mempage_size, available.mempage_size)
+                    return False
+        elif available.has_field('mempage_size'):
+            self.log.debug("Rejecting available flavor because mempage_size not required but available")
+            return False
+
+        # --- cpu_pinning_policy: 'ANY' on the required side matches anything ---
+        if required.has_field('cpu_pinning_policy'):
+            self.log.debug("Matching cpu_pinning_policy")
+            if required.cpu_pinning_policy != 'ANY':
+                if available.has_field('cpu_pinning_policy') == False:
+                    self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
+                    return False
+                else:
+                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
+                        self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s",
+                                       required.cpu_pinning_policy, available.cpu_pinning_policy)
+                        return False
+        elif available.has_field('cpu_pinning_policy'):
+            self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
+            return False
+
+        if required.has_field('cpu_thread_pinning_policy'):
+            self.log.debug("Matching cpu_thread_pinning_policy")
+            if available.has_field('cpu_thread_pinning_policy') == False:
+                self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
+                    self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s",
+                                   required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
+                    return False
+        elif available.has_field('cpu_thread_pinning_policy'):
+            self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
+            return False
+
+        # --- trusted_execution: only enforced when required is True ---
+        if required.has_field('trusted_execution'):
+            self.log.debug("Matching trusted_execution")
+            if required.trusted_execution == True:
+                if available.has_field('trusted_execution') == False:
+                    self.log.debug("Matching trusted_execution failed. Not available in flavor")
+                    return False
+                else:
+                    if required.trusted_execution != available.trusted_execution:
+                        self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s",
+                                       required.trusted_execution, available.trusted_execution)
+                        return False
+        elif available.has_field('trusted_execution'):
+            self.log.debug("Rejecting available flavor because trusted_execution not required but available")
+            return False
+
+        # --- numa_node_policy: node count, memory policy, then per-node match ---
+        if required.has_field('numa_node_policy'):
+            self.log.debug("Matching numa_node_policy")
+            if available.has_field('numa_node_policy') == False:
+                self.log.debug("Matching numa_node_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.numa_node_policy.has_field('node_cnt'):
+                    self.log.debug("Matching numa_node_policy node_cnt")
+                    if available.numa_node_policy.has_field('node_cnt') == False:
+                        self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
+                            self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",
+                                           required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
+                            return False
+                elif available.numa_node_policy.has_field('node_cnt'):
+                    self.log.debug("Rejecting available flavor because numa node count not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('mem_policy'):
+                    self.log.debug("Matching numa_node_policy mem_policy")
+                    if available.numa_node_policy.has_field('mem_policy') == False:
+                        self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
+                            self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s",
+                                           required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
+                            return False
+                elif available.numa_node_policy.has_field('mem_policy'):
+                    self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('node'):
+                    self.log.debug("Matching numa_node_policy nodes configuration")
+                    if available.numa_node_policy.has_field('node') == False:
+                        self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
+                        return False
+                    # Every required node must find an available node with the
+                    # same id, vcpu set and memory_mb.
+                    for required_node in required.numa_node_policy.node:
+                        self.log.debug("Matching numa_node_policy nodes configuration for node %s",
+                                       required_node)
+                        numa_match = False
+                        for available_node in available.numa_node_policy.node:
+                            if required_node.id != available_node.id:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+                                               required_node, available_node)
+                                continue
+                            if required_node.vcpu != available_node.vcpu:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+                                               required_node, available_node)
+                                continue
+                            if required_node.memory_mb != available_node.memory_mb:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s",
+                                               required_node, available_node)
+                                continue
+                            numa_match = True
+                        if numa_match == False:
+                            return False
+                elif available.numa_node_policy.has_field('node'):
+                    self.log.debug("Rejecting available flavor because numa nodes not required but available")
+                    return False
+        elif available.has_field('numa_node_policy'):
+            self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
+            return False
+        self.log.info("Successful match for Guest EPA attributes")
+        return True
+
+    def _match_vswitch_epa(self, required, available):
+        self.log.debug("VSwitch EPA match found")
+        return True
+
+    def _match_hypervisor_epa(self, required, available):
+        self.log.debug("Hypervisor EPA match found")
+        return True
+
+    def _match_host_epa(self, required, available):
+        self.log.info("Matching Host EPA attributes")
+        if required.has_field('cpu_model'):
+            self.log.debug("Matching CPU model")
+            if available.has_field('cpu_model') == False:
+                self.log.debug("Matching CPU model failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
+                    self.log.debug("Matching CPU model failed. Required: %s, Available: %s",
+                                   required.cpu_model, available.cpu_model)
+                    return False
+        elif available.has_field('cpu_model'):
+            self.log.debug("Rejecting available flavor because cpu_model not required but available")
+            return False
+
+        if required.has_field('cpu_arch'):
+            self.log.debug("Matching CPU architecture")
+            if available.has_field('cpu_arch') == False:
+                self.log.debug("Matching CPU architecture failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
+                    self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s",
+                                   required.cpu_arch, available.cpu_arch)
+                    return False
+        elif available.has_field('cpu_arch'):
+            self.log.debug("Rejecting available flavor because cpu_arch not required but available")
+            return False
+
+        if required.has_field('cpu_vendor'):
+            self.log.debug("Matching CPU vendor")
+            if available.has_field('cpu_vendor') == False:
+                self.log.debug("Matching CPU vendor failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
+                    self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s",
+                                   required.cpu_vendor, available.cpu_vendor)
+                    return False
+        elif available.has_field('cpu_vendor'):
+            self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
+            return False
+
+        if required.has_field('cpu_socket_count'):
+            self.log.debug("Matching CPU socket count")
+            if available.has_field('cpu_socket_count') == False:
+                self.log.debug("Matching CPU socket count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_socket_count != available.cpu_socket_count:
+                    self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s",
+                                   required.cpu_socket_count, available.cpu_socket_count)
+                    return False
+        elif available.has_field('cpu_socket_count'):
+            self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
+            return False
+
+        if required.has_field('cpu_core_count'):
+            self.log.debug("Matching CPU core count")
+            if available.has_field('cpu_core_count') == False:
+                self.log.debug("Matching CPU core count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_count != available.cpu_core_count:
+                    self.log.debug("Matching CPU core count failed. Required: %s, Available: %s",
+                                   required.cpu_core_count, available.cpu_core_count)
+                    return False
+        elif available.has_field('cpu_core_count'):
+            self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
+            return False
+
+        if required.has_field('cpu_core_thread_count'):
+            self.log.debug("Matching CPU core thread count")
+            if available.has_field('cpu_core_thread_count') == False:
+                self.log.debug("Matching CPU core thread count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_thread_count != available.cpu_core_thread_count:
+                    self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s",
+                                   required.cpu_core_thread_count, available.cpu_core_thread_count)
+                    return False
+        elif available.has_field('cpu_core_thread_count'):
+            self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
+            return False
+
+        if required.has_field('cpu_feature'):
+            self.log.debug("Matching CPU feature list")
+            if available.has_field('cpu_feature') == False:
+                self.log.debug("Matching CPU feature list failed. Not available in flavor")
+                return False
+            else:
+                for feature in required.cpu_feature:
+                    if feature not in available.cpu_feature:
+                        self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s",
+                                       feature, available.cpu_feature)
+                        return False
+        elif available.has_field('cpu_feature'):
+            self.log.debug("Rejecting available flavor because cpu_feature not required but available")
+            return False
+        self.log.info("Successful match for Host EPA attributes")
+        return True
+
+
+    def _match_placement_group_inputs(self, required, available):
+        """
+        Compare requested host-aggregate (placement group) attributes with
+        the ones carried by an available flavor.
+
+        Arguments:
+          required: list of GI objects describing the requested aggregates
+          available: list of GI objects describing the flavor's aggregates
+
+        Returns:
+          True when neither side specifies aggregates, or when every
+          required aggregate is present in the available set; False otherwise.
+        """
+        self.log.info("Matching Host aggregate attributes")
+
+        if not required and not available:
+            # Host aggregate not required and not available => success
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        if required and available:
+            # Host aggregate requested and available => Do a match and decide
+            required_dicts = [req.as_dict() for req in required]
+            available_dicts = [avl.as_dict() for avl in available]
+            for req_entry in required_dicts:
+                if req_entry not in available_dicts:
+                    self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s",
+                                   required, available)
+                    return False
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        else:
+            # Either of following conditions => Failure
+            #  - Host aggregate required but not available
+            #  - Host aggregate not required but available
+            self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s",
+                           required, available)
+            return False
+
+    def _match_epa_params(self, resource_info, request_params):
+        """
+        Match EPA attributes of an available flavor against a VDU request.
+
+        Arguments:
+           resource_info: Protobuf GI object RwcalYang.FlavorInfoItem()
+                          Following attributes would be accessed
+                          - vm_flavor
+                          - guest_epa
+                          - vswitch_epa
+                          - hypervisor_epa
+                          - host_epa
+                          - host_aggregate
+
+           request_params: Protobuf GI object RwcalYang.VDUInitParams().
+                          The same attributes as above would be accessed.
+
+        Returns:
+           True -- Match between resource_info and request_params
+           False -- No match between resource_info and request_params
+        """
+        # (label, attribute, matcher) triplets, evaluated in order; the
+        # first mismatch rejects the flavor. Each matcher takes
+        # (requested, available) in that order.
+        checks = (
+            ('VM Flavor', 'vm_flavor', self._match_vm_flavor),
+            ('Guest EPA', 'guest_epa', self._match_guest_epa),
+            ('Vswitch EPA', 'vswitch_epa', self._match_vswitch_epa),
+            ('Hypervisor EPA', 'hypervisor_epa', self._match_hypervisor_epa),
+            ('Host EPA', 'host_epa', self._match_host_epa),
+            ('Host Aggregate', 'host_aggregate', self._match_placement_group_inputs),
+        )
+        for label, attr, matcher in checks:
+            if not matcher(getattr(request_params, attr),
+                           getattr(resource_info, attr)):
+                self.log.debug("%s mismatched", label)
+                return False
+
+        return True
+
+    def match_resource_flavor(self, vdu_init, flavor_list):
+        """
+        Look for an existing flavor satisfying a VDU's compute requirements.
+
+        Arguments:
+           vdu_init: Protobuf GI object RwcalYang.VDUInitParams().
+           flavor_list: List of Protobuf GI object RwcalYang.FlavorInfoItem()
+
+        Returns:
+           The id of the first flavor in flavor_list whose EPA attributes
+           match vdu_init, or None when no flavor matches.
+        """
+        for candidate in flavor_list:
+            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                          vdu_init.name, candidate)
+            if not self._match_epa_params(candidate, vdu_init):
+                continue
+            self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                          vdu_init.name, candidate.name, candidate.id)
+            return candidate.id
+        return None
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/image.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/image.py
new file mode 100644
index 0000000..c58fc8d
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/image.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import os
+import gi
+
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+
+
+class ImageUtils(object):
+    """
+    Helper collection for glance image operations.
+    """
+    def __init__(self, driver):
+        """
+        Constructor for class
+        Arguments:
+          driver: object of OpenstackDriver()
+        """
+        self._driver = driver
+        self.log = driver.log
+
+    def make_image_args(self, image):
+        """
+        Build the kwargs dictionary required by the glance_image_create API.
+
+        Arguments:
+          image: Protobuf GI object for RwcalYang.ImageInfoItem()
+
+        Returns:
+          A kwargs dictionary for glance operation
+        """
+        kwargs = {'name': image.name}
+        if image.disk_format:
+            kwargs['disk_format'] = image.disk_format
+        if image.container_format:
+            kwargs['container_format'] = image.container_format
+        return kwargs
+
+    def create_image_handle(self, image):
+        """
+        Open a readable binary handle for the image content.
+
+        Arguments:
+          image: Protobuf GI object for RwcalYang.ImageInfoItem()
+
+        Returns:
+          An object of _io.BufferedReader (file handle)
+        """
+        try:
+            if not image.has_field("fileno"):
+                return open(image.location, "rb")
+            # Duplicate the descriptor so the caller's fd remains usable.
+            return os.fdopen(os.dup(image.fileno), 'rb')
+        except Exception as e:
+            self.log.exception("Could not open file for upload. Exception received: %s", str(e))
+            raise
+
+    def parse_cloud_image_info(self, image_info):
+        """
+        Convert an image dictionary returned by glanceclient into a GI object.
+
+        Arguments:
+          image_info: A dictionary object return by glanceclient library listing image attributes
+
+        Returns:
+          Protobuf GI Object of type RwcalYang.ImageInfoItem()
+        """
+        image = RwcalYang.ImageInfoItem()
+        # Copy only the attributes glance reported with truthy values.
+        for attr in ('name', 'id', 'checksum', 'disk_format', 'container_format'):
+            if image_info.get(attr):
+                setattr(image, attr, image_info[attr])
+
+        # Any glance status other than 'active' is reported as inactive.
+        if image_info.get('status') == 'active':
+            image.state = 'active'
+        else:
+            image.state = 'inactive'
+
+        return image
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/network.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/network.py
new file mode 100644
index 0000000..8e6f608
--- /dev/null
+++ b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/utils/network.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+import neutronclient.common.exceptions as NeutronException
+
+
+class NetworkUtils(object):
+    """
+    Utility class for network operations
+    """
+    def __init__(self, driver):
+        """
+        Constructor for class
+        Arguments:
+          driver: object of OpenstackDriver()
+        """
+        self._driver = driver
+        self.log = driver.log
+
+    @property
+    def driver(self):
+        # Owning OpenstackDriver instance.
+        return self._driver
+    
+    def _parse_cp(self, cp_info):
+        """
+        Parse the port_info dictionary returned by neutronclient 
+        Arguments:
+          cp_info: A dictionary object representing port attributes
+
+        Returns:
+          Protobuf GI object of type RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+        """
+        cp = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+        if 'name' in cp_info and cp_info['name']:
+            cp.name = cp_info['name']
+            
+        if 'id' in cp_info and cp_info['id']:
+            cp.connection_point_id = cp_info['id']
+            
+        # Only the first fixed IP on the port is reported.
+        if ('fixed_ips' in cp_info) and (len(cp_info['fixed_ips']) >= 1):
+            if 'ip_address' in cp_info['fixed_ips'][0]:
+                cp.ip_address = cp_info['fixed_ips'][0]['ip_address']
+                
+        if 'mac_address' in cp_info and cp_info['mac_address']:
+            cp.mac_addr = cp_info['mac_address']
+            
+        # NOTE(review): 'status' is read without a presence check, unlike the
+        # other keys — assumes neutron always returns it; confirm.
+        if cp_info['status'] == 'ACTIVE':
+            cp.state = 'active'
+        else:
+            cp.state = 'inactive'
+            
+        if 'network_id' in cp_info and cp_info['network_id']:
+            cp.virtual_link_id = cp_info['network_id']
+            
+        if 'device_id' in cp_info and cp_info['device_id']:
+            cp.vdu_id = cp_info['device_id']
+        return cp
+
+    def parse_cloud_virtual_link_info(self, vlink_info, port_list, subnet):
+        """
+        Parse vlink_info dictionary (return by python-client) and put values in GI object for Virtual Link
+
+        Arguments:
+        vlink_info : A dictionary object return by neutronclient library listing network attributes
+        port_list : list of port dictionaries attached to the network
+        subnet : subnet dictionary for the network, or None
+        
+        Returns:
+        Protobuf GI Object of type RwcalYang.VirtualLinkInfoParams()
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name  = vlink_info['name']
+        if 'status' in vlink_info and vlink_info['status'] == 'ACTIVE':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+
+        link.virtual_link_id = vlink_info['id']
+        # Only ports owned by compute instances become connection points.
+        for port in port_list:
+            if ('device_owner' in port) and (port['device_owner'] == 'compute:None'):
+                link.connection_points.append(self._parse_cp(port))
+
+        if subnet is not None:
+            link.subnet = subnet['cidr']
+
+        # Provider-network attributes are upper-cased to match the yang enums.
+        if ('provider:network_type' in vlink_info) and (vlink_info['provider:network_type'] != None):
+            link.provider_network.overlay_type = vlink_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in vlink_info) and (vlink_info['provider:segmentation_id']):
+            link.provider_network.segmentation_id = vlink_info['provider:segmentation_id']
+        if ('provider:physical_network' in vlink_info) and (vlink_info['provider:physical_network']):
+            link.provider_network.physical_network = vlink_info['provider:physical_network'].upper()
+
+        return link
+    
+    def setup_vdu_networking(self, vdu_params):
+        """
+        This function validates the networking/connectivity setup.
+
+        Arguments:
+          vdu_params: object of RwcalYang.VDUInitParams()
+
+        Returns:
+          A tuple (port_ids, network_ids): ports created for the VDU's
+          connection points, and extra networks to attach directly
+          (the management network, when no connection point uses it).
+  
+        """
+        port_args = list()
+        network_ids = list()
+        add_mgmt_net = False
+        for cp in vdu_params.connection_points:
+            if cp.virtual_link_id == self.driver._mgmt_network_id:
+                ### Remove mgmt_network_id from net_ids
+                add_mgmt_net = True
+            port_args.append(self._create_cp_args(cp))
+
+        # The mgmt network is attached directly only when no connection
+        # point already references it.
+        if not add_mgmt_net:
+            network_ids.append(self.driver._mgmt_network_id)
+            
+        ### Create ports and collect port ids
+        if port_args:
+            port_ids = self.driver.neutron_multi_port_create(port_args)
+        else:
+            port_ids = list()
+
+        return port_ids, network_ids
+    
+        
+    def _create_cp_args(self, cp):
+        """
+        Creates a request dictionary for port create call
+        Arguments:
+           cp: Object of RwcalYang.VDUInitParams_ConnectionPoints() 
+        Returns:
+           dict() of request params
+        """
+        args = dict()
+        args['name'] = cp.name
+        args['network_id'] = cp.virtual_link_id
+        args['admin_state_up'] = True
+
+        # Map the yang CP type onto a neutron vnic binding type.
+        if cp.type_yang == 'VIRTIO' or cp.type_yang == 'E1000':
+            args['binding:vnic_type'] = 'normal'
+        elif cp.type_yang == 'SR_IOV':
+            args['binding:vnic_type'] = 'direct'
+        else:
+            raise NotImplementedError("Port Type: %s not supported" %(cp.type_yang))
+
+        # Best-effort: static_ip_address may be absent on this GI type.
+        try:
+            if cp.static_ip_address:
+                args["fixed_ips"] = [{"ip_address" : cp.static_ip_address}]
+        except Exception as e:
+            pass
+
+        # NOTE(review): membership test ('in') on a GI object — presumably
+        # equivalent to cp.has_field('port_security_enabled'); confirm.
+        if 'port_security_enabled' in cp:
+            args['port_security_enabled'] = cp.port_security_enabled
+
+        # Only the first configured neutron security group is applied.
+        if cp.has_field('security_group'):
+            if self.driver._neutron_security_groups:
+                gid = self.driver._neutron_security_groups[0]['id']
+                args['security_groups'] = [ gid ]
+        return args
+
+    def make_virtual_link_args(self, link_params):
+        """
+        Function to create kwargs required for neutron_network_create API
+        
+        Arguments:
+         link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()
+
+        Returns:
+          A kwargs dictionary for network operation
+        """
+        kwargs = dict()
+        kwargs['name']            = link_params.name
+        kwargs['admin_state_up']  = True
+        kwargs['external_router'] = False
+        kwargs['shared']          = False
+
+        if link_params.has_field('provider_network'):
+            if link_params.provider_network.has_field('physical_network'):
+                kwargs['physical_network'] = link_params.provider_network.physical_network
+            if link_params.provider_network.has_field('overlay_type'):
+                # Yang enum is upper-case; neutron expects lower-case.
+                kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
+            if link_params.provider_network.has_field('segmentation_id'):
+                kwargs['segmentation_id'] = link_params.provider_network.segmentation_id
+
+        return kwargs
+
+    def make_subnet_args(self, link_params, network_id):
+        """
+        Function to create kwargs required for neutron_subnet_create API
+        
+        Arguments:
+         link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()
+         network_id: id of the neutron network the subnet belongs to
+
+        Returns:
+          A kwargs dictionary for subnet operation
+
+        Raises:
+          NeutronException.NotFound when a named subnet prefix pool does
+          not exist; NeutronException.NeutronException when no prefix or
+          pool is specified at all.
+        """
+        kwargs = {'network_id' : network_id,
+                  'dhcp_params': {'enable_dhcp': True},
+                  'gateway_ip' : None,}
+
+        if link_params.ip_profile_params.has_field('ip_version'):
+            kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
+        else:
+            kwargs['ip_version'] = 4
+
+        # CIDR resolution precedence: explicit subnet_address, then a named
+        # subnet prefix pool, then the legacy link_params.subnet field.
+        if link_params.ip_profile_params.has_field('subnet_address'):
+            kwargs['cidr'] = link_params.ip_profile_params.subnet_address
+        elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
+            name = link_params.ip_profile_params.subnet_prefix_pool
+            pools = [ p['id']  for p in self.driver._neutron_subnet_prefix_pool if p['name'] == name ]
+            if not pools:
+                self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
+                               link_params.ip_profile_params.subnet_prefix_pool,
+                               link_params.name)
+                raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
+            
+            kwargs['subnetpool_id'] = pools[0]
+            
+        elif link_params.has_field('subnet'):
+            kwargs['cidr'] = link_params.subnet
+        else:
+            raise NeutronException.NeutronException("No IP Prefix or Pool name specified")
+
+        if link_params.ip_profile_params.has_field('dhcp_params'):
+            if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
+                kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
+            if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
+                kwargs['dhcp_params']['start_address']  = link_params.ip_profile_params.dhcp_params.start_address
+            if link_params.ip_profile_params.dhcp_params.has_field('count'):
+                kwargs['dhcp_params']['count']  = link_params.ip_profile_params.dhcp_params.count
+
+        if link_params.ip_profile_params.has_field('dns_server'):
+            kwargs['dns_server'] = []
+            for server in link_params.ip_profile_params.dns_server:
+                kwargs['dns_server'].append(server.address)
+
+        if link_params.ip_profile_params.has_field('gateway_address'):
+            kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
+
+        return kwargs
diff --git a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
index 2be896a..eac0d6c 100644
--- a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
+++ b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
@@ -19,20 +19,22 @@
 import logging
 import os
 import subprocess
-import uuid
 import tempfile
 import yaml
 
+import gi
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwcalYang', '1.0')
+
 import rift.rwcal.openstack as openstack_drv
+
+
 import rw_status
 import rift.cal.rwcal_status as rwcal_status
 import rwlogger
 import neutronclient.common.exceptions as NeutronException
 import keystoneclient.exceptions as KeystoneExceptions
-import tornado
-import gi
 
-gi.require_version('RwSdn', '1.0')
 
 from gi.repository import (
     GObject,
@@ -50,8 +52,6 @@
 rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
 
 
-espec_utils = openstack_drv.OpenstackExtraSpecUtils()
-
 class OpenstackCALOperationFailure(Exception):
     pass
 
@@ -67,6 +67,25 @@
     pass
 
 
+class RwcalAccountDriver(object):
+    """
+    Per-cloud-account container owning an OpenstackDriver instance.
+    """
+    def __init__(self, logger, **kwargs):
+        """
+        Arguments:
+          logger: logger used for this account
+          kwargs: credential/connection arguments forwarded to OpenstackDriver()
+        """
+        self.log = logger
+        try:
+            self._driver = openstack_drv.OpenstackDriver(logger = self.log, **kwargs)
+        except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
+                NeutronException.NotFound):
+            # Auth/lookup failures propagate untouched so callers can report
+            # precise credential errors.
+            raise
+        except Exception as e:
+            self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def driver(self):
+        # The underlying OpenstackDriver for this account.
+        return self._driver
 class RwcalOpenstackPlugin(GObject.Object, RwCal.Cloud):
     """This class implements the CAL VALA methods for openstack."""
 
@@ -74,46 +93,51 @@
 
     def __init__(self):
         GObject.Object.__init__(self)
-        self._driver_class = openstack_drv.OpenstackDriver
         self.log = logging.getLogger('rwcal.openstack.%s' % RwcalOpenstackPlugin.instance_num)
         self.log.setLevel(logging.DEBUG)
-
         self._rwlog_handler = None
+        # Cache of RwcalAccountDriver objects, keyed per account (see
+        # _get_account_key / _use_driver).
+        self._account_drivers = dict()
         RwcalOpenstackPlugin.instance_num += 1
 
-    @contextlib.contextmanager
+    def _get_account_key(self, account):
+        """
+        Build a cache key for a cloud account by concatenating the string
+        form of every openstack credential field followed by the account name.
+
+        Arguments:
+          account: cloud account GI object carrying an 'openstack' section
+
+        Returns:
+          str key identifying this account's credential set
+        """
+        parts = []
+        for f in account.openstack.fields:
+            try:
+                parts.append(str(getattr(account.openstack, f)))
+            except Exception:
+                # Best effort: skip fields that cannot be read/stringified.
+                pass
+        parts.append(account.name)
+        return ''.join(parts)
+    
     def _use_driver(self, account):
         if self._rwlog_handler is None:
             raise UninitializedPluginError("Must call init() in CAL plugin before use.")
 
-        with rwlogger.rwlog_root_handler(self._rwlog_handler):
-            try:
-                drv = self._driver_class(username            = account.openstack.key,
-                                         password            = account.openstack.secret,
-                                         auth_url            = account.openstack.auth_url,
-                                         tenant_name         = account.openstack.tenant,
-                                         mgmt_network        = account.openstack.mgmt_network,
-                                         cert_validate       = account.openstack.cert_validate,
-                                         user_domain_name    = account.openstack.user_domain,
-                                         project_domain_name = account.openstack.project_domain,
-                                         region              = account.openstack.region)
-            except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,
-                        NeutronException.NotFound) as e:
-                raise
-            except Exception as e:
-                self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
-                raise
-
-            yield drv
-
+        # Return the cached driver for this account, creating it on first use.
+        acct_key = self._get_account_key(account)
+
+        if acct_key not in self._account_drivers:
+            self.log.debug("Creating OpenstackDriver")
+            kwargs = dict(username = account.openstack.key,
+                          password = account.openstack.secret,
+                          auth_url = account.openstack.auth_url,
+                          project = account.openstack.tenant,
+                          mgmt_network = account.openstack.mgmt_network,
+                          cert_validate = account.openstack.cert_validate,
+                          user_domain = account.openstack.user_domain,
+                          project_domain = account.openstack.project_domain,
+                          region = account.openstack.region)
+            drv = RwcalAccountDriver(self.log, **kwargs)
+            # Store under the same key used for lookup; caching under
+            # account.name would make every lookup miss and rebuild the driver.
+            self._account_drivers[acct_key] = drv
+            return drv.driver
+        return self._account_drivers[acct_key].driver
+
 
     @rwstatus
     def do_init(self, rwlog_ctx):
-        self._rwlog_handler = rwlogger.RwLogger(
-                category="rw-cal-log",
-                subcategory="openstack",
-                log_hdl=rwlog_ctx,
-                )
+        self._rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+                                                subcategory="openstack",
+                                                log_hdl=rwlog_ctx,)
         self.log.addHandler(self._rwlog_handler)
         self.log.propagate = False
 
@@ -130,24 +154,24 @@
             Validation Code and Details String
         """
         status = RwcalYang.CloudConnectionStatus()
+        drv = self._use_driver(account) 
         try:
-            with self._use_driver(account) as drv:
-                drv.validate_account_creds()
-
+            drv.validate_account_creds()
         except KeystoneExceptions.Unauthorized as e:
-            self.log.error("Invalid credentials given for VIM account %s" %account.name)
+            self.log.error("Invalid credentials given for VIM account %s", account.name)
             status.status = "failure"
             status.details = "Invalid Credentials: %s" % str(e)
 
         except KeystoneExceptions.AuthorizationFailure as e:
-            self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s" % (
-            account.name, account.openstack.auth_url))
+            self.log.error("Bad authentication URL given for VIM account %s. Given auth url: %s",
+                           account.name, account.openstack.auth_url)
             status.status = "failure"
             status.details = "Invalid auth url: %s" % str(e)
 
         except NeutronException.NotFound as e:
-            self.log.error("Given management network %s could not be found for VIM account %s" % (
-                        account.openstack.mgmt_network, account.name))
+            self.log.error("Given management network %s could not be found for VIM account %s",
+                           account.openstack.mgmt_network,
+                           account.name)
             status.status = "failure"
             status.details = "mgmt network does not exist: %s" % str(e)
 
@@ -261,42 +285,39 @@
         Returns:
             The image id
         """
+        drv = self._use_driver(account)
+        fd = drv.utils.image.create_image_handle(image)
+        kwargs = drv.utils.image.make_image_args(image)
 
         try:
-            # If the use passed in a file descriptor, use that to
-            # upload the image.
-            if image.has_field("fileno"):
-                new_fileno = os.dup(image.fileno)
-                hdl = os.fdopen(new_fileno, 'rb')
-            else:
-                hdl = open(image.location, "rb")
+            # Create Image
+            image_id = drv.glance_image_create(**kwargs)
+            drv.glance_image_upload(image_id, fd)
         except Exception as e:
-            self.log.error("Could not open file for upload. Exception received: %s", str(e))
+            self.log.exception("Exception %s occured during image create", str(e))
             raise
-
-        with hdl as fd:
-            kwargs = {}
-            kwargs['name'] = image.name
-
-            if image.disk_format:
-                kwargs['disk_format'] = image.disk_format
-            if image.container_format:
-                kwargs['container_format'] = image.container_format
-
-            with self._use_driver(account) as drv:
-                # Create Image
-                image_id = drv.glance_image_create(**kwargs)
-                # Upload the Image
-                drv.glance_image_upload(image_id, fd)
-
-                if image.checksum:
-                    stored_image = drv.glance_image_get(image_id)
-                    if stored_image.checksum != image.checksum:
-                        drv.glance_image_delete(image_id=image_id)
-                        raise ImageUploadError(
-                                "image checksum did not match (actual: %s, expected: %s). Deleting." %
-                                (stored_image.checksum, image.checksum)
-                                )
+        finally:
+            fd.close()
+            
+        # Update image properties, if they are provided
+        try:
+            if image.has_field("properties") and image.properties is not None:
+                for key in image.properties:
+                    drv.glance_image_update(image_id, **{key.name: key.property_value})
+        except Exception as e:
+            self.log.exception("Exception %s occured during image update", str(e))
+            raise
+        
+        if image.checksum:
+            try:
+                stored_image = drv.glance_image_get(image_id)
+                if stored_image.checksum != image.checksum:
+                    drv.glance_image_delete(image_id=image_id)
+                    raise ImageUploadError("image checksum did not match (actual: %s, expected: %s). Deleting." %
+                                           (stored_image.checksum, image.checksum))
+            except Exception as e:
+                self.log.exception("Exception %s occured during image checksum verification", str(e))
+                raise
 
         return image_id
 
@@ -308,36 +329,14 @@
             account - a cloud account
             image_id - id of the image to delete
         """
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             drv.glance_image_delete(image_id=image_id)
+        except Exception as e:
+            self.log.exception("Exception %s occured during image deletion", str(e))
+            raise
 
 
-    @staticmethod
-    def _fill_image_info(img_info):
-        """Create a GI object from image info dictionary
-
-        Converts image information dictionary object returned by openstack
-        driver into Protobuf Gi Object
-
-        Arguments:
-            account - a cloud account
-            img_info - image information dictionary object from openstack
-
-        Returns:
-            The ImageInfoItem
-        """
-        img = RwcalYang.ImageInfoItem()
-        img.name = img_info['name']
-        img.id = img_info['id']
-        img.checksum = img_info['checksum']
-        img.disk_format = img_info['disk_format']
-        img.container_format = img_info['container_format']
-        if img_info['status'] == 'active':
-            img.state = 'active'
-        else:
-            img.state = 'inactive'
-        return img
-
     @rwstatus(ret_on_failure=[[]])
     def do_get_image_list(self, account):
         """Return a list of the names of all available images.
@@ -349,10 +348,14 @@
             The the list of images in VimResources object
         """
         response = RwcalYang.VimResources()
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             images = drv.glance_image_list()
-        for img in images:
-            response.imageinfo_list.append(RwcalOpenstackPlugin._fill_image_info(img))
+            for img in images:
+                response.imageinfo_list.append(drv.utils.image.parse_cloud_image_info(img))
+        except Exception as e:
+            self.log.exception("Exception %s occured during get-image-list", str(e))
+            raise
         return response
 
     @rwstatus(ret_on_failure=[None])
@@ -366,9 +369,15 @@
         Returns:
             ImageInfoItem object containing image information.
         """
-        with self._use_driver(account) as drv:
-            image = drv.glance_image_get(image_id)
-        return RwcalOpenstackPlugin._fill_image_info(image)
+        drv = self._use_driver(account)
+        try:
+            image_info = drv.glance_image_get(image_id)
+            image =  drv.utils.image.parse_cloud_image_info(image_info)
+        except Exception as e:
+            self.log.exception("Exception %s occured during get-image", str(e))
+            raise
+        return image
+    
 
     # This is being deprecated. Please do not use for new SW development
     @rwstatus(ret_on_failure=[""])
@@ -382,20 +391,21 @@
         Returns:
             The image id
         """
+        from warnings import warn
+        warn("This function is deprecated")
         kwargs = {}
         kwargs['name']      = vminfo.vm_name
         kwargs['flavor_id'] = vminfo.flavor_id
         if vminfo.has_field('image_id'):
             kwargs['image_id']  = vminfo.image_id
 
-        with self._use_driver(account) as drv:
-            ### If floating_ip is required and we don't have one, better fail before any further allocation
-            pool_name = None
-            floating_ip = False
-            if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
-                if account.openstack.has_field('floating_ip_pool'):
-                    pool_name = account.openstack.floating_ip_pool
-                floating_ip = True
+        ### If floating_ip is required and we don't have one, better fail before any further allocation
+        pool_name = None
+        floating_ip = False
+        if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
+            if account.openstack.has_field('floating_ip_pool'):
+                pool_name = account.openstack.floating_ip_pool
+            floating_ip = True
 
         if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
             kwargs['userdata']  = vminfo.cloud_init.userdata
@@ -435,10 +445,10 @@
         else:
             kwargs['scheduler_hints'] = None
 
-        with self._use_driver(account) as drv:
-            vm_id = drv.nova_server_create(**kwargs)
-            if floating_ip:
-                self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+        drv = self._use_driver(account)
+        vm_id = drv.nova_server_create(**kwargs)
+        if floating_ip:
+            self.prepare_vdu_on_boot(account, vm_id, floating_ip)
 
         return vm_id
 
@@ -450,8 +460,8 @@
             account - a cloud account
             vm_id - an id of the VM
         """
-        with self._use_driver(account) as drv:
-            drv.nova_server_start(vm_id)
+        drv = self._use_driver(account)
+        drv.nova_server_start(vm_id)
 
     @rwstatus
     def do_stop_vm(self, account, vm_id):
@@ -461,8 +471,8 @@
             account - a cloud account
             vm_id - an id of the VM
         """
-        with self._use_driver(account) as drv:
-            drv.nova_server_stop(vm_id)
+        drv = self._use_driver(account)
+        drv.nova_server_stop(vm_id)
 
     @rwstatus
     def do_delete_vm(self, account, vm_id):
@@ -472,8 +482,8 @@
             account - a cloud account
             vm_id - an id of the VM
         """
-        with self._use_driver(account) as drv:
-            drv.nova_server_delete(vm_id)
+        drv = self._use_driver(account)
+        drv.nova_server_delete(vm_id)
 
     @rwstatus
     def do_reboot_vm(self, account, vm_id):
@@ -483,8 +493,8 @@
             account - a cloud account
             vm_id - an id of the VM
         """
-        with self._use_driver(account) as drv:
-            drv.nova_server_reboot(vm_id)
+        drv = self._use_driver(account)
+        drv.nova_server_reboot(vm_id)
 
     @staticmethod
     def _fill_vm_info(vm_info, mgmt_network):
@@ -510,7 +520,7 @@
             if network_info:
                 if network_name == mgmt_network:
                     vm.public_ip = next((item['addr']
-                                            for item in network_info
+                                         for item in network_info
                                             if item['OS-EXT-IPS:type'] == 'floating'),
                                         network_info[0]['addr'])
                     vm.management_ip = network_info[0]['addr']
@@ -548,8 +558,8 @@
             List containing VM information
         """
         response = RwcalYang.VimResources()
-        with self._use_driver(account) as drv:
-            vms = drv.nova_server_list()
+        drv = self._use_driver(account)
+        vms = drv.nova_server_list()
         for vm in vms:
             response.vminfo_list.append(RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network))
         return response
@@ -565,159 +575,10 @@
         Returns:
             VM information
         """
-        with self._use_driver(account) as drv:
-            vm = drv.nova_server_get(id)
+        drv = self._use_driver(account)
+        vm = drv.nova_server_get(id)
         return RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network)
 
-    @staticmethod
-    def _get_guest_epa_specs(guest_epa):
-        """
-        Returns EPA Specs dictionary for guest_epa attributes
-        """
-        epa_specs = {}
-        if guest_epa.has_field('mempage_size'):
-            mempage_size = espec_utils.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
-            if mempage_size is not None:
-                epa_specs['hw:mem_page_size'] = mempage_size
-
-        if guest_epa.has_field('cpu_pinning_policy'):
-            cpu_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
-            if cpu_pinning_policy is not None:
-                epa_specs['hw:cpu_policy'] = cpu_pinning_policy
-
-        if guest_epa.has_field('cpu_thread_pinning_policy'):
-            cpu_thread_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
-            if cpu_thread_pinning_policy is None:
-                epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
-
-        if guest_epa.has_field('trusted_execution'):
-            trusted_execution = espec_utils.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
-            if trusted_execution is not None:
-                epa_specs['trust:trusted_host'] = trusted_execution
-
-        if guest_epa.has_field('numa_node_policy'):
-            if guest_epa.numa_node_policy.has_field('node_cnt'):
-                numa_node_count = espec_utils.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
-                if numa_node_count is not None:
-                    epa_specs['hw:numa_nodes'] = numa_node_count
-
-            if guest_epa.numa_node_policy.has_field('mem_policy'):
-                numa_memory_policy = espec_utils.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
-                if numa_memory_policy is not None:
-                    epa_specs['hw:numa_mempolicy'] = numa_memory_policy
-
-            if guest_epa.numa_node_policy.has_field('node'):
-                for node in guest_epa.numa_node_policy.node:
-                    if node.has_field('vcpu') and node.vcpu:
-                        epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j.id) for j in node.vcpu])
-                    if node.memory_mb:
-                        epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
-
-        if guest_epa.has_field('pcie_device'):
-            pci_devices = []
-            for device in guest_epa.pcie_device:
-                pci_devices.append(device.device_id +':'+str(device.count))
-            epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
-
-        return epa_specs
-
-    @staticmethod
-    def _get_host_epa_specs(host_epa):
-        """
-        Returns EPA Specs dictionary for host_epa attributes
-        """
-
-        epa_specs = {}
-
-        if host_epa.has_field('cpu_model'):
-            cpu_model = espec_utils.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
-            if cpu_model is not None:
-                epa_specs['capabilities:cpu_info:model'] = cpu_model
-
-        if host_epa.has_field('cpu_arch'):
-            cpu_arch = espec_utils.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
-            if cpu_arch is not None:
-                epa_specs['capabilities:cpu_info:arch'] = cpu_arch
-
-        if host_epa.has_field('cpu_vendor'):
-            cpu_vendor = espec_utils.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
-            if cpu_vendor is not None:
-                epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
-
-        if host_epa.has_field('cpu_socket_count'):
-            cpu_socket_count = espec_utils.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
-            if cpu_socket_count is not None:
-                epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
-
-        if host_epa.has_field('cpu_core_count'):
-            cpu_core_count = espec_utils.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
-            if cpu_core_count is not None:
-                epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
-
-        if host_epa.has_field('cpu_core_thread_count'):
-            cpu_core_thread_count = espec_utils.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
-            if cpu_core_thread_count is not None:
-                epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
-
-        if host_epa.has_field('cpu_feature'):
-            cpu_features = []
-            espec_cpu_features = []
-            for feature in host_epa.cpu_feature:
-                cpu_features.append(feature.feature)
-            espec_cpu_features = espec_utils.host.mano_to_extra_spec_cpu_features(cpu_features)
-            if espec_cpu_features is not None:
-                epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
-        return epa_specs
-
-    @staticmethod
-    def _get_hypervisor_epa_specs(guest_epa):
-        """
-        Returns EPA Specs dictionary for hypervisor_epa attributes
-        """
-        hypervisor_epa = {}
-        return hypervisor_epa
-
-    @staticmethod
-    def _get_vswitch_epa_specs(guest_epa):
-        """
-        Returns EPA Specs dictionary for vswitch_epa attributes
-        """
-        vswitch_epa = {}
-        return vswitch_epa
-
-    @staticmethod
-    def _get_host_aggregate_epa_specs(host_aggregate):
-        """
-        Returns EPA Specs dictionary for host aggregates
-        """
-        epa_specs = {}
-        for aggregate in host_aggregate:
-            epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
-
-        return epa_specs
-
-    @staticmethod
-    def _get_epa_specs(flavor):
-        """
-        Returns epa_specs dictionary based on flavor information
-        """
-        epa_specs = {}
-        if flavor.has_field('guest_epa'):
-            guest_epa = RwcalOpenstackPlugin._get_guest_epa_specs(flavor.guest_epa)
-            epa_specs.update(guest_epa)
-        if flavor.has_field('host_epa'):
-            host_epa = RwcalOpenstackPlugin._get_host_epa_specs(flavor.host_epa)
-            epa_specs.update(host_epa)
-        if flavor.has_field('hypervisor_epa'):
-            hypervisor_epa = RwcalOpenstackPlugin._get_hypervisor_epa_specs(flavor.hypervisor_epa)
-            epa_specs.update(hypervisor_epa)
-        if flavor.has_field('vswitch_epa'):
-            vswitch_epa = RwcalOpenstackPlugin._get_vswitch_epa_specs(flavor.vswitch_epa)
-            epa_specs.update(vswitch_epa)
-        if flavor.has_field('host_aggregate'):
-            host_aggregate = RwcalOpenstackPlugin._get_host_aggregate_epa_specs(flavor.host_aggregate)
-            epa_specs.update(host_aggregate)
-        return epa_specs
 
     @rwstatus(ret_on_failure=[""])
     def do_create_flavor(self, account, flavor):
@@ -730,14 +591,18 @@
         Returns:
             flavor id
         """
-        epa_specs = RwcalOpenstackPlugin._get_epa_specs(flavor)
-        with self._use_driver(account) as drv:
-            return drv.nova_flavor_create(name      = flavor.name,
-                                          ram       = flavor.vm_flavor.memory_mb,
-                                          vcpus     = flavor.vm_flavor.vcpu_count,
-                                          disk      = flavor.vm_flavor.storage_gb,
-                                          epa_specs = epa_specs)
-
+        drv = self._use_driver(account)
+        try:
+            flavor_id = drv.nova_flavor_create(name      = flavor.name,
+                                               ram       = flavor.vm_flavor.memory_mb,
+                                               vcpus     = flavor.vm_flavor.vcpu_count,
+                                               disk      = flavor.vm_flavor.storage_gb,
+                                               epa_specs = drv.utils.flavor.get_extra_specs(flavor))
+        except Exception as e:
+            self.log.error("Encountered exceptions during Flavor creation. Exception: %s", str(e))
+            raise
+            
+        return flavor_id  
 
     @rwstatus
     def do_delete_flavor(self, account, flavor_id):
@@ -747,148 +612,12 @@
             account - a cloud account
             flavor_id - id flavor of the VM
         """
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             drv.nova_flavor_delete(flavor_id)
-
-    @staticmethod
-    def _fill_epa_attributes(flavor, flavor_info):
-        """Helper function to populate the EPA attributes
-
-        Arguments:
-              flavor     : Object with EPA attributes
-              flavor_info: A dictionary of flavor_info received from openstack
-        Returns:
-              None
-        """
-        getattr(flavor, 'vm_flavor').vcpu_count  = flavor_info['vcpus']
-        getattr(flavor, 'vm_flavor').memory_mb   = flavor_info['ram']
-        getattr(flavor, 'vm_flavor').storage_gb  = flavor_info['disk']
-
-        ### If extra_specs in flavor_info
-        if not 'extra_specs' in flavor_info:
-            return
-
-        for attr in flavor_info['extra_specs']:
-            if attr == 'hw:cpu_policy':
-                cpu_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
-                if cpu_pinning_policy is not None:
-                    getattr(flavor, 'guest_epa').cpu_pinning_policy = cpu_pinning_policy
-
-            elif attr == 'hw:cpu_threads_policy':
-                cpu_thread_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
-                if cpu_thread_pinning_policy is not None:
-                    getattr(flavor, 'guest_epa').cpu_thread_pinning_policy = cpu_thread_pinning_policy
-
-            elif attr == 'hw:mem_page_size':
-                mempage_size = espec_utils.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
-                if mempage_size is not None:
-                    getattr(flavor, 'guest_epa').mempage_size = mempage_size
-
-
-            elif attr == 'hw:numa_nodes':
-                numa_node_count = espec_utils.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
-                if numa_node_count is not None:
-                    getattr(flavor,'guest_epa').numa_node_policy.node_cnt = numa_node_count
-
-            elif attr.startswith('hw:numa_cpus.'):
-                node_id = attr.split('.')[1]
-                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
-                if nodes:
-                    numa_node = nodes[0]
-                else:
-                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
-                    numa_node.id = int(node_id)
-
-                for x in flavor_info['extra_specs'][attr].split(','):
-                   numa_node_vcpu = numa_node.vcpu.add()
-                   numa_node_vcpu.id = int(x)
-
-            elif attr.startswith('hw:numa_mem.'):
-                node_id = attr.split('.')[1]
-                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
-                if nodes:
-                    numa_node = nodes[0]
-                else:
-                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
-                    numa_node.id = int(node_id)
-
-                numa_node.memory_mb =  int(flavor_info['extra_specs'][attr])
-
-            elif attr == 'hw:numa_mempolicy':
-                numa_memory_policy = espec_utils.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
-                if numa_memory_policy is not None:
-                    getattr(flavor,'guest_epa').numa_node_policy.mem_policy = numa_memory_policy
-
-            elif attr == 'trust:trusted_host':
-                trusted_execution = espec_utils.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
-                if trusted_execution is not None:
-                    getattr(flavor,'guest_epa').trusted_execution = trusted_execution
-
-            elif attr == 'pci_passthrough:alias':
-                device_types = flavor_info['extra_specs']['pci_passthrough:alias']
-                for device in device_types.split(','):
-                    dev = getattr(flavor,'guest_epa').pcie_device.add()
-                    dev.device_id = device.split(':')[0]
-                    dev.count = int(device.split(':')[1])
-
-            elif attr == 'capabilities:cpu_info:model':
-                cpu_model = espec_utils.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
-                if cpu_model is not None:
-                    getattr(flavor, 'host_epa').cpu_model = cpu_model
-
-            elif attr == 'capabilities:cpu_info:arch':
-                cpu_arch = espec_utils.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
-                if cpu_arch is not None:
-                    getattr(flavor, 'host_epa').cpu_arch = cpu_arch
-
-            elif attr == 'capabilities:cpu_info:vendor':
-                cpu_vendor = espec_utils.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
-                if cpu_vendor is not None:
-                    getattr(flavor, 'host_epa').cpu_vendor = cpu_vendor
-
-            elif attr == 'capabilities:cpu_info:topology:sockets':
-                cpu_sockets = espec_utils.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
-                if cpu_sockets is not None:
-                    getattr(flavor, 'host_epa').cpu_socket_count = cpu_sockets
-
-            elif attr == 'capabilities:cpu_info:topology:cores':
-                cpu_cores = espec_utils.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
-                if cpu_cores is not None:
-                    getattr(flavor, 'host_epa').cpu_core_count = cpu_cores
-
-            elif attr == 'capabilities:cpu_info:topology:threads':
-                cpu_threads = espec_utils.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
-                if cpu_threads is not None:
-                    getattr(flavor, 'host_epa').cpu_core_thread_count = cpu_threads
-
-            elif attr == 'capabilities:cpu_info:features':
-                cpu_features = espec_utils.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
-                if cpu_features is not None:
-                    for feature in cpu_features:
-                        getattr(flavor, 'host_epa').cpu_feature.append(feature)
-            elif attr.startswith('aggregate_instance_extra_specs:'):
-                    aggregate = getattr(flavor, 'host_aggregate').add()
-                    aggregate.metadata_key = ":".join(attr.split(':')[1::])
-                    aggregate.metadata_value = flavor_info['extra_specs'][attr]
-
-    @staticmethod
-    def _fill_flavor_info(flavor_info):
-        """Create a GI object from flavor info dictionary
-
-        Converts Flavor information dictionary object returned by openstack
-        driver into Protobuf Gi Object
-
-        Arguments:
-            flavor_info: Flavor information from openstack
-
-        Returns:
-             Object of class FlavorInfoItem
-        """
-        flavor = RwcalYang.FlavorInfoItem()
-        flavor.name                       = flavor_info['name']
-        flavor.id                         = flavor_info['id']
-        RwcalOpenstackPlugin._fill_epa_attributes(flavor, flavor_info)
-        return flavor
+        except Exception as e:
+            self.log.error("Encountered exceptions during Flavor deletion. Exception: %s", str(e))
+            raise
 
 
     @rwstatus(ret_on_failure=[[]])
@@ -902,10 +631,15 @@
             List of flavors
         """
         response = RwcalYang.VimResources()
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             flavors = drv.nova_flavor_list()
-        for flv in flavors:
-            response.flavorinfo_list.append(RwcalOpenstackPlugin._fill_flavor_info(flv))
+            for flv in flavors:
+                response.flavorinfo_list.append(drv.utils.flavor.parse_flavor_info(flv))
+        except Exception as e:
+            self.log.error("Encountered exceptions during get-flavor-list. Exception: %s", str(e))
+            raise
+
         return response
 
     @rwstatus(ret_on_failure=[None])
@@ -919,9 +653,15 @@
         Returns:
             Flavor info item
         """
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             flavor = drv.nova_flavor_get(id)
-        return RwcalOpenstackPlugin._fill_flavor_info(flavor)
+            response = drv.utils.flavor.parse_flavor_info(flavor)
+        except Exception as e:
+            self.log.error("Encountered exceptions during get-flavor. Exception: %s", str(e))
+            raise
+        
+        return response
 
 
     def _fill_network_info(self, network_info, account):
@@ -949,8 +689,8 @@
 
         if 'subnets' in network_info and network_info['subnets']:
             subnet_id = network_info['subnets'][0]
-            with self._use_driver(account) as drv:
-                subnet = drv.neutron_subnet_get(subnet_id)
+            drv = self._use_driver(account)
+            subnet = drv.neutron_subnet_get(subnet_id)
             network.subnet = subnet['cidr']
         return network
 
@@ -965,8 +705,8 @@
             List of networks
         """
         response = RwcalYang.VimResources()
-        with self._use_driver(account) as drv:
-            networks = drv.neutron_network_list()
+        drv = self._use_driver(account)
+        networks = drv.neutron_network_list()
         for network in networks:
             response.networkinfo_list.append(self._fill_network_info(network, account))
         return response
@@ -982,8 +722,8 @@
         Returns:
             Network info item
         """
-        with self._use_driver(account) as drv:
-            network = drv.neutron_network_get(id)
+        drv = self._use_driver(account)
+        network = drv.neutron_network_get(id)
         return self._fill_network_info(network, account)
 
     @rwstatus(ret_on_failure=[""])
@@ -997,6 +737,9 @@
         Returns:
             Network id
         """
+        from warnings import warn
+        warn("This function is deprecated")
+
         kwargs = {}
         kwargs['name']            = network.network_name
         kwargs['admin_state_up']  = True
@@ -1011,10 +754,10 @@
             if network.provider_network.has_field('segmentation_id'):
                 kwargs['segmentation_id'] = network.provider_network.segmentation_id
 
-        with self._use_driver(account) as drv:
-            network_id = drv.neutron_network_create(**kwargs)
-            drv.neutron_subnet_create(network_id = network_id,
-                                      cidr = network.subnet)
+        drv = self._use_driver(account)
+        network_id = drv.neutron_network_create(**kwargs)
+        drv.neutron_subnet_create(network_id = network_id,
+                                  cidr = network.subnet)
         return network_id
 
     @rwstatus
@@ -1025,8 +768,8 @@
             account - a cloud account
             network_id - an id for the network
         """
-        with self._use_driver(account) as drv:
-            drv.neutron_network_delete(network_id)
+        drv = self._use_driver(account)
+        drv.neutron_network_delete(network_id)
 
     @staticmethod
     def _fill_port_info(port_info):
@@ -1064,9 +807,8 @@
         Returns:
             Port info item
         """
-        with self._use_driver(account) as drv:
-            port = drv.neutron_port_get(port_id)
-
+        drv = self._use_driver(account)
+        port = drv.neutron_port_get(port_id)
         return RwcalOpenstackPlugin._fill_port_info(port)
 
     @rwstatus(ret_on_failure=[[]])
@@ -1080,8 +822,8 @@
             Port info list
         """
         response = RwcalYang.VimResources()
-        with self._use_driver(account) as drv:
-            ports = drv.neutron_port_list(*{})
+        drv = self._use_driver(account)
+        ports = drv.neutron_port_list(*{})
         for port in ports:
             response.portinfo_list.append(RwcalOpenstackPlugin._fill_port_info(port))
         return response
@@ -1097,6 +839,9 @@
         Returns:
             Port id
         """
+        from warnings import warn
+        warn("This function is deprecated")
+
         kwargs = {}
         kwargs['name'] = port.port_name
         kwargs['network_id'] = port.network_id
@@ -1108,8 +853,8 @@
         else:
             kwargs['port_type'] = "normal"
 
-        with self._use_driver(account) as drv:
-            return drv.neutron_port_create(**kwargs)
+        drv = self._use_driver(account)
+        return drv.neutron_port_create(**kwargs)
 
     @rwstatus
     def do_delete_port(self, account, port_id):
@@ -1119,8 +864,8 @@
             account - a cloud account
             port_id - an id for port
         """
-        with self._use_driver(account) as drv:
-            drv.neutron_port_delete(port_id)
+        drv = self._use_driver(account)
+        drv.neutron_port_delete(port_id)
 
     @rwstatus(ret_on_failure=[""])
     def do_add_host(self, account, host):
@@ -1170,162 +915,6 @@
         """
         raise NotImplementedError
 
-    @staticmethod
-    def _fill_connection_point_info(c_point, port_info):
-        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
-
-        Converts Port information dictionary object returned by openstack
-        driver into Protobuf Gi Object
-
-        Arguments:
-            port_info - Port information from openstack
-        Returns:
-            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
-        """
-        c_point.name = port_info['name']
-        c_point.connection_point_id = port_info['id']
-        if ('fixed_ips' in port_info) and (len(port_info['fixed_ips']) >= 1):
-            if 'ip_address' in port_info['fixed_ips'][0]:
-                c_point.ip_address = port_info['fixed_ips'][0]['ip_address']
-        if 'mac_address' in port_info :
-            c_point.mac_addr = port_info['mac_address']
-        if port_info['status'] == 'ACTIVE':
-            c_point.state = 'active'
-        else:
-            c_point.state = 'inactive'
-        if 'network_id' in port_info:
-            c_point.virtual_link_id = port_info['network_id']
-        if ('device_id' in port_info) and (port_info['device_id']):
-            c_point.vdu_id = port_info['device_id']
-
-    @staticmethod
-    def _fill_virtual_link_info(network_info, port_list, subnet):
-        """Create a GI object for VirtualLinkInfoParams
-
-        Converts Network and Port information dictionary object
-        returned by openstack driver into Protobuf Gi Object
-
-        Arguments:
-            network_info - Network information from openstack
-            port_list - A list of port information from openstack
-            subnet: Subnet information from openstack
-        Returns:
-            Protobuf Gi object for VirtualLinkInfoParams
-        """
-        link = RwcalYang.VirtualLinkInfoParams()
-        link.name  = network_info['name']
-        if network_info['status'] == 'ACTIVE':
-            link.state = 'active'
-        else:
-            link.state = 'inactive'
-        link.virtual_link_id = network_info['id']
-        for port in port_list:
-            if port['device_owner'] == 'compute:None':
-                c_point = link.connection_points.add()
-                RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
-
-        if subnet != None:
-            link.subnet = subnet['cidr']
-
-        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] != None):
-            link.provider_network.overlay_type = network_info['provider:network_type'].upper()
-        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
-            link.provider_network.segmentation_id = network_info['provider:segmentation_id']
-        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
-            link.provider_network.physical_network = network_info['provider:physical_network'].upper()
-
-        return link
-
-    @staticmethod
-    def _fill_vdu_info(drv, vm_info, flavor_info, mgmt_network, port_list, server_group, volume_list = None):
-        """Create a GI object for VDUInfoParams
-
-        Converts VM information dictionary object returned by openstack
-        driver into Protobuf Gi Object
-
-        Arguments:
-            vm_info - VM information from openstack
-            flavor_info - VM Flavor information from openstack
-            mgmt_network - Management network
-            port_list - A list of port information from openstack
-            server_group - A list (with one element or empty list) of server group to which this VM belongs
-        Returns:
-            Protobuf Gi object for VDUInfoParams
-        """
-        vdu = RwcalYang.VDUInfoParams()
-        vdu.name = vm_info['name']
-        vdu.vdu_id = vm_info['id']
-        for network_name, network_info in vm_info['addresses'].items():
-            if network_info and network_name == mgmt_network:
-                for interface in network_info:
-                    if 'OS-EXT-IPS:type' in interface:
-                        if interface['OS-EXT-IPS:type'] == 'fixed':
-                            vdu.management_ip = interface['addr']
-                        elif interface['OS-EXT-IPS:type'] == 'floating':
-                            vdu.public_ip = interface['addr']
-
-        # Look for any metadata
-#        for key, value in vm_info['metadata'].items():
-#            if key == 'node_id':
-#                vdu.node_id = value
-#            else:
-#                custommetadata = vdu.supplemental_boot_data.custom_meta_data.add()
-#                custommetadata.name = key
-#                custommetadata.value = str(value)
-
-        # Look for config_drive
-        if ('config_drive' in vm_info):
-            vdu.supplemental_boot_data.boot_data_drive = vm_info['config_drive']
-        if ('image' in vm_info) and ('id' in vm_info['image']):
-            vdu.image_id = vm_info['image']['id']
-        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
-            vdu.flavor_id = vm_info['flavor']['id']
-
-        if vm_info['status'] == 'ACTIVE':
-            vdu.state = 'active'
-        elif vm_info['status'] == 'ERROR':
-            vdu.state = 'failed'
-        else:
-            vdu.state = 'inactive'
-
-        if 'availability_zone' in vm_info:
-            vdu.availability_zone = vm_info['availability_zone']
-
-        if server_group:
-            vdu.server_group.name = server_group[0]
-
-        vdu.cloud_type  = 'openstack'
-        # Fill the port information
-        for port in port_list:
-            c_point = vdu.connection_points.add()
-            RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
-
-        if flavor_info is not None:
-            RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
-
-        # Fill the volume information
-        if volume_list is not None:
-            for os_volume in volume_list:
-                volr = vdu.volumes.add()
-                try:
-                   " Device name is of format /dev/vda"
-                   vol_name = (os_volume['device']).split('/')[2]
-                except:
-                   continue
-                volr.name = vol_name
-                volr.volume_id = os_volume['volumeId']
-                try:
-                   vol_details = drv.cinder_volume_get(volr.volume_id)
-                except:
-                   continue
-                if vol_details is None:
-                   continue
-                for key, value in vol_details.metadata.items():
-                      volmd = volr.custom_meta_data.add()
-                      volmd.name = key
-                      volmd.value = value
-
-        return vdu
 
     @rwcalstatus(ret_on_failure=[""])
     def do_create_virtual_link(self, account, link_params):
@@ -1336,72 +925,18 @@
             link_params - information that defines the type of VDU to create
 
         Returns:
-            The vdu_id
+            A kwargs dictionary for glance operation
         """
-        kwargs = {}
-        kwargs['name']            = link_params.name
-        kwargs['admin_state_up']  = True
-        kwargs['external_router'] = False
-        kwargs['shared']          = False
-
-        if link_params.has_field('provider_network'):
-            if link_params.provider_network.has_field('physical_network'):
-                kwargs['physical_network'] = link_params.provider_network.physical_network
-            if link_params.provider_network.has_field('overlay_type'):
-                kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
-            if link_params.provider_network.has_field('segmentation_id'):
-                kwargs['segmentation_id'] = link_params.provider_network.segmentation_id
-
-
-        with self._use_driver(account) as drv:
-            try:
-                network_id = drv.neutron_network_create(**kwargs)
-            except Exception as e:
-                self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
-                raise
-
-            kwargs = {'network_id' : network_id,
-                      'dhcp_params': {'enable_dhcp': True},
-                      'gateway_ip' : None,}
-
-            if link_params.ip_profile_params.has_field('ip_version'):
-                kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
-            else:
-                kwargs['ip_version'] = 4
-
-            if link_params.ip_profile_params.has_field('subnet_address'):
-                kwargs['cidr'] = link_params.ip_profile_params.subnet_address
-            elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
-                subnet_pool = drv.netruon_subnetpool_by_name(link_params.ip_profile_params.subnet_prefix_pool)
-                if subnet_pool is None:
-                    self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
-                                   link_params.ip_profile_params.subnet_prefix_pool,
-                                   link_params.name)
-                    raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
-
-                kwargs['subnetpool_id'] = subnet_pool['id']
-            elif link_params.has_field('subnet'):
-                kwargs['cidr'] = link_params.subnet
-            else:
-                assert 0, "No IP Prefix or Pool name specified"
-
-            if link_params.ip_profile_params.has_field('dhcp_params'):
-                if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
-                    kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
-                if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
-                    kwargs['dhcp_params']['start_address']  = link_params.ip_profile_params.dhcp_params.start_address
-                if link_params.ip_profile_params.dhcp_params.has_field('count'):
-                    kwargs['dhcp_params']['count']  = link_params.ip_profile_params.dhcp_params.count
-
-            if link_params.ip_profile_params.has_field('dns_server'):
-                kwargs['dns_server'] = []
-                for server in link_params.ip_profile_params.dns_server:
-                    kwargs['dns_server'].append(server.address)
-
-            if link_params.ip_profile_params.has_field('gateway_address'):
-                kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
-
+        
+        drv = self._use_driver(account)
+        try:
+            kwargs = drv.utils.network.make_virtual_link_args(link_params)
+            network_id = drv.neutron_network_create(**kwargs)
+            kwargs = drv.utils.network.make_subnet_args(link_params, network_id)
             drv.neutron_subnet_create(**kwargs)
+        except Exception as e:
+            self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
+            raise
 
         return network_id
 
@@ -1417,17 +952,16 @@
         Returns:
             None
         """
-        if not link_id:
-            self.log.error("Empty link_id during the virtual link deletion")
-            raise Exception("Empty link_id during the virtual link deletion")
-
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             port_list = drv.neutron_port_list(**{'network_id': link_id})
-
-        for port in port_list:
-            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
-                self.do_delete_port(account, port['id'], no_rwstatus=True)
-        self.do_delete_network(account, link_id, no_rwstatus=True)
+            for port in port_list:
+                if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
+                    self.do_delete_port(account, port['id'], no_rwstatus=True)
+            self.do_delete_network(account, link_id, no_rwstatus=True)
+        except Exception as e:
+            self.log.exception("Exception %s occured during virtual-link deletion", str(e))
+            raise
 
     @rwstatus(ret_on_failure=[None])
     def do_get_virtual_link(self, account, link_id):
@@ -1440,22 +974,20 @@
         Returns:
             Object of type RwcalYang.VirtualLinkInfoParams
         """
-        if not link_id:
-            self.log.error("Empty link_id during the virtual link get request")
-            raise Exception("Empty link_id during the virtual link get request")
-
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             network = drv.neutron_network_get(link_id)
             if network:
                 port_list = drv.neutron_port_list(**{'network_id': network['id']})
-                if 'subnets' in network:
+                if 'subnets' in network and network['subnets']:
                     subnet = drv.neutron_subnet_get(network['subnets'][0])
                 else:
                     subnet = None
-                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
-            else:
-                virtual_link = None
-            return virtual_link
+                virtual_link = drv.utils.network.parse_cloud_virtual_link_info(network, port_list, subnet)
+        except Exception as e:
+            self.log.exception("Exception %s occured during virtual-link-get", str(e))
+            raise
+        return virtual_link
 
     @rwstatus(ret_on_failure=[None])
     def do_get_virtual_link_list(self, account):
@@ -1468,586 +1000,23 @@
             A list of objects of type RwcalYang.VirtualLinkInfoParams
         """
         vnf_resources = RwcalYang.VNFResources()
-        with self._use_driver(account) as drv:
+        drv =  self._use_driver(account)
+        try:
             networks = drv.neutron_network_list()
             for network in networks:
                 port_list = drv.neutron_port_list(**{'network_id': network['id']})
-                if ('subnets' in network) and (network['subnets']):
+                if 'subnets' in network and network['subnets']:
                     subnet = drv.neutron_subnet_get(network['subnets'][0])
                 else:
                     subnet = None
-                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+                virtual_link = drv.utils.network.parse_cloud_virtual_link_info(network, port_list, subnet)
                 vnf_resources.virtual_link_info_list.append(virtual_link)
-            return vnf_resources
-
-    def _create_connection_point(self, account, c_point):
-        """
-        Create a connection point
-        Arguments:
-           account  - a cloud account
-           c_point  - connection_points
-        """
-        kwargs = {}
-        kwargs['name'] = c_point.name
-        kwargs['network_id'] = c_point.virtual_link_id
-        kwargs['admin_state_up'] = True
-
-        if c_point.type_yang == 'VIRTIO' or c_point.type_yang == 'E1000':
-            kwargs['port_type'] = 'normal'
-        elif c_point.type_yang == 'SR_IOV':
-            kwargs['port_type'] = 'direct'
-        else:
-            raise NotImplementedError("Port Type: %s not supported" %(c_point.type_yang))
-
-        # By default port gets created with post_security enaled as True
-        if 'port_security_enabled' in c_point:
-            kwargs['port_security_enabled'] = c_point.port_security_enabled
-
-        with self._use_driver(account) as drv:
-            if c_point.has_field('security_group'):
-                group = drv.neutron_security_group_by_name(c_point.security_group)
-                if group is not None:
-                    kwargs['security_groups'] = [group['id']]
-            return drv.neutron_port_create(**kwargs)
-
-    def _allocate_floating_ip(self, drv, pool_name):
-        """
-        Allocate a floating_ip. If unused floating_ip exists then its reused.
-        Arguments:
-          drv:       OpenstackDriver instance
-          pool_name: Floating IP pool name
-
-        Returns:
-          An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
-        """
-
-        # available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
-
-        # if pool_name is not None:
-        #     ### Filter further based on IP address
-        #     available_ip = [ ip for ip in available_ip if ip.pool == pool_name ]
-
-        # if not available_ip:
-        #     floating_ip = drv.nova_floating_ip_create(pool_name)
-        # else:
-        #     floating_ip = available_ip[0]
-
-        floating_ip = drv.nova_floating_ip_create(pool_name)
-        return floating_ip
-
-    def _match_vm_flavor(self, required, available):
-        self.log.info("Matching VM Flavor attributes")
-        if available.vcpu_count != required.vcpu_count:
-            self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
-                            required.vcpu_count,
-                            available.vcpu_count)
-            return False
-        if available.memory_mb != required.memory_mb:
-            self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
-                            required.memory_mb,
-                            available.memory_mb)
-            return False
-        if available.storage_gb != required.storage_gb:
-            self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
-                            required.storage_gb,
-                            available.storage_gb)
-            return False
-        self.log.debug("VM Flavor match found")
-        return True
-
-    def _match_guest_epa(self, required, available):
-        self.log.info("Matching Guest EPA attributes")
-        if required.has_field('pcie_device'):
-            self.log.debug("Matching pcie_device")
-            if available.has_field('pcie_device') == False:
-                self.log.debug("Matching pcie_device failed. Not available in flavor")
-                return False
-            else:
-                for dev in required.pcie_device:
-                    if not [ d for d in available.pcie_device
-                             if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
-                        self.log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
-                        return False
-        elif available.has_field('pcie_device'):
-            self.log.debug("Rejecting available flavor because pcie_device not required but available")
-            return False
+        except Exception as e:
+            self.log.exception("Exception %s occured during virtual-link-list-get", str(e))
+            raise
+        return vnf_resources
 
 
-        if required.has_field('mempage_size'):
-            self.log.debug("Matching mempage_size")
-            if available.has_field('mempage_size') == False:
-                self.log.debug("Matching mempage_size failed. Not available in flavor")
-                return False
-            else:
-                if required.mempage_size != available.mempage_size:
-                    self.log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
-                    return False
-        elif available.has_field('mempage_size'):
-            self.log.debug("Rejecting available flavor because mempage_size not required but available")
-            return False
-
-        if required.has_field('cpu_pinning_policy'):
-            self.log.debug("Matching cpu_pinning_policy")
-            if required.cpu_pinning_policy != 'ANY':
-                if available.has_field('cpu_pinning_policy') == False:
-                    self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
-                    return False
-                else:
-                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
-                        self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
-                        return False
-        elif available.has_field('cpu_pinning_policy'):
-            self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
-            return False
-
-        if required.has_field('cpu_thread_pinning_policy'):
-            self.log.debug("Matching cpu_thread_pinning_policy")
-            if available.has_field('cpu_thread_pinning_policy') == False:
-                self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
-                return False
-            else:
-                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
-                    self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
-                    return False
-        elif available.has_field('cpu_thread_pinning_policy'):
-            self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
-            return False
-
-        if required.has_field('trusted_execution'):
-            self.log.debug("Matching trusted_execution")
-            if required.trusted_execution == True:
-                if available.has_field('trusted_execution') == False:
-                    self.log.debug("Matching trusted_execution failed. Not available in flavor")
-                    return False
-                else:
-                    if required.trusted_execution != available.trusted_execution:
-                        self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
-                        return False
-        elif available.has_field('trusted_execution'):
-            self.log.debug("Rejecting available flavor because trusted_execution not required but available")
-            return False
-
-        if required.has_field('numa_node_policy'):
-            self.log.debug("Matching numa_node_policy")
-            if available.has_field('numa_node_policy') == False:
-                self.log.debug("Matching numa_node_policy failed. Not available in flavor")
-                return False
-            else:
-                if required.numa_node_policy.has_field('node_cnt'):
-                    self.log.debug("Matching numa_node_policy node_cnt")
-                    if available.numa_node_policy.has_field('node_cnt') == False:
-                        self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
-                        return False
-                    else:
-                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
-                            self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
-                            return False
-                elif available.numa_node_policy.has_field('node_cnt'):
-                    self.log.debug("Rejecting available flavor because numa node count not required but available")
-                    return False
-
-                if required.numa_node_policy.has_field('mem_policy'):
-                    self.log.debug("Matching numa_node_policy mem_policy")
-                    if available.numa_node_policy.has_field('mem_policy') == False:
-                        self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
-                        return False
-                    else:
-                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
-                            self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
-                            return False
-                elif available.numa_node_policy.has_field('mem_policy'):
-                    self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
-                    return False
-
-                if required.numa_node_policy.has_field('node'):
-                    self.log.debug("Matching numa_node_policy nodes configuration")
-                    if available.numa_node_policy.has_field('node') == False:
-                        self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
-                        return False
-                    for required_node in required.numa_node_policy.node:
-                        self.log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
-                        numa_match = False
-                        for available_node in available.numa_node_policy.node:
-                            if required_node.id != available_node.id:
-                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
-                                continue
-                            if required_node.vcpu != available_node.vcpu:
-                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
-                                continue
-                            if required_node.memory_mb != available_node.memory_mb:
-                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
-                                continue
-                            numa_match = True
-                        if numa_match == False:
-                            return False
-                elif available.numa_node_policy.has_field('node'):
-                    self.log.debug("Rejecting available flavor because numa nodes not required but available")
-                    return False
-        elif available.has_field('numa_node_policy'):
-            self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
-            return False
-        self.log.info("Successful match for Guest EPA attributes")
-        return True
-
-    def _match_vswitch_epa(self, required, available):
-        self.log.debug("VSwitch EPA match found")
-        return True
-
-    def _match_hypervisor_epa(self, required, available):
-        self.log.debug("Hypervisor EPA match found")
-        return True
-
-    def _match_host_epa(self, required, available):
-        self.log.info("Matching Host EPA attributes")
-        if required.has_field('cpu_model'):
-            self.log.debug("Matching CPU model")
-            if available.has_field('cpu_model') == False:
-                self.log.debug("Matching CPU model failed. Not available in flavor")
-                return False
-            else:
-                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
-                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
-                    self.log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
-                    return False
-        elif available.has_field('cpu_model'):
-            self.log.debug("Rejecting available flavor because cpu_model not required but available")
-            return False
-
-        if required.has_field('cpu_arch'):
-            self.log.debug("Matching CPU architecture")
-            if available.has_field('cpu_arch') == False:
-                self.log.debug("Matching CPU architecture failed. Not available in flavor")
-                return False
-            else:
-                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
-                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
-                    self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
-                    return False
-        elif available.has_field('cpu_arch'):
-            self.log.debug("Rejecting available flavor because cpu_arch not required but available")
-            return False
-
-        if required.has_field('cpu_vendor'):
-            self.log.debug("Matching CPU vendor")
-            if available.has_field('cpu_vendor') == False:
-                self.log.debug("Matching CPU vendor failed. Not available in flavor")
-                return False
-            else:
-                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
-                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
-                    self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
-                    return False
-        elif available.has_field('cpu_vendor'):
-            self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
-            return False
-
-        if required.has_field('cpu_socket_count'):
-            self.log.debug("Matching CPU socket count")
-            if available.has_field('cpu_socket_count') == False:
-                self.log.debug("Matching CPU socket count failed. Not available in flavor")
-                return False
-            else:
-                if required.cpu_socket_count != available.cpu_socket_count:
-                    self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
-                    return False
-        elif available.has_field('cpu_socket_count'):
-            self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
-            return False
-
-        if required.has_field('cpu_core_count'):
-            self.log.debug("Matching CPU core count")
-            if available.has_field('cpu_core_count') == False:
-                self.log.debug("Matching CPU core count failed. Not available in flavor")
-                return False
-            else:
-                if required.cpu_core_count != available.cpu_core_count:
-                    self.log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
-                    return False
-        elif available.has_field('cpu_core_count'):
-            self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
-            return False
-
-        if required.has_field('cpu_core_thread_count'):
-            self.log.debug("Matching CPU core thread count")
-            if available.has_field('cpu_core_thread_count') == False:
-                self.log.debug("Matching CPU core thread count failed. Not available in flavor")
-                return False
-            else:
-                if required.cpu_core_thread_count != available.cpu_core_thread_count:
-                    self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
-                    return False
-        elif available.has_field('cpu_core_thread_count'):
-            self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
-            return False
-
-        if required.has_field('cpu_feature'):
-            self.log.debug("Matching CPU feature list")
-            if available.has_field('cpu_feature') == False:
-                self.log.debug("Matching CPU feature list failed. Not available in flavor")
-                return False
-            else:
-                for feature in required.cpu_feature:
-                    if feature not in available.cpu_feature:
-                        self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
-                        return False
-        elif available.has_field('cpu_feature'):
-            self.log.debug("Rejecting available flavor because cpu_feature not required but available")
-            return False
-        self.log.info("Successful match for Host EPA attributes")
-        return True
-
-
-    def _match_placement_group_inputs(self, required, available):
-        self.log.info("Matching Host aggregate attributes")
-
-        if not required and not available:
-            # Host aggregate not required and not available => success
-            self.log.info("Successful match for Host Aggregate attributes")
-            return True
-        if required and available:
-            # Host aggregate requested and available => Do a match and decide
-            xx = [ x.as_dict() for x in required ]
-            yy = [ y.as_dict() for y in available ]
-            for i in xx:
-                if i not in yy:
-                    self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
-                    return False
-            self.log.info("Successful match for Host Aggregate attributes")
-            return True
-        else:
-            # Either of following conditions => Failure
-            #  - Host aggregate required but not available
-            #  - Host aggregate not required but available
-            self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
-            return False
-
-    def match_epa_params(self, resource_info, request_params):
-        result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
-                                       getattr(resource_info, 'vm_flavor'))
-        if result == False:
-            self.log.debug("VM Flavor mismatched")
-            return False
-
-        result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
-                                       getattr(resource_info, 'guest_epa'))
-        if result == False:
-            self.log.debug("Guest EPA mismatched")
-            return False
-
-        result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
-                                         getattr(resource_info, 'vswitch_epa'))
-        if result == False:
-            self.log.debug("Vswitch EPA mismatched")
-            return False
-
-        result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
-                                            getattr(resource_info, 'hypervisor_epa'))
-        if result == False:
-            self.log.debug("Hypervisor EPA mismatched")
-            return False
-
-        result = self._match_host_epa(getattr(request_params, 'host_epa'),
-                                      getattr(resource_info, 'host_epa'))
-        if result == False:
-            self.log.debug("Host EPA mismatched")
-            return False
-
-        result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
-                                                    getattr(resource_info, 'host_aggregate'))
-
-        if result == False:
-            self.log.debug("Host Aggregate mismatched")
-            return False
-
-        return True
-
-    def _select_resource_flavor(self, account, vdu_init):
-        """
-            Select a existing flavor if it matches the request or create new flavor
-        """
-        flavor = RwcalYang.FlavorInfoItem()
-        flavor.name = str(uuid.uuid4())
-        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
-        epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
-        flavor.from_dict(epa_dict)
-
-        rc, response = self.do_get_flavor_list(account)
-        if rc != RwTypes.RwStatus.SUCCESS:
-            self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
-                        account.name)
-            raise OpenstackCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))
-
-        flavor_id = None
-        flavor_list = response.flavorinfo_list
-        self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
-        for flv in flavor_list:
-            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
-                       vdu_init.name, flv)
-            if self.match_epa_params(flv, vdu_init):
-                self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
-                           vdu_init.name, flv.name, flv.id)
-                return flv.id
-
-        if account.openstack.dynamic_flavor_support is False:
-            self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
-            raise OpenstackCALOperationFailure("No resource available with matching EPA attributes")
-        else:
-            rc,flavor_id = self.do_create_flavor(account,flavor)
-            if rc != RwTypes.RwStatus.SUCCESS:
-                self.log.error("Create-flavor operation failed for cloud account: %s",
-                        account.name)
-                raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
-            return flavor_id
-
-    def _create_vm(self, account, vduinfo, pci_assignement=None, server_group=None, port_list=None, network_list=None, imageinfo_list=None):
-        """Create a new virtual machine.
-
-        Arguments:
-            account - a cloud account
-            vminfo - information that defines the type of VM to create
-
-        Returns:
-            The image id
-        """
-        kwargs = {}
-        kwargs['name']      = vduinfo.name
-        kwargs['flavor_id'] = vduinfo.flavor_id
-        if vduinfo.has_field('image_id'):
-            kwargs['image_id']  = vduinfo.image_id
-        else:
-            kwargs['image_id']  = ""
-
-        with self._use_driver(account) as drv:
-            ### If floating_ip is required and we don't have one, better fail before any further allocation
-            floating_ip = False
-            pool_name = None
-            if vduinfo.has_field('allocate_public_address') and vduinfo.allocate_public_address:
-                if account.openstack.has_field('floating_ip_pool'):
-                    pool_name = account.openstack.floating_ip_pool
-                floating_ip = True
-
-        if vduinfo.has_field('vdu_init') and vduinfo.vdu_init.has_field('userdata'):
-            kwargs['userdata'] = vduinfo.vdu_init.userdata
-        else:
-            kwargs['userdata'] = ''
-
-        if account.openstack.security_groups:
-            kwargs['security_groups'] = account.openstack.security_groups
-
-        kwargs['port_list'] = port_list
-        kwargs['network_list'] = network_list
-
-        metadata = {}
-        files = {}
-        config_drive = False
-        # Add all metadata related fields
-        if vduinfo.has_field('node_id'):
-            metadata['node_id'] = vduinfo.node_id
-        if pci_assignement is not None:
-            metadata['pci_assignement'] = pci_assignement
-        if vduinfo.has_field('supplemental_boot_data'):
-            if vduinfo.supplemental_boot_data.has_field('custom_meta_data'):
-                for custom_meta_item in vduinfo.supplemental_boot_data.custom_meta_data:
-                    if custom_meta_item.data_type == "STRING":
-                       metadata[custom_meta_item.name] = custom_meta_item.value
-                    elif custom_meta_item.data_type == "JSON":
-                       metadata[custom_meta_item.name] = tornado.escape.json_decode(custom_meta_item.value)
-                    else:
-                       raise OpenstackCALOperationFailure("Create-vdu operation failed. Unsupported data-type {} for custom-meta-data name {} ".format(custom_meta_item.data_type, custom_meta_item.name))
-            if vduinfo.supplemental_boot_data.has_field('config_file'):
-                for custom_config_file in vduinfo.supplemental_boot_data.config_file:
-                    files[custom_config_file.dest] = custom_config_file.source
-
-            if vduinfo.supplemental_boot_data.has_field('boot_data_drive'):
-                if vduinfo.supplemental_boot_data.boot_data_drive is True:
-                     config_drive = True
-                     
-        kwargs['metadata'] = metadata
-        kwargs['files'] = files
-        kwargs['config_drive'] = config_drive
-
-        if vduinfo.has_field('availability_zone') and vduinfo.availability_zone.has_field('name'):
-            kwargs['availability_zone']  = vduinfo.availability_zone
-        else:
-            kwargs['availability_zone'] = None
-
-        if server_group is not None:
-            kwargs['scheduler_hints'] = {'group': server_group}
-        else:
-            kwargs['scheduler_hints'] = None
-
-        kwargs['block_device_mapping_v2'] = None
-        vol_metadata = False
-        if vduinfo.has_field('volumes') :
-            kwargs['block_device_mapping_v2'] = []
-            with self._use_driver(account) as drv:
-            # Only support image->volume
-                for volume in vduinfo.volumes:
-                    block_map = dict()
-                    block_map['boot_index'] = volume.boot_priority
-                    if "image" in volume:
-                        # Support image->volume
-                        # Match retrived image info with volume based image name and checksum
-                        if volume.image is not None:
-                           matching_images = [img for img in imageinfo_list if img['name'] == volume.image]
-                           if volume.image_checksum is not None:
-                              matching_images = [img for img in matching_images if img['checksum'] == volume.image_checksum]
-                           img_id = matching_images[0]['id']
-                        if img_id is None:
-                           raise OpenstackCALOperationFailure("Create-vdu operation failed. Volume image not found for name {} checksum {}".format(volume.name, volume.checksum))
-                        block_map['uuid'] = img_id
-                        block_map['source_type'] = "image"
-                    else:
-                        block_map['source_type'] = "blank"
-                        
-                    block_map['device_name'] = volume.name
-                    block_map['destination_type'] = "volume"
-                    block_map['volume_size'] = volume.size
-                    block_map['delete_on_termination'] = True
-                    if volume.has_field('device_type') and volume.device_type == 'cdrom':
-                        block_map['device_type'] = 'cdrom'
-                    if volume.has_field('device_bus') and volume.device_bus == 'ide':
-                        block_map['disk_bus'] = 'ide'
-                    kwargs['block_device_mapping_v2'].append(block_map)
-                
-           
-        with self._use_driver(account) as drv:
-            vm_id = drv.nova_server_create(**kwargs)
-            if floating_ip:
-                self.prepare_vdu_on_boot(account, vm_id, floating_ip, pool_name, vduinfo.volumes)
-
-        return vm_id
-
-    def get_openstack_image_info(self, account, image_name, image_checksum=None):
-        self.log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
-                image_name, image_checksum, account.name
-                )
-
-        image_list = []
-        with self._use_driver(account) as drv:
-            image_list = drv.glance_image_list()
-        matching_images = [img for img in image_list if img['name'] == image_name]
-  
-        # If the image checksum was filled in then further filter the images by the checksum
-        if image_checksum is not None:
-            matching_images = [img for img in matching_images if img['checksum'] == image_checksum]
-        else:
-            self.log.warning("Image checksum not provided.  Lookup using image name (%s) only.",
-                                image_name) 
-  
-        if len(matching_images) == 0:
-            raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
-                  image_name, image_checksum, account.name
-                  ))
-  
-        elif len(matching_images) > 1:
-            unique_checksums = {i.checksum for i in matching_images}
-            if len(unique_checksums) > 1:
-                msg = ("Too many images with different checksums matched "
-                         "image name of %s for cloud account: %s" % (image_name, account.name))
-                raise ResMgrCALOperationFailure(msg)
-  
-        return matching_images[0]
 
     @rwcalstatus(ret_on_failure=[""])
     def do_create_vdu(self, account, vdu_init):
@@ -2060,162 +1029,18 @@
         Returns:
             The vdu_id
         """
-        ### First create required number of ports aka connection points
-        # Add the mgmt_ntwk by default.
-        mgmt_network_id = None
-        with self._use_driver(account) as drv:
-            mgmt_network_id = drv._mgmt_network_id
+        drv =  self._use_driver(account)
+        try:
+            kwargs = drv.utils.compute.make_vdu_create_args(vdu_init, account)
+            vm_id = drv.nova_server_create(**kwargs)
+            self.prepare_vdu_on_boot(account, vm_id, vdu_init)
+        except Exception as e:
+            self.log.exception("Exception %s occured during create-vdu", str(e))
+            raise
+        return vm_id
+    
 
-        port_list = []
-        network_list = []
-        imageinfo_list = []
-        is_explicit_mgmt_defined = False
-        for c_point in vdu_init.connection_points:
-            # if the user has specified explicit mgmt_network connection point
-            # then remove the mgmt_network from the VM list
-            if c_point.virtual_link_id == mgmt_network_id:
-                is_explicit_mgmt_defined = True
-            if c_point.virtual_link_id in network_list:
-                assert False, "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
-            else:
-                network_list.append(c_point.virtual_link_id)
-            port_id = self._create_connection_point(account, c_point)
-            port_list.append(port_id)
-
-        if not vdu_init.has_field('flavor_id'):
-            vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
-
-        ### Obtain all images for volumes and perform validations
-        if vdu_init.has_field('volumes'):
-            for volume in vdu_init.volumes:
-                if "image" in volume:
-                    image_checksum = volume.image_checksum if volume.has_field("image_checksum") else None
-                    image_info = self.get_openstack_image_info(account, volume.image, image_checksum)
-                    imageinfo_list.append(image_info)
-        elif vdu_init.has_field('image_id'):
-            with self._use_driver(account) as drv:
-                image_info = drv.glance_image_get(vdu_init.image_id)
-                imageinfo_list.append(image_info)
-
-        if not imageinfo_list:
-            err_str = ("VDU has no image information")
-            self.log.error(err_str)
-            raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-
-        ### Check VDU Virtual Interface type and make sure VM with property exists
-        if vdu_init.connection_points:
-                ### All virtual interfaces need to be of the same type for Openstack Accounts
-                if not (all(cp.type_yang == 'E1000' for cp in vdu_init.connection_points) or all(cp.type_yang != 'E1000' for cp in vdu_init.connection_points)):
-                    ### We have a mix of E1000 & VIRTIO/SR_IPOV virtual interface types in the VDU, abort instantiation.
-                    assert False, "Only one type of Virtual Intefaces supported for Openstack accounts. Found a mix of VIRTIO/SR_IOV &   E1000."
-  
-                ## It is not clear if all the images need to checked for HW properties. In the absence of model info describing each im  age's properties,
-                ###   we shall assume that all images need to have similar properties
-                for img_info in imageinfo_list:
-  
-                    virt_intf_type = vdu_init.connection_points[0].type_yang
-                    if virt_intf_type == 'E1000':
-                        if 'hw_vif_model' in img_info and img_info.hw_vif_model == 'e1000':
-                            self.log.debug("VDU has Virtual Interface E1000, found matching image with property hw_vif_model=e1000")
-                        else:
-                            err_str = ("VDU has Virtual Interface E1000, but image '%s' does not have property hw_vif_model=e1000" % img_info.name)
-                            self.log.error(err_str)
-                            raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-                    elif virt_intf_type == 'VIRTIO' or virt_intf_type == 'SR_IOV':
-                        if 'hw_vif_model' in img_info:
-                            err_str = ("VDU has Virtual Interface %s, but image '%s' has hw_vif_model mismatch" % virt_intf_type,img_info.name)
-                            self.log.error(err_str)
-                            raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-                        else:
-                            self.log.debug("VDU has Virtual Interface %s, found matching image" % virt_intf_type)
-                    else:
-                        err_str = ("VDU Virtual Interface '%s' not supported yet" % virt_intf_type)
-                        self.log.error(err_str)
-                        raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str) 
-
-        with self._use_driver(account) as drv:
-            ### Now Create VM
-            vm_network_list = []
-            if not is_explicit_mgmt_defined:
-                vm_network_list.append(drv._mgmt_network_id)
-  
-            if vdu_init.has_field('volumes'):
-                  # Only combination supported: Image->Volume
-                  for volume in vdu_init.volumes:
-                      if "volume" in volume:
-                          err_str = ("VDU Volume source not supported yet")
-                          self.log.error(err_str)
-                          raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-                      if not volume.has_field('device_type'):
-                          err_str = ("VDU Volume destination type not defined")
-                          self.log.error(err_str)
-                          raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-                      if volume.device_type not in ['disk', 'cdrom'] :
-                          err_str = ("VDU Volume destination type '%s' not supported" % volume.device_type)
-                          self.log.error(err_str)
-                          raise OpenstackCALOperationFailure("Create-vdu operation failed. Error- %s" % err_str)
-  
-  
-            server_group = None
-            if vdu_init.has_field('server_group'):
-                  ### Get list of server group in openstack for name->id mapping
-                  openstack_group_list = drv.nova_server_group_list()
-                  group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
-                  if len(group_id) != 1:
-                      raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups" %(vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
-                  server_group = group_id[0]
-
-            pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
-            if pci_assignement != '':
-                vm.user_tags.pci_assignement = pci_assignement
-
-            vm_id = self._create_vm(account, vdu_init, pci_assignement=pci_assignement, server_group=server_group, port_list=port_list, network_list=vm_network_list, imageinfo_list = imageinfo_list)
-            return vm_id
-
-    def prepare_vpci_metadata(self, drv, vdu_init):
-        pci_assignement = ''
-        ### TEF specific metadata creation for
-        virtio_vpci = []
-        sriov_vpci = []
-        virtio_meta = ''
-        sriov_meta = ''
-        ### For MGMT interface
-        if vdu_init.has_field('mgmt_vpci'):
-            xx = 'u\''+ drv._mgmt_network_id + '\' :[[u\'' + vdu_init.mgmt_vpci + '\', ' + '\'\']]'
-            virtio_vpci.append(xx)
-
-        for c_point in vdu_init.connection_points:
-            if c_point.has_field('vpci'):
-                if c_point.has_field('vpci') and c_point.type_yang == 'VIRTIO':
-                    xx = 'u\''+c_point.virtual_link_id + '\' :[[u\'' + c_point.vpci + '\', ' + '\'\']]'
-                    virtio_vpci.append(xx)
-                elif c_point.has_field('vpci') and c_point.type_yang == 'SR_IOV':
-                    xx = '[u\'' + c_point.vpci + '\', ' + '\'\']'
-                    sriov_vpci.append(xx)
-
-        if virtio_vpci:
-            virtio_meta += ','.join(virtio_vpci)
-
-        if sriov_vpci:
-            sriov_meta = 'u\'VF\': ['
-            sriov_meta += ','.join(sriov_vpci)
-            sriov_meta += ']'
-
-        if virtio_meta != '':
-            pci_assignement +=  virtio_meta
-            pci_assignement += ','
-
-        if sriov_meta != '':
-            pci_assignement +=  sriov_meta
-
-        if pci_assignement != '':
-            pci_assignement = '{' + pci_assignement + '}'
-
-        return pci_assignement
-
-
-
-    def prepare_vdu_on_boot(self, account, server_id, floating_ip,  pool_name, volumes=None):
+    def prepare_vdu_on_boot(self, account, server_id, vdu_params):
         cmd = PREPARE_VM_CMD.format(auth_url       = account.openstack.auth_url,
                                     username       = account.openstack.key,
                                     password       = account.openstack.secret,
@@ -2225,29 +1050,23 @@
                                     project_domain = account.openstack.project_domain,
                                     mgmt_network   = account.openstack.mgmt_network,
                                     server_id      = server_id)
-        if floating_ip:
-            cmd += " --floating_ip"
-        if pool_name:
-            cmd += (" --pool_name " + pool_name)
-
-        vol_metadata = False
-        if volumes is not None:
-            for volume in volumes:
-                if volume.has_field('custom_meta_data'):
-                    vol_metadata = True
-                    break
+        vol_list = list()
         
-        if vol_metadata is True:       
-            tmp_file = None
+        if vdu_params.has_field('allocate_public_address') and vdu_params.allocate_public_address:
+            cmd += " --floating_ip"
+            if account.openstack.has_field('floating_ip_pool'):
+                cmd += (" --pool_name " + account.openstack.floating_ip_pool)
+        
+        if vdu_params.has_field('volumes'):
+            for volume in vdu_params.volumes:
+                if volume.has_field('custom_meta_data'):
+                    vol_list.append(volume.as_dict())
+
+        if vol_list:
             with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
-                 vol_list = list()
-                 for volume in volumes:
-                    vol_dict = volume.as_dict()
-                    vol_list.append(vol_dict)
-
-                 yaml.dump(vol_list, tmp_file)
-            cmd += (" --vol_metadata {}").format(tmp_file.name)
-
+                yaml.dump(vol_list, tmp_file)
+                cmd += (" --vol_metadata {}").format(tmp_file.name)
+            
         exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
         exec_cmd = exec_path+'/'+cmd
         self.log.info("Running command: %s" %(exec_cmd))
@@ -2261,6 +1080,7 @@
             account     -  a cloud account
             vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
         """
+        drv = self._use_driver(account)
         ### First create required number of ports aka connection points
         port_list = []
         network_list = []
@@ -2272,18 +1092,17 @@
             port_id = self._create_connection_point(account, c_point)
             port_list.append(port_id)
 
+        drv = self._use_driver(account)
         ### Now add the ports to VM
         for port_id in port_list:
-            with self._use_driver(account) as drv:
-                drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
+            drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
 
         ### Delete the requested connection_points
         for c_point in vdu_modify.connection_points_remove:
             self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
 
         if vdu_modify.has_field('image_id'):
-            with self._use_driver(account) as drv:
-                drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
+            drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
 
 
     @rwstatus
@@ -2297,25 +1116,14 @@
         Returns:
             None
         """
-        if not vdu_id:
-            self.log.error("empty vdu_id during the vdu deletion")
-            return
-
-        with self._use_driver(account) as drv:
-            ### Get list of floating_ips associated with this instance and delete them
-            floating_ips = [ f for f in drv.nova_floating_ip_list() if f.instance_id == vdu_id ]
-            for f in floating_ips:
-                drv.nova_drv.floating_ip_delete(f)
-
-            ### Get list of port on VM and delete them.
-            port_list = drv.neutron_port_list(**{'device_id': vdu_id})
-
-        for port in port_list:
-            if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
-                self.do_delete_port(account, port['id'], no_rwstatus=True)
-
-        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
-
+        drv = self._use_driver(account)
+        try:
+            drv.utils.compute.perform_vdu_network_cleanup(vdu_id)
+            drv.nova_server_delete(vdu_id)
+        except Exception as e:
+            self.log.exception("Exception %s occured during delete-vdu", str(e))
+            raise
+            
 
     @rwstatus(ret_on_failure=[None])
     def do_get_vdu(self, account, vdu_id):
@@ -2328,37 +1136,15 @@
         Returns:
             Object of type RwcalYang.VDUInfoParams
         """
-        with self._use_driver(account) as drv:
-            port_list = drv.neutron_port_list(**{'device_id': vdu_id})
-
-            vm = drv.nova_server_get(vdu_id)
-
-            flavor_info = None
-            if ('flavor' in vm) and ('id' in vm['flavor']):
-                try:
-                    flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
-                except Exception as e:
-                    self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
-
-            openstack_group_list = drv.nova_server_group_list()
-            server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
-            openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
-            vdu_info = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
-                                                           flavor_info,
-                                                           account.openstack.mgmt_network,
-                                                           port_list,
-                                                           server_group,
-                                                           volume_list = openstack_srv_volume_list)
-            if vdu_info.state == 'active':
-                try:
-                    console_info = drv.nova_server_console(vdu_info.vdu_id)
-                except Exception as e:
-                    pass
-                else:
-                    vdu_info.console_url = console_info['console']['url']
-                    pass
-
-            return vdu_info
+        drv = self._use_driver(account)
+        try:
+            vm_info = drv.nova_server_get(vdu_id)
+            vdu_info = drv.utils.compute.parse_cloud_vdu_info(vm_info)
+        except Exception as e:
+            self.log.exception("Exception %s occured during get-vdu", str(e))
+            raise
+        
+        return vdu_info
 
 
     @rwstatus(ret_on_failure=[None])
@@ -2372,41 +1158,15 @@
             A list of objects of type RwcalYang.VDUInfoParams
         """
         vnf_resources = RwcalYang.VNFResources()
-        with self._use_driver(account) as drv:
+        drv = self._use_driver(account)
+        try:
             vms = drv.nova_server_list()
             for vm in vms:
-                port_list = drv.neutron_port_list(**{'device_id': vm['id']})
-
-                flavor_info = None
-
-                if ('flavor' in vm) and ('id' in vm['flavor']):
-                    try:
-                        flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
-                    except Exception as e:
-                        self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s" %(vm['flavor']['id'], str(e)))
-
-                else:
-                    flavor_info = None
-
-                openstack_group_list = drv.nova_server_group_list()
-                server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
-
-                openstack_srv_volume_list = drv.nova_volume_list(vm['id'])
-                vdu = RwcalOpenstackPlugin._fill_vdu_info(drv, vm,
-                                                          flavor_info,
-                                                          account.openstack.mgmt_network,
-                                                          port_list,
-                                                          server_group,
-                                                          volume_list = openstack_srv_volume_list)
-                if vdu.state == 'active':
-                    try:
-                        console_info = drv.nova_server_console(vdu.vdu_id)
-                    except Exception as e:
-                        pass
-                    else:
-                        vdu.console_url = console_info['console']['url']
-                        pass
+                vdu = drv.utils.compute.parse_cloud_vdu_info(vm)
                 vnf_resources.vdu_info_list.append(vdu)
-            return vnf_resources
+        except Exception as e:
+            self.log.exception("Exception %s occured during get-vdu-list", str(e))
+            raise
+        return vnf_resources
 
 
diff --git a/rwcal/test/test_rwcal_openstack.py b/rwcal/test/test_rwcal_openstack.py
index 6b0a247..8278a5e 100644
--- a/rwcal/test/test_rwcal_openstack.py
+++ b/rwcal/test/test_rwcal_openstack.py
@@ -28,7 +28,7 @@
 
 from gi.repository import RwcalYang
 from gi.repository.RwTypes import RwStatus
-from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver, KeystoneDriverV3, KeystoneDriverV2
+#from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver, KeystoneDriverV3, KeystoneDriverV2
 
 logger = logging.getLogger('rwcal-openstack')
 
@@ -43,10 +43,10 @@
 # Important information about openstack installation. This needs to be manually verified
 #
 openstack_info = {
-    'username'           : 'pluto',
-    'password'           : 'mypasswd',
-    'auth_url'           : 'http://10.66.4.17:5000/v3/',
-    'project_name'       : 'demo',
+    'username'           : 'xxxx',
+    'password'           : 'xxxxxx',
+    'auth_url'           : 'http://10.66.4.19:5000/v2.0/',
+    'project_name'       : 'xxxxx',
     'mgmt_network'       : 'private',
     'reserved_flavor'    : 'm1.medium',
     'reserved_image'     : 'Fedora-x86_64-20-20131211.1-sda-ping.qcow2',
@@ -544,6 +544,7 @@
         rc = self.cal.do_delete_flavor(self._acct, flavor_id)
         self.assertEqual(rc, RwStatus.SUCCESS)
 
+    '''
     @unittest.skip("Skipping test_expiry_token")
     def test_expiry_token(self):
         """
@@ -664,6 +665,7 @@
         except Exception: 
             auth_exp = True
         self.assertFalse(auth_exp)
+    '''
 
     @unittest.skip("Skipping test_vm_operations")
     def test_vm_operations(self):
@@ -929,17 +931,23 @@
         vdu.name = "cal.vdu"
         vdu.node_id = OpenStackTest.NodeID
         vdu.image_id = self._image.id
+        vdu.vm_flavor.memory_mb = 512
+        vdu.vm_flavor.vcpu_count = 1
+        vdu.vm_flavor.storage_gb = 4 
         vdu.flavor_id = self._flavor.id
         vdu.vdu_init.userdata = PING_USERDATA
         vdu.allocate_public_address = True
-        meta1 = vdu.supplemental_boot_data.custom_meta_data.add()
-        meta1.name = "EMS_IP"
-        meta1.data_type = "STRING"
-        meta1.value = "10.5.6.6"
-        #meta2 = vdu.supplemental_boot_data.custom_meta_data.add()
-        #meta2.name = "Cluster_data"
-        #meta2.data_type = "JSON"
-        #meta2.value = '''{ "cluster_id": "12" , "vnfc_id": "112" }'''
+        try:
+            meta1 = vdu.supplemental_boot_data.custom_meta_data.add()
+            meta1.name = "EMS_IP"
+            meta1.data_type = "STRING"
+            meta1.value = "10.5.6.6"
+            #meta2 = vdu.supplemental_boot_data.custom_meta_data.add()
+            #meta2.name = "Cluster_data"
+            #meta2.data_type = "JSON"
+            #meta2.value = '''{ "cluster_id": "12" , "vnfc_id": "112" }'''
+        except Exception as e:
+            pass
         #vdu.supplemental_boot_data.boot_data_drive = True
         customfile1 = vdu.supplemental_boot_data.config_file.add()
         customfile1.source = "abcdef124"
@@ -965,6 +973,136 @@
 
         return vdu
 
+    def _get_rbsh_vdu_request_info(self, vlink_list):
+          """
+          Returns object of type RwcalYang.VDUInitParams
+          """
+          vdu = RwcalYang.VDUInitParams()
+          vdu.name = "cal_rbsh_vdu"
+          vdu.vm_flavor.memory_mb = 2048
+          vdu.vm_flavor.vcpu_count = 1
+          vdu.vm_flavor.storage_gb = 10
+          vdu.flavor_id = self._flavor.id
+          vdu.allocate_public_address = True
+          ctr = 0
+          for vl in vlink_list:
+             c1 = vdu.connection_points.add()
+             c1.name = "c_point" + str(ctr)
+             ctr += 1
+             c1.virtual_link_id = vl
+             c1.type_yang = 'VIRTIO'
+
+          vol0 = vdu.volumes.add()
+          vol0.name = "vda"
+          vol0.image = "mgmt.img"
+          vol0.size = 40
+          vol0.boot_priority = 0
+          vol0.device_bus = "virtio"
+          vol0.device_type = "disk"
+
+          vol1 = vdu.volumes.add()
+          vol1.name = "vdb"
+          vol1.image = "segstore.img"
+          vol1.size = 60
+          vol1.boot_priority = 1
+          vol1.device_bus = "virtio"
+          vol1.device_type = "disk"
+
+          # blank disk
+          vol2 = vdu.volumes.add()
+          vol2.name = "vdc"
+          vol2.size = 10
+          vol2.boot_priority = 2
+          vol2.device_bus = "virtio"
+          vol2.device_type = "disk"
+
+          # existing volume disk
+          vol3 = vdu.volumes.add()
+          vol3.name = "vdd"
+          vol3.size = 10
+          vol3.volume_ref = "volume-ref1"
+          vol3.boot_priority = 3
+          vol3.device_bus = "virtio"
+          vol3.device_type = "disk"
+          return vdu
+
+    @unittest.skip("Skipping test_create_rbsh_vdu")
+    def test_create_rbsh_vdu(self):
+          """
+          Test to create VDU with mgmt port and 3 additional connection points
+          """
+          logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
+          vlink_list = []
+          for ctr in range(3):
+             vlink = RwcalYang.VirtualLinkReqParams()
+             vlink.name = 'rift.cal.virtual_link' + str(ctr)
+             vlink.subnet = '11.0.{}.0/24'.format(str(1 + ctr))
+
+             rc, rsp = self.cal.create_virtual_link(self._acct, vlink)
+             self.assertEqual(rc.status, RwStatus.SUCCESS)
+             logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+             vlink_id = rsp
+
+             #Check if virtual_link create is successful
+             rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
+             self.assertEqual(rc, RwStatus.SUCCESS)
+             self.assertEqual(rsp.virtual_link_id, vlink_id)
+             vlink_list.append(vlink_id)
+
+
+          # Now create VDU
+          vdu_req = self._get_rbsh_vdu_request_info(vlink_list)
+          logger.info("Openstack-CAL-Test: Test Create RB steelhead VDU API (w/ mgmt port) and 3 CPs")
+
+          rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
+          logger.debug("Openstack-CAL-Test: rc %s rsp %s" % (rc, rsp))
+          self.assertEqual(rc.status, RwStatus.SUCCESS)
+          logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
+
+          test_vdu_id = rsp
+
+          ## Check if VDU get is successful
+          rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+          logger.debug("Get VDU response %s", rsp)
+          self.assertEqual(rsp.vdu_id, test_vdu_id)
+
+          ### Wait until vdu_state is active
+          logger.debug("Waiting 10 secs")
+          time.sleep(10)
+          #{'name': 'dp0vhost7', 'connection_point_id': 'dp0vhost7', 'state': 'active', 'virtual_link_id': 'rift.cal.virtual_link', 'ip_address': '192.168.100.6'}
+          vdu_state = 'inactive'
+          cp_state = 'inactive'
+          for i in range(15):
+              rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+              self.assertEqual(rc, RwStatus.SUCCESS)
+              logger.info("Openstack-CAL-Test: Iter %d VDU with id : %s. Reached State :  %s, mgmt ip %s" %(i, test_vdu_id, rsp.state, rsp.management_ip))
+              if (rsp.state == 'active') and ('management_ip' in rsp) and ('public_ip' in rsp):
+                  vdu_state = 'active'
+                  #'connection_points': [{'name': 'dp0vhost7', 'connection_point_id': 'dp0vhost7', 'state': 'active', 'virtual_link_id': 'rift.cal.virtual_link', 'ip_address': '192.168.100.6'}]
+                  for cp in rsp.connection_points:
+                      logger.info("Openstack-CAL-Test: Iter %d VDU with id : %s. Reached State :  %s CP state %s" %(i, test_vdu_id, rsp.state, cp))
+              logger.debug("Waiting another 5 secs")
+              time.sleep(5)
+
+          self.assertEqual(rc, RwStatus.SUCCESS)
+          self.assertEqual(rsp.state, 'active')
+          self.assertEqual(vdu_state, 'active')
+          logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state  : %s IP: %s" %(test_vdu_id, rsp.state, rsp.management_ip))
+          logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rsp))
+          logger.info("Waiting for 30 secs before deletion")
+          time.sleep(30)
+
+          ### Check vdu list as well
+          rc, rsp = self.cal.get_vdu_list(self._acct)
+          self.assertEqual(rc, RwStatus.SUCCESS)
+          found = False
+          logger.debug("Get VDU response %s", rsp)
+          for vdu in rsp.vdu_info_list:
+              if vdu.vdu_id == test_vdu_id:
+                 found = True
+          self.assertEqual(found, True)
+          logger.info("Openstack-CAL-Test: Passed VDU list" )
+
     @unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
     def test_create_delete_virtual_link_and_vdu(self):
         """
@@ -1019,10 +1157,10 @@
         vlink_id2= rsp
 
         ### Now exercise the modify_vdu_api
-        vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
-        rc = self.cal.modify_vdu(self._acct, vdu_modify)
-        self.assertEqual(rc, RwStatus.SUCCESS)
-        logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
+        #vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
+        #rc = self.cal.modify_vdu(self._acct, vdu_modify)
+        #self.assertEqual(rc, RwStatus.SUCCESS)
+        #logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
 
         ### Lets delete the VDU
         self.cal.delete_vdu(self._acct, vdu_id)
@@ -1054,7 +1192,9 @@
           """
           vdu = RwcalYang.VDUInitParams()
           vdu.name = "cal_vdu"
-          vdu.flavor_id = self._flavor.id
+          vdu.vm_flavor.memory_mb = 512
+          vdu.vm_flavor.vcpu_count = 1
+          vdu.vm_flavor.storage_gb = 4 
           vdu.allocate_public_address = True
           ctr = 0
           for vl in vlink_list:
@@ -1068,16 +1208,22 @@
           vol0.name = "vda"
           vol0.image = openstack_info['reserved_image']
           vol0.size = 10
-          vol0.boot_priority = 0
+          try:
+              vol0.boot_priority = 0
+          except Exception as e:
+              pass
           vol0.device_type = "disk"
-          meta1 = vol0.custom_meta_data.add()
-          meta1.name = "fs_type"
-          meta1.data_type = "STRING"
-          meta1.value = "ext4"
+          try:
+             meta1 = vol0.custom_meta_data.add()
+             meta1.name = "fs_type"
+             meta1.data_type = "STRING"
+             meta1.value = "ext4"
+          except Exception as e:
+             pass
 
           return vdu
 
-    #@unittest.skip("Skipping test_create_vol_vdu")
+    @unittest.skip("Skipping test_create_vol_vdu")
     def test_create_vol_vdu(self):
           """
           Test to create VDU with mgmt port using Volumes
@@ -1157,6 +1303,15 @@
           self.assertEqual(found, True)
           logger.info("Openstack-CAL-Test: Passed VDU list" )
 
+    @unittest.skip("Skipping test_validate_creds")
+    def test_validate_creds(self):
+          """
+          Test validate creds
+          """
+          logger.info("Openstack-CAL-Test: Test validate creds")
+          status = self.cal.validate_cloud_creds(self._acct)
+          logger.info("Openstack-CAL-Test: Passed validate creds")
+
 class VmData(object):
     """A convenience class that provides all the stats and EPA Attributes
     from the VM provided
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
index a5d6404..fbbc695 100644
--- a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
+++ b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
@@ -757,7 +757,7 @@
                         if k in vdu:
                             d[k] = vdu[k]
                     vdu_data.append(d)
-                v['vdur'].append(vdu_data)
+                v['vdur'] = vdu_data
 
                 inp['vnfr'][vnfr['member_vnf_index_ref']] = v
 
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
index 3f1fb82..569fc54 100644
--- a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
+++ b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
@@ -251,6 +251,8 @@
 
         if failed_tasks:
             self._log.error("%s had %s FAILED tasks.", self, len(failed_tasks))
+            for ftask in failed_tasks:
+                self._log.error("%s : Failed to upload image : %s to cloud_account : %s", self, ftask.image_name, ftask.cloud_account)
             self.state = "FAILED"
         else:
             self._log.debug("%s tasks completed successfully", len(self._upload_tasks))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
index 62b517a..a88cfdc 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
@@ -110,6 +110,8 @@
         Raises:
             PackageStoreError- The package could not be retrieved
         """
+        self.refresh()
+
         if package_id not in self._package_dirs:
             msg = "Package %s not found in %s" % (package_id, self._root_dir)
             raise PackageNotFoundError(msg)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
index cffd21c..d2cdb65 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
@@ -319,11 +319,6 @@
         if format_ != "yaml":
             log.warn("Only yaml format supported for TOSCA export")
 
-        if desc_type != "nsd":
-            raise tornado.web.HTTPError(
-                400,
-                "NSD need to passed to generate TOSCA: {}".format(desc_type))
-
         def get_pkg_from_store(id_, type_):
             package = None
             # Attempt to get the package from the package store
@@ -338,27 +333,38 @@
 
             return package
 
-        pkg = tosca.ExportTosca()
+        if desc_type == "nsd":
+            pkg = tosca.ExportTosca()
 
-        # Add NSD and related descriptors for exporting
-        nsd_id = pkg.add_nsd(desc_msg, get_pkg_from_store(desc_id, "nsd"))
+            # Add NSD and related descriptors for exporting
+            nsd_id = pkg.add_nsd(desc_msg, get_pkg_from_store(desc_id, "nsd"))
 
-        catalog = self.catalog_map["vnfd"]
-        for const_vnfd in desc_msg.constituent_vnfd:
-            vnfd_id = const_vnfd.vnfd_id_ref
-            if vnfd_id in catalog:
-                pkg.add_vnfd(nsd_id,
-                             catalog[vnfd_id],
-                             get_pkg_from_store(vnfd_id, "vnfd"))
-            else:
-                raise tornado.web.HTTPError(
-                    400,
-                    "Unknown VNFD descriptor {} for NSD {}".
-                    format(vnfd_id, nsd_id))
+            catalog = self.catalog_map["vnfd"]
+            for const_vnfd in desc_msg.constituent_vnfd:
+                vnfd_id = const_vnfd.vnfd_id_ref
+                if vnfd_id in catalog:
+                    pkg.add_vnfd(nsd_id,
+                                 catalog[vnfd_id],
+                                 get_pkg_from_store(vnfd_id, "vnfd"))
+                else:
+                    raise tornado.web.HTTPError(
+                        400,
+                        "Unknown VNFD descriptor {} for NSD {}".
+                        format(vnfd_id, nsd_id))
 
-        # Create the archive.
-        pkg.create_archive(transaction_id,
-                           dest=self.application.export_dir)
+            # Create the archive.
+            pkg.create_archive(transaction_id,
+                               dest=self.application.export_dir)
+        if desc_type == "vnfd":
+            pkg = tosca.ExportTosca()
+            vnfd_id = desc_msg.id
+            pkg.add_single_vnfd(vnfd_id,
+                                 desc_msg,
+                                 get_pkg_from_store(vnfd_id, "vnfd"))
+
+            # Create the archive.
+            pkg.create_archive(transaction_id,
+                               dest=self.application.export_dir)
 
 
 class ExportStateHandler(state.StateHandler):
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
index a025b37..2527aef 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
@@ -71,4 +71,5 @@
         try:
             upload_job.wait_until_complete_threadsafe()
         except client.UploadJobError as e:
-            raise ImageUploadError("Failed to upload image (image_name) to cloud accounts") from e
+            raise ImageUploadError("Failed to upload image " + image_name + " to cloud accounts") from e
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
index 8ccc899..d61e47e 100644
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
@@ -61,6 +61,7 @@
             self.log = log
         self.nsds = {}
         self.csars = list()
+        self.vnfds = {}
 
     def add_image(self, nsd_id, image, chksum=None):
         if image.name not in self.images:
@@ -73,6 +74,14 @@
         if pkg:
             self.nsds[nsd_id]['pkgs'].append(pkg)
 
+    def add_single_vnfd(self, vnfd_id, vnfd, pkg=None):
+        if vnfd is not None:
+            self.vnfds['vnfds'] = []
+            self.vnfds['pkgs'] = []
+        self.vnfds['vnfds'].append(vnfd)
+        if pkg:
+            self.vnfds['pkgs'].append(pkg)
+
     def add_vnfd(self, nsd_id, vnfd, pkg=None):
         if not 'vnfds' in self.nsds[nsd_id]:
             self.nsds[nsd_id]['vnfds'] = []
@@ -112,8 +121,25 @@
                                                   archive=True))
         self.log.debug("Created CSAR archive {}".format(self.csars[-1]))
 
+    def create_vnfd_csar(self, dest=None):
+        if dest is None:
+            dest = tempfile.mkdtemp()
+        yangs = {}
+        yangs['vnfd'] = []
+        for vnfd in self.vnfds['vnfds']:
+            yangs['vnfd'].append(vnfd.as_dict())
+        translator = YangTranslator(self.log,
+                                    yangs=yangs,
+                                    packages=self.vnfds['pkgs'])
+        output = translator.translate()
+        self.csars.extend(translator.write_output(output,
+                                                  output_dir=dest,
+                                                  archive=True))
+        self.log.debug("Created CSAR archive {}".format(self.csars[-1]))
+
+
     def create_archive(self, archive_name, dest=None):
-        if not len(self.nsds):
+        if not len(self.nsds) and len(self.vnfds) == 0:
             self.log.error("Did not find any NSDs to export")
             return
 
@@ -127,13 +153,16 @@
 
         try:
             # Convert each NSD to a TOSCA template
-            for nsd_id in self.nsds:
-                # Not passing the dest dir to prevent clash in case
-                # multiple export of the same desc happens
-                self.create_csar(nsd_id)
+            if len(self.nsds) > 0:
+                for nsd_id in self.nsds:
+                    # Not passing the dest dir to prevent clash in case
+                    # multiple export of the same desc happens
+                    self.create_csar(nsd_id)
+            elif len(self.vnfds) > 0:
+                self.create_vnfd_csar()
 
         except Exception as e:
-            msg = "Exception converting NSD {}: {}".format(nsd_id, e)
+            msg = "Exception converting NSD/VNFD {}".format(e)
             self.log.exception(e)
             raise YangTranslateNsdError(msg)
 
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
index d07cfb7..849f9e4 100755
--- a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
+++ b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/onboard_pkg
@@ -388,7 +388,7 @@
                 msg = "Error instantiating NS as {} with NSD {}: ". \
                   format(self._service_name, self._nsd_id,
                          reply["rpc-error"])
-                self.log.error(msg)
+                # self.log.error(msg)
                 raise OnboardPkgInstError(msg)
 
         self.log.info("Successfully initiated instantiation of NS as {} ({})".
@@ -424,7 +424,17 @@
                                             format(e))
 
     def process(self):
-        self.validate_args()
+        try:
+            self.validate_args()
+        except Exception as e:
+            if args.verbose:
+                log.exception(e)
+
+            print("\nERROR:", e)
+            print("\n")
+            parser.print_help()
+            sys.exit(2)
+
         self.validate_connectivity()
         self.upload_packages()
         self.instantiate()
@@ -473,15 +483,24 @@
     fmt = logging.Formatter(
         '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:' \
         '%(filename)s:%(lineno)d) - %(message)s')
-    stderr_handler = logging.StreamHandler(stream=sys.stderr)
-    stderr_handler.setFormatter(fmt)
-    logging.basicConfig(level=logging.INFO)
     log = logging.getLogger('onboard-pkg')
-    log.addHandler(stderr_handler)
+    log.setLevel(logging.INFO)
     if args.verbose:
         log.setLevel(logging.DEBUG)
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.DEBUG)
+    ch.setFormatter(fmt)
+    log.addHandler(ch)
 
     log.debug("Input arguments: {}".format(args))
 
-    ob = OnboardPackage(log, args)
-    ob.process()
+    try:
+    	ob = OnboardPackage(log, args)
+    	ob.process()
+    except Exception as e:
+        if args.verbose:
+            log.exception(e)
+
+        print("\nERROR:", e)
+        sys.exit(1)
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
index ff00b2b..2b0c57b 100755
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
@@ -2056,10 +2056,11 @@
         # Fetch the VNFD associated with this VNF
         placement_groups = self.get_placement_groups(vnfd_msg, const_vnfd)
         self._log.info("Cloud Account for VNF %d is %s",const_vnfd.member_vnf_index,cloud_account_name)
-        self._log.info("Launching VNF: %s (Member Index: %s) in NSD plancement Groups: %s",
+        self._log.info("Launching VNF: %s (Member Index: %s) in NSD plancement Groups: %s, restart mode self.restart_mode %s",
                        vnfd_msg.name,
                        const_vnfd.member_vnf_index,
-                       [ group.name for group in placement_groups])
+                       [ group.name for group in placement_groups],
+                       self.restart_mode)
         vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
                                             self._log,
                                             self._loop,
@@ -4005,9 +4006,10 @@
             self._log.error(msg)
             raise NetworkServiceRecordError(msg)
 
-        self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s",
+        self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s, restart mode %s",
                        nsr_msg.id,
-                       nsr_msg.nsd.id)
+                       nsr_msg.nsd.id,
+                       restart_mode)
 
         nsm_plugin = self._ro_plugin_selector.ro_plugin
         sdn_account_name = self._cloud_account_handler.get_cloud_account_sdn_name(nsr_msg.cloud_account)
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
index 9c768a6..16359c9 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/publisher/download_status.py
@@ -18,13 +18,18 @@
 # 
 
 import asyncio
-import uuid
+import sys
 
 from gi.repository import (RwDts as rwdts)
 import rift.mano.dts as mano_dts
 
 import rift.downloader as url_downloader
 
+import functools
+import concurrent
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
 
 class DownloadStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
 
@@ -32,12 +37,19 @@
         super().__init__(log, dts, loop, project)
         self.tasks = {}
 
+
     def xpath(self, download_id=None):
         return self._project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
                                          ("[download-id='{}']".
                                           format(download_id) if download_id else ""))
 
     @asyncio.coroutine
+    def _dts_publisher(self, job):
+         # Publish the download state
+         self.reg.update_element(
+                        self.xpath(download_id=job.download_id), job)
+
+    @asyncio.coroutine
     def register(self):
         self.reg = yield from self.dts.register(xpath=self.xpath(),
                   flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
@@ -50,15 +62,36 @@
         if self.reg:
             self.reg.deregister()
             self.reg = None
+   
+    @staticmethod 
+    def _async_func(func, fut):
+        try:
+            ret = func()
+            fut.set_result(ret)
+        except Exception as e:
+            fut.set_exception(e)
+
+    def _schedule_dts_work(self, download_job_msg):
+        # Create a coroutine
+        cort = self._dts_publisher(download_job_msg)
+        # Use main asyncio loop (running in main thread)
+        newfunc = functools.partial(asyncio.ensure_future, cort, loop=self.loop)
+        fut = concurrent.futures.Future()
+        # Schedule future in main thread immediately
+        self.loop.call_soon_threadsafe(DownloadStatusPublisher._async_func, newfunc, fut)
+        res = fut.result()
+        exc = fut.exception()
+        if exc is not None:
+            self.log.error("Caught future exception during download: %s type %s", str(exc), type(exc))
+            raise exc
+        return res
 
     def on_download_progress(self, download_job_msg):
         """callback that triggers update.
         """
-        key = download_job_msg.download_id
         # Trigger progess update
-        self.reg.update_element(
-                self.xpath(download_id=key),
-                download_job_msg)
+        # Schedule a future in the main thread
+        self._schedule_dts_work(download_job_msg)
 
     def on_download_finished(self, download_job_msg):
         """callback that triggers update.
@@ -70,9 +103,8 @@
             del self.tasks[key]
 
         # Publish the final state
-        self.reg.update_element(
-                self.xpath(download_id=key),
-                download_job_msg)
+        # Schedule a future in the main thread
+        self._schedule_dts_work(download_job_msg)
 
     @asyncio.coroutine
     def register_downloader(self, downloader):
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
index 18acc4d..0a93ade 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/rwpkgmgr.py
@@ -40,7 +40,7 @@
 from . import rpc
 from .proxy import filesystem
 from . import publisher as pkg_publisher
-
+from . import subscriber 
 
 class PackageManagerProject(ManoProject):
 
@@ -48,15 +48,23 @@
         super(PackageManagerProject, self).__init__(tasklet.log, name)
         self.update(tasklet)
 
-        self.job_handler = pkg_publisher.DownloadStatusPublisher(
-            self._log, self._dts, self._loop, self)
+        args = [self.log, self.dts, self.loop, self]
+        self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
+        # create catalog subscribers 
+        self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
+        self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
+
 
     @asyncio.coroutine
     def register (self):
+        yield from self.vnfd_catalog_sub.register()
+        yield from self.nsd_catalog_sub.register()
         yield from self.job_handler.register()
 
     def deregister (self):
         yield from self.job_handler.deregister()
+        yield from self.vnfd_catalog_sub.deregister()
+        yield from self.nsd_catalog_sub.deregister()
 
 
 class PackageManagerTasklet(rift.tasklets.Tasklet):
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/__init__.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/__init__.py
index f305f2f..14c3dc1 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/__init__.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/__init__.py
@@ -13,4 +13,4 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 #
-from .download_status import DownloadStatusSubscriber
+from .download_status import DownloadStatusSubscriber, VnfdStatusSubscriber, NsdStatusSubscriber
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
index ea4b5e8..6bca858 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/subscriber/download_status.py
@@ -17,13 +17,100 @@
 # Creation Date: 09/25/2016
 # 
 
-import rift.mano.dts as mano_dts
+import os
+import io
+import shutil
 
+import rift.mano.dts as mano_dts
+import rift.package.package as package 
+import rift.package.store as store 
+import rift.package.convert as convert
+
+from gi.repository import (
+    RwYang,
+    NsdYang,
+    RwNsdYang,
+    VnfdYang,
+    RwVnfdYang,
+    RwDts
+)
 
 class DownloadStatusSubscriber(mano_dts.AbstractOpdataSubscriber):
-    def __init__(self, log, dts, loop, project, callback=None):
+    def __init__(self, log, dts, loop, project, callback):
         super().__init__(log, dts, loop, project, callback)
 
     def get_xpath(self):
         return self._project.add_project(
             "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+
+
+class VnfdStatusSubscriber(DownloadStatusSubscriber): 
+    DOWNLOAD_DIR = store.VnfdPackageFilesystemStore.DEFAULT_ROOT_DIR
+    MODULE_DESC = 'vnfd rw-vnfd'.split()
+    DESC_TYPE = 'vnfd'
+    
+    def __init__(self, log, dts, loop, project):
+        super().__init__(log, dts, loop, project, self.on_change)
+        self.subscriber = mano_dts.VnfdCatalogSubscriber(log, dts, loop, project)
+
+    def on_change(self, msg, action): 
+        log_msg = "1. Vnfd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
+        self.log.debug(log_msg)
+        if action == RwDts.QueryAction.UPDATE:
+            actionCreate(self, msg)
+        else:
+            self.log.debug("VnfdStatusSubscriber: No action for {}".format(repr(action)))
+            pass
+
+    def get_xpath(self): 
+        return self.subscriber.get_xpath() 
+
+
+class NsdStatusSubscriber(DownloadStatusSubscriber): 
+    DOWNLOAD_DIR = store.NsdPackageFilesystemStore.DEFAULT_ROOT_DIR
+    MODULE_DESC = 'nsd rw-nsd'.split()
+    DESC_TYPE = 'nsd'
+    
+    def __init__(self, log, dts, loop, project):
+        super().__init__(log, dts, loop, project, self.on_change)
+        self.subscriber = mano_dts.NsdCatalogSubscriber(log, dts, loop, project)
+
+    def on_change(self, msg, action): 
+        log_msg = "1. Nsd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
+        self.log.debug(log_msg)
+        if action == RwDts.QueryAction.UPDATE:
+            actionCreate(self, msg)
+        else:
+            self.log.debug("NsdStatusSubscriber: No action for {}".format(repr(action)))
+            pass
+
+    def get_xpath(self): 
+        return self.subscriber.get_xpath() 
+
+
+def actionCreate(descriptor, msg): 
+    ''' Create folder structure if it doesn't exist: id/vnf name OR id/nsd name  
+    Serialize the Vnfd/Nsd object to yaml and store yaml file in the created folder.
+    '''
+
+    desc_name = msg.name if msg.name else ""
+    download_dir = os.path.join(descriptor.DOWNLOAD_DIR, msg.id)
+
+    # If a download dir is present with contents, then we know it has been created in the 
+    # upload path. 
+    if os.path.exists(download_dir) and os.listdir(download_dir):
+        descriptor.log.debug("Skpping folder creation, {} already present".format(download_dir))
+        return
+    else: 
+        download_dir = os.path.join(download_dir, desc_name) 
+        if not os.path.exists(download_dir):
+            os.makedirs(download_dir)
+            descriptor.log.debug("Created directory {}".format(download_dir))
+
+            model = RwYang.Model.create_libncx()
+            for module in descriptor.MODULE_DESC: model.load_module(module)
+
+            yaml_path = "{base}/{name}_{type}.yaml".format(base=download_dir, name=msg.name, type=descriptor.DESC_TYPE) 
+            with open(yaml_path,"w") as fh:
+                fh.write(msg.to_yaml(model))
+
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
index 335638e..a02e5c6 100755
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
@@ -99,7 +99,7 @@
                 "package_id": "123",
                 "download_id": str(uuid.uuid4())})
 
-        self.job_handler.on_download_progress(mock_msg)
+        yield from self.job_handler._dts_publisher(mock_msg)
         yield from asyncio.sleep(5, loop=self.loop)
 
         itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
@@ -110,12 +110,12 @@
             result = yield from fut
             result = result.result
 
-        print (mock_msg)
+        print ("Mock ", mock_msg)
         assert result == mock_msg
 
         # Modify the msg
         mock_msg.url = "http://bar/foo"
-        self.job_handler.on_download_finished(mock_msg)
+        yield from self.job_handler._dts_publisher(mock_msg)
         yield from asyncio.sleep(5, loop=self.loop)
         
         itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
@@ -138,17 +138,18 @@
 
         proxy = mock.MagicMock()
 
-        url = "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell"
+        url = "http://boson.eng.riftio.com/common/unittests/plantuml.jar"
         url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", proxy)
 
         download_id = yield from self.job_handler.register_downloader(url_downloader)
         assert download_id is not None
-        
+       
+        # Waiting for 5 secs to be sure that the file is downloaded
         yield from asyncio.sleep(5, loop=self.loop)
         xpath = "/download-jobs/job[download-id='{}']".format(
             download_id)
         result = yield from self.read_xpath(xpath)
-        print (result)
+        self.log.debug("Test result before complete check - %s", result)
         assert result.status == "COMPLETED"
         assert len(self.job_handler.tasks) == 0
 
@@ -163,7 +164,7 @@
         yield from self.job_handler.register()
 
         proxy = mock.MagicMock()
-        url = "http://mirror.0x.sg/fedora/linux/releases/24/CloudImages/x86_64/images/Fedora-Cloud-Base-24-1.2.x86_64.qcow2"
+        url = "http://boson.eng.riftio.com/common/unittests/Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
         url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", proxy)
 
         download_id = yield from self.job_handler.register_downloader(url_downloader)
@@ -171,14 +172,16 @@
         xpath = "/download-jobs/job[download-id='{}']".format(
             download_id)
 
-        yield from asyncio.sleep(3, loop=self.loop)
+        yield from asyncio.sleep(1, loop=self.loop)
 
         result = yield from self.read_xpath(xpath)
+        self.log.debug("Test result before in_progress check - %s", result)
         assert result.status == "IN_PROGRESS"
 
         yield from self.job_handler.cancel_download(download_id)
         yield from asyncio.sleep(3, loop=self.loop)
         result = yield from self.read_xpath(xpath)
+        self.log.debug("Test result before cancel check - %s", result)
         assert result.status == "CANCELLED"
         assert len(self.job_handler.tasks) == 0
     
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
index d235915..929e483 100755
--- a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
+++ b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
@@ -123,7 +123,7 @@
 
     @asyncio.coroutine
     def reallocate_virtual_network(self, event_id, cloud_account_name, request, resource):
-        self._log.info("Received network resource allocation request with event-id: %s", event_id)
+        self._log.info("Received network resource reallocation request with event-id: %s", event_id)
         resource = yield from self.core.reallocate_virtual_resource(event_id, cloud_account_name, request, 'network', resource)
         return resource
 
@@ -150,7 +150,7 @@
 
     @asyncio.coroutine
     def reallocate_virtual_compute(self, event_id, cloud_account_name, request, resource):
-        self._log.info("Received compute resource allocation request "
+        self._log.info("Received compute resource reallocation request "
                        "(cloud account: %s) with event-id: %s",
                        cloud_account_name, event_id)
         resource = yield from self.core.reallocate_virtual_resource(
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
index a6277f1..cca5031 100755
--- a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
+++ b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
@@ -55,6 +55,7 @@
     ManoProject,
     ProjectHandler,
     )
+import rift.mano.utils.short_name as mano_short_name
 
 
 class VMResourceError(Exception):
@@ -277,6 +278,7 @@
                  project,
                  vdud,
                  vnfr,
+                 nsr_config,
                  mgmt_intf,
                  mgmt_network,
                  cloud_account_name,
@@ -289,6 +291,7 @@
         self._project = project
         self._vdud = vdud
         self._vnfr = vnfr
+        self._nsr_config = nsr_config
         self._mgmt_intf = mgmt_intf
         self._cloud_account_name = cloud_account_name
         self._vnfd_package_store = vnfd_package_store
@@ -349,6 +352,31 @@
         """ Return this VDUR's name """
         return self._name
 
+    # Truncated name confirming to RFC 1123
+    @property
+    def unique_short_name(self):
+        """ Return this VDUR's unique short name """
+        # Impose these restrictions on Unique name
+        #  Max 64
+        #    - Max 10 of NSR name (remove all specialcharacters, only numbers and alphabets)
+        #    - 6 chars of shortened name
+        #    - Max 10 of VDU name (remove all specialcharacters, only numbers and alphabets)
+        #
+        def _restrict_tag(input_str):
+           # Exclude all characters except a-zA-Z0-9
+           outstr = re.sub('[^a-zA-Z0-9]', '', input_str)
+           # Take max of 10 chars
+           return outstr[-10:]
+
+        # Use NSR name for part1
+        part1 = _restrict_tag(self._nsr_config.name)
+        # Get unique short string (6 chars)
+        part2 = mano_short_name.StringShortner(self._name)
+        # Use VDU ID for part3
+        part3 = _restrict_tag(self._vdud.id)
+        shortstr = part1 + "-" + part2.short_string + "-" + part3
+        return shortstr
+
     @property
     def cloud_account_name(self):
         """ Cloud account this VDU should be created in """
@@ -400,14 +428,17 @@
                       "hypervisor_epa",
                       "host_epa",
                       "volumes",
-                      "name"]
+                      ]
         vdu_copy_dict = {k: v for k, v in
                          self._vdud.as_dict().items() if k in vdu_fields}
         vdur_dict = {"id": self._vdur_id,
                      "vdu_id_ref": self._vdud.id,
                      "operational_status": self.operational_status,
                      "operational_status_details": self._state_failed_reason,
+                     "name": self.name,
+                     "unique_short_name": self.unique_short_name
                      }
+
         if self.vm_resp is not None:
             vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
                               "flavor_id": self.vm_resp.flavor_id
@@ -623,7 +654,8 @@
         vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
 
         vm_create_msg_dict = {
-                "name": self.name,
+                "name": self.unique_short_name, # Truncated name conforming to RFC 1123
+                "node_id": self.name,           # Rift assigned Id
                 }
 
         if self.image_name is not None:
@@ -1524,7 +1556,7 @@
         return None
 
     @asyncio.coroutine
-    def get_vdu_placement_groups(self, vdu):
+    def get_vdu_placement_groups(self, vdu, nsr_config):
         placement_groups = []
         ### Step-1: Get VNF level placement groups
         for group in self._vnfr_msg.placement_groups_info:
@@ -1532,10 +1564,7 @@
             #group_info.from_dict(group.as_dict())
             placement_groups.append(group)
 
-        ### Step-2: Get NSR config. This is required for resolving placement_groups cloud constructs
-        nsr_config = yield from self.get_nsr_config()
-
-        ### Step-3: Get VDU level placement groups
+        ### Step-2: Get VDU level placement groups
         for group in self.vnfd.placement_groups:
             for member_vdu in group.member_vdus:
                 if member_vdu.member_vdu_ref == vdu.id:
@@ -1583,16 +1612,22 @@
 
 
         self._log.info("Creating VDU's for vnfd id: %s", self.vnfd_id)
+
+        # Get NSR config - Needed for placement groups and to derive VDU short-name
+        nsr_config = yield from self.get_nsr_config()
+
         for vdu in self._rw_vnfd.vdu:
             self._log.debug("Creating vdu: %s", vdu)
             vdur_id = get_vdur_id(vdu)
 
-            placement_groups = yield from self.get_vdu_placement_groups(vdu)
-            self._log.info("Launching VDU: %s from VNFD :%s (Member Index: %s) with Placement Groups: %s",
+
+            placement_groups = yield from self.get_vdu_placement_groups(vdu, nsr_config)
+            self._log.info("Launching VDU: %s from VNFD :%s (Member Index: %s) with Placement Groups: %s, Existing vdur_id %s",
                            vdu.name,
                            self.vnf_name,
                            self.member_vnf_index,
-                           [ group.name for group in placement_groups])
+                           [ group.name for group in placement_groups],
+                           vdur_id)
 
             vdur = VirtualDeploymentUnitRecord(
                 dts=self._dts,
@@ -1601,6 +1636,7 @@
                 project = self._project,
                 vdud=vdu,
                 vnfr=vnfr,
+                nsr_config=nsr_config,
                 mgmt_intf=self.has_mgmt_interface(vdu),
                 mgmt_network=self._mgmt_network,
                 cloud_account_name=self.cloud_account_name,
@@ -1895,7 +1931,7 @@
 
 
         # instantiate VLs
-        self._log.debug("Instantiate VLs {}: {}".format(self._vnfr_id, self._state))
+        self._log.debug("VNFR-ID %s: Instantiate VLs, restart mode %s", self._vnfr_id, restart_mode)
         try:
             yield from self.instantiate_vls(xact, restart_mode)
         except Exception as e:
@@ -1906,7 +1942,7 @@
         self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE)
 
         # instantiate VDUs
-        self._log.debug("Create VDUs {}: {}".format(self._vnfr_id, self._state))
+        self._log.debug("VNFR-ID %s: Create VDUs, restart mode %s", self._vnfr_id, restart_mode)
         yield from self.create_vdus(self, restart_mode)
 
         try:
@@ -2155,7 +2191,7 @@
             if action == rwdts.QueryAction.READ:
                 schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
                 path_entry = schema.keyspec_to_entry(ks_path)
-                self._log.debug("VDU Opdata path is {}".format(path_entry))
+                self._log.debug("VDU Opdata path is {}".format(path_entry.key00.id))
                 try:
                     vnfr = self._vnfm.get_vnfr(self._vnfr_id)
                 except VnfRecordError as e:
@@ -2500,6 +2536,8 @@
 
         set_if_not_none('name', vdur._vdud.name)
         set_if_not_none('mgmt.ip', vdur.vm_management_ip)
+        # The value below can be used as the hostname
+        set_if_not_none('vdur_name', vdur.unique_short_name)
 
     def update(self, vdur):
         """Update the VDUR information in the datastore
@@ -2528,6 +2566,8 @@
 
         set_or_delete('name', vdur._vdud.name)
         set_or_delete('mgmt.ip', vdur.vm_management_ip)
+        # The value below can be used as the hostname
+        set_or_delete('vdur_name', vdur.unique_short_name)
 
     def remove(self, vdur_id):
         """Remove all of the data associated with specified VDUR
@@ -2620,7 +2660,7 @@
             yield from hdl.register()
 
     def deregister(self):
-        self.log.debug("De-register VNFM project {}".format(self.name))
+        self._log.debug("De-register VNFM project {}".format(self._project.name))
         for hdl in self._dts_handlers:
             hdl.deregister()
 
diff --git a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
index 6058a4c..7e7ee2f 100644
--- a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
+++ b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
@@ -68,6 +68,7 @@
   rwsdn_mock
   rwsdn_sim
   rwsdn_odl 
+  rwsdn_openstack
   rwsdn-python
   )
 rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/CMakeLists.txt
new file mode 100644
index 0000000..fcf944f
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2017 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_openstack rwsdn_openstack.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/rwsdn_openstack.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/rwsdn_openstack.py
new file mode 100644
index 0000000..ff5d019
--- /dev/null
+++ b/rwlaunchpad/plugins/rwvns/vala/rwsdn_openstack/rwsdn_openstack.py
@@ -0,0 +1,378 @@
+
+#
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import contextlib
+import logging
+
+import gi
+gi.require_version('RwSdn', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwcalYang', '1.0')
+
+from rift.rwcal.openstack import session as sess_drv
+from rift.rwcal.openstack import keystone as ks_drv
+from rift.rwcal.openstack import neutron as nt_drv
+from rift.rwcal.openstack import portchain as port_drv
+
+
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+import neutronclient.common.exceptions as NeutronException
+import keystoneclient.exceptions as KeystoneExceptions
+
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwSdn, # Vala package
+    RwsdnYang,
+    RwTypes,
+    RwcalYang)
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class OpenstackSdnOperationFailure(Exception):
+    pass
+
+class UninitializedPluginError(Exception):
+    pass
+
+class OpenstackL2PortChainingDriver(object):
+    """
+    Driver for openstack keystone and neutron
+    """
+    def __init__(self, logger = None, **kwargs):
+        """
+        OpenstackDriver Driver constructor
+        Arguments:
+           logger: (instance of logging.Logger)
+           kwargs:  A dictionary of 
+            {
+              username (string)                   : Username for project/tenant.
+              password (string)                   : Password
+              auth_url (string)                   : Keystone Authentication URL.
+              project  (string)                   : Openstack project name
+              cert_validate (boolean, optional)   : In case of SSL/TLS connection if certificate validation is required or not.
+              user_domain                         : Domain name for user
+              project_domain                      : Domain name for project
+              region                              : Region name
+            }
+        """
+
+        if logger is None:
+            self.log = logging.getLogger('rwsdn.openstack.driver')
+            self.log.setLevel(logging.DEBUG)
+        else:
+            self.log = logger
+
+        args =  dict(auth_url            = kwargs['auth_url'],
+                     username            = kwargs['username'],
+                     password            = kwargs['password'],
+                     project_name        = kwargs['project'],
+                     project_domain_name = kwargs['project_domain'] if 'project_domain' in kwargs else None,
+                     user_domain_name    = kwargs['user_domain'] if 'user_domain' in kwargs else None,)
+
+        cert_validate = kwargs['cert_validate'] if 'cert_validate' in kwargs else False
+        region = kwargs['region_name'] if 'region_name' in kwargs else False
+
+        discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'], logger = self.log)
+        (major, minor) = discover.get_version()
+
+        self.sess_drv = sess_drv.SessionDriver(auth_method = 'password',
+                                               version = str(major),
+                                               cert_validate = cert_validate,
+                                               logger = self.log,
+                                               **args)
+
+        self.neutron_drv = nt_drv.NeutronDriver(self.sess_drv,
+                                                region_name = region,
+                                                logger = self.log)
+
+        self.portchain_drv = port_drv.L2PortChainDriver(self.sess_drv,
+                                                        self.neutron_drv.neutron_endpoint,
+                                                        logger = self.log)
+
+    def validate_account_creds(self):
+        try:
+            self.sess_drv.invalidate_auth_token()
+            self.sess_drv.auth_token
+        except KeystoneExceptions.AuthorizationFailure as e:
+            self.log.error("Unable to authenticate or validate the existing credentials. Exception: %s", str(e))
+            raise ValidationError("Invalid Credentials: "+ str(e))
+        except Exception as e:
+            self.log.error("Could not connect to Openstack. Exception: %s", str(e))
+            raise ValidationError("Connection Error: "+ str(e))
+
+    def delete_port_chain(self,port_chain_id):
+        "Delete port chain"
+        try:
+            result = self.portchain_drv.get_port_chain(port_chain_id)
+            port_chain = result.json()
+            self.log.debug("Port chain result is %s", port_chain)
+            port_pair_groups = port_chain["port_chain"]["port_pair_groups"]
+            self.portchain_drv.delete_port_chain(port_chain_id)
+
+            # Get port pairs and delete port pair groups
+            port_pairs = list()
+            self.log.debug("Port pair groups during delete is %s", port_pair_groups)
+            for port_pair_group_id in port_pair_groups:
+                result = self.portchain_drv.get_port_pair_group(port_pair_group_id)
+                port_pair_group = result.json()
+                self.log.debug("Port pair group result is %s", port_pair_group)
+                port_pairs.extend(port_pair_group["port_pair_group"]["port_pairs"])
+                self.portchain_drv.delete_port_pair_group(port_pair_group_id)
+
+            self.log.debug("Port pairs during delete is %s",port_pairs)
+
+            for port_pair_id in port_pairs:
+                self.portchain_drv.delete_port_pair(port_pair_id)
+                pass
+        except Exception as e:
+            self.log.error("Error while delete port chain with id %s, exception %s", port_chain_id,str(e))
+
+    def update_port_chain(self,port_chain_id,flow_classifier_list):
+        result = self.portchain_drv.get_port_chain(port_chain_id)
+        result.raise_for_status()
+        port_chain = result.json()['port_chain']
+        new_flow_classifier_list = list()
+        if port_chain and port_chain['flow_classifiers']:
+           new_flow_classifier_list.extend(port_chain['flow_classifiers'])
+        new_flow_classifier_list.extend(flow_classifier_list)
+        port_chain_id = self.portchain_drv.update_port_chain(port_chain['id'],flow_classifiers=new_flow_classifier_list)
+        return port_chain_id
+
+    def create_flow_classifer(self,classifier_name,classifier_dict):
+        "Create flow classifier"
+        flow_classifier_id = self.portchain_drv.create_flow_classifier(classifier_name,classifier_dict)
+        return flow_classifier_id
+
+    def delete_flow_classifier(self,classifier_id):
+        "Delete flow classifier"
+        try:
+            self.portchain_drv.delete_flow_classifier(classifier_id)
+        except Exception as e:
+            self.log.error("Error while deleting flow classifier with id %s, exception %s", classifier_id,str(e))
+
+    def get_port_chain_list(self):
+        result = self.portchain_drv.get_port_chain_list()
+        port_chain_list = result.json()
+        if 'port_chains' in port_chain_list:
+            return port_chain_list['port_chains']
+
+
+class RwsdnAccountDriver(object):                                                                             
+      """
+      Container class per sdn account                                                                         
+      """ 
+      def __init__(self, logger, **kwargs):                                                                     
+          self.log = logger
+          try:
+              self._driver = OpenstackL2PortChainingDriver(logger = self.log, **kwargs)                         
+          except (KeystoneExceptions.Unauthorized, KeystoneExceptions.AuthorizationFailure,                     
+                  NeutronException.NotFound) as e:                                                              
+              raise
+          except Exception as e:
+              self.log.error("RwsdnOpenstackPlugin: OpenstackL2PortChainingDriver init failed. Exception: %s" %(str(e)))      
+              raise
+  
+      @property                                                                                                 
+      def driver(self):
+          return self._driver
+
+    
+class SdnOpenstackPlugin(GObject.Object, RwSdn.Topology):
+    instance_num = 1
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.log = logging.getLogger('rwsdn.openstack.%s' % SdnOpenstackPlugin.instance_num)
+        self.log.setLevel(logging.DEBUG)
+
+        self._rwlog_handler = None
+        self._account_drivers = dict()
+        SdnOpenstackPlugin.instance_num += 1
+
+    def _use_driver(self, account):
+        if self._rwlog_handler is None:
+            raise UninitializedPluginError("Must call init() in SDN plugin before use.")
+
+        if account.name not in self._account_drivers:
+            self.log.debug("Creating SDN OpenstackDriver")
+            kwargs = dict(username = account.openstack.key,
+                          password = account.openstack.secret,
+                          auth_url = account.openstack.auth_url,
+                          project = account.openstack.tenant,
+                          cert_validate = account.openstack.cert_validate,
+                          user_domain = account.openstack.user_domain,
+                          project_domain = account.openstack.project_domain,
+                          region = account.openstack.region)
+            drv = RwsdnAccountDriver(self.log, **kwargs)
+            self._account_drivers[account.name] = drv
+            return drv.driver
+        else:
+            return self._account_drivers[account.name].driver    
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        self._rwlog_handler = rwlogger.RwLogger(
+                category="rw-cal-log",
+                subcategory="openstack",
+                log_hdl=rwlog_ctx,
+                )
+        self.log.addHandler(self._rwlog_handler)
+        self.log.propagate = False
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_sdn_creds(self, account):
+        """
+        Validates the sdn account credentials for the specified account.
+        Validates access to the resources using the Keystone API. If the creds
+        are not valid, returns an error code and a reason string.
+
+        @param account - a SDN account
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwsdnYang.SdnConnectionStatus()
+        drv = self._use_driver(account)
+        try:
+            drv.validate_account_creds()
+
+        except openstack_drv.ValidationError as e:
+            self.log.error("SdnOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+
+        except Exception as e:
+            msg = "SdnOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+            self.log.error(msg)
+            status.status = "failure"
+            status.details = msg
+
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vnffg_chain(self, account,vnffg):
+        """
+        Creates Service Function chain in OpenStack
+
+        @param account - a SDN account
+
+        """
+        self.log.debug('Received Create VNFFG chain for account {}, chain {}'.format(account,vnffg))
+        drv = self._use_driver(account)
+        port_list = list()
+        vnf_chain_list = sorted(vnffg.vnf_chain_path, key = lambda x: x.order)
+        prev_vm_id = None 
+        for path in vnf_chain_list:
+            if prev_vm_id and path.vnfr_ids[0].vdu_list[0].vm_id == prev_vm_id:
+                prev_entry = port_list.pop()
+                port_list.append((prev_entry[0],path.vnfr_ids[0].vdu_list[0].port_id))
+                prev_vm_id = None
+            else:
+                prev_vm_id = path.vnfr_ids[0].vdu_list[0].vm_id
+                port_list.append((path.vnfr_ids[0].vdu_list[0].port_id,path.vnfr_ids[0].vdu_list[0].port_id))
+        vnffg_id = drv.create_port_chain(vnffg.name,port_list)
+        return vnffg_id
+
+    @rwstatus
+    def do_terminate_vnffg_chain(self, account,vnffg_id):
+        """
+        Terminate Service Function chain in OpenStack
+
+        @param account - a SDN account
+        """
+        self.log.debug('Received terminate VNFFG chain for id %s ', vnffg_id)
+        drv = self._use_driver(account)
+        drv.delete_port_chain(vnffg_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_vnffg_classifier(self, account, vnffg_classifier):
+        """
+           Add VNFFG Classifier 
+
+           @param account - a SDN account
+        """
+        self.log.debug('Received Create VNFFG classifier for account {}, classifier {}'.format(account,vnffg_classifier))
+        protocol_map = {1:'ICMP',6:'TCP',17:'UDP'}
+        flow_classifier_list = list()
+        drv =  self._use_driver(account)
+        for rule in vnffg_classifier.match_attributes:
+            classifier_name = vnffg_classifier.name + '_' + rule.name
+            flow_dict = {} 
+            for field, value in rule.as_dict().items():
+                 if field == 'ip_proto':
+                     flow_dict['protocol'] = protocol_map.get(value,None)
+                 elif field == 'source_ip_address':
+                     flow_dict['source_ip_prefix'] = value
+                 elif field == 'destination_ip_address':
+                     flow_dict['destination_ip_prefix'] = value
+                 elif field == 'source_port':
+                     flow_dict['source_port_range_min'] = value
+                     flow_dict['source_port_range_max'] = value
+                 elif field == 'destination_port':
+                     flow_dict['destination_port_range_min'] = value
+                     flow_dict['destination_port_range_max'] = value
+            if vnffg_classifier.has_field('port_id'):
+                    flow_dict['logical_source_port'] = vnffg_classifier.port_id 
+            flow_classifier_id = drv.create_flow_classifer(classifier_name, flow_dict)
+            flow_classifier_list.append(flow_classifier_id)
+        drv.update_port_chain(vnffg_classifier.rsp_id,flow_classifier_list)
+        return flow_classifier_list
+
+    @rwstatus(ret_on_failure=[None])
+    def do_terminate_vnffg_classifier(self, account, vnffg_classifier_list):
+        """
+           Terminate VNFFG classifiers
+
+           @param account - a SDN account
+        """
+        self.log.debug('Received terminate VNFFG classifier for id %s ', vnffg_classifier_list)
+        drv = self._use_driver(account)
+        for classifier_id in vnffg_classifier_list:
+            drv.delete_flow_classifier(classifier_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vnffg_rendered_paths(self, account):
+        """
+           Get Rendered Service Path List (SFC)
+
+           @param account - a SDN account
+        """
+        self.log.debug('Received get VNFFG rendered path for account %s ', account)
+        vnffg_rsps = RwsdnYang.VNFFGRenderedPaths() 
+        drv = self._use_driver(account)
+        port_chain_list = drv.get_port_chain_list()
+        for port_chain in port_chain_list:
+            #rsp = vnffg_rsps.vnffg_rendered_path.add()
+            #rsp.name = port_chain['name']
+            pass
+        return vnffg_rsps
+
+
diff --git a/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang b/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
index e590436..fba317f 100644
--- a/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
+++ b/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
@@ -98,6 +98,7 @@
       enum odl;
       enum mock;
       enum sdnsim;
+      enum openstack;
     }
   }
 
@@ -152,6 +153,62 @@
           default "rwsdn_sim";
         }
       }
+
+      container openstack {
+        leaf key {
+          type string;
+          mandatory true;
+        }
+
+        leaf secret {
+          type string;
+          mandatory true;
+        }
+
+        leaf auth_url {
+          type string;
+          mandatory true;
+        }
+
+        leaf tenant {
+          type string;
+          mandatory true;
+        }
+
+        leaf admin {
+          type boolean;
+          default false;
+        }
+
+        leaf user-domain {
+          type string;
+          default "Default";
+          description "Domain of the OpenStack user";
+        }
+
+        leaf project-domain {
+          type string;
+          default "Default";
+          description "Domain of the OpenStack project";
+        }
+
+        leaf region {
+          type string;
+          default "RegionOne";
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwsdn_openstack";
+        }
+
+        leaf cert-validate {
+          type boolean;
+          default false;
+          description "Certificate validation policy in case of SSL/TLS connection";
+        }
+      }
+
     }
   }
 
@@ -341,6 +398,9 @@
         leaf rsp-name {
           type string;
         }
+        leaf rsp-id {
+          type yang:uuid;
+        }
         leaf port-id {
           rwpb:field-inline "true";
           rwpb:field-string-max 64;
diff --git a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
index 49e8925..10e75e0 100644
--- a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
+++ b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.yang
@@ -53,6 +53,14 @@
     prefix "manotypes";
   }
 
+  import rw-vnfd {
+    prefix "rwvnfd";
+  }
+
+  import rw-nsd {
+    prefix "rwnsd";
+  }
+
   import rw-project {
     prefix "rw-project";
   }
@@ -368,4 +376,4 @@
     }
   }
 
-}
\ No newline at end of file
+}
diff --git a/rwlaunchpad/ra/pytest/conftest.py b/rwlaunchpad/ra/pytest/conftest.py
index fc094fa..80d739f 100644
--- a/rwlaunchpad/ra/pytest/conftest.py
+++ b/rwlaunchpad/ra/pytest/conftest.py
@@ -23,6 +23,7 @@
 import rift.auto.log
 import rift.auto.session
 import rift.vcs.vcs
+import rift.rwcal.openstack
 import logging
 
 import gi
@@ -129,3 +130,22 @@
     '''
     return cloud_accounts[0]
 
+@pytest.fixture(scope='class')
+def openstack_client(cloud_host, cloud_user, cloud_tenant):
+    """Fixture which returns a session to openstack host.
+
+    Returns:
+        Session to an openstack host.
+    """
+    password = 'mypasswd'
+    auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
+    mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+    return rift.rwcal.openstack.OpenstackDriver(**{'username': cloud_user,
+                                                   'password': password,
+                                                   'auth_url': auth_url,
+                                                   'project' : cloud_tenant,
+                                                   'mgmt_network': mgmt_network,
+                                                   'cert_validate': False,
+                                                   'user_domain': 'Default',
+                                                   'project_domain':'Default',
+                                                   'region': 'RegionOne'})