RIFT OSM R1 Initial Submission

Signed-off-by: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
diff --git a/common/python/CMakeLists.txt b/common/python/CMakeLists.txt
new file mode 100644
index 0000000..658d525
--- /dev/null
+++ b/common/python/CMakeLists.txt
@@ -0,0 +1,158 @@
+# Creation Date: 2016/1/12
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+cmake_minimum_required(VERSION 2.8)
+
+
+rift_python_install_tree(
+  FILES
+    rift/mano/__init__.py
+    rift/mano/ncclient.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/cloud/__init__.py
+    rift/mano/cloud/accounts.py
+    rift/mano/cloud/config.py
+    rift/mano/cloud/operdata.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/config_agent/operdata.py
+    rift/mano/config_agent/__init__.py
+    rift/mano/config_agent/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+
+rift_python_install_tree(
+  FILES
+    rift/mano/dts/__init__.py
+    rift/mano/dts/core.py
+    rift/mano/dts/subscriber/__init__.py
+    rift/mano/dts/subscriber/core.py
+    rift/mano/dts/subscriber/store.py
+    rift/mano/dts/subscriber/ns_subscriber.py
+    rift/mano/dts/subscriber/vnf_subscriber.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/config_data/__init__.py
+    rift/mano/config_data/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/dts/__init__.py
+    rift/mano/dts/core.py
+    rift/mano/dts/subscriber/__init__.py
+    rift/mano/dts/subscriber/core.py
+    rift/mano/dts/subscriber/store.py
+    rift/mano/dts/subscriber/ns_subscriber.py
+    rift/mano/dts/subscriber/vnf_subscriber.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/tosca_translator/__init__.py
+    rift/mano/tosca_translator/translator_logging.conf
+    rift/mano/tosca_translator/compare_desc.py
+    rift/mano/tosca_translator/shell.py
+    rift/mano/tosca_translator/rwmano/tosca_translator.py
+    rift/mano/tosca_translator/rwmano/translate_inputs.py
+    rift/mano/tosca_translator/rwmano/__init__.py
+    rift/mano/tosca_translator/rwmano/translate_outputs.py
+    rift/mano/tosca_translator/rwmano/translate_node_templates.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_resource.py
+    rift/mano/tosca_translator/rwmano/syntax/__init__.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_template.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_output.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
+    rift/mano/tosca_translator/rwmano/tosca/__init__.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
+    rift/mano/tosca_translator/common/__init__.py
+    rift/mano/tosca_translator/common/utils.py
+    rift/mano/tosca_translator/common/exception.py
+    rift/mano/tosca_translator/custom/__init__.py
+    rift/mano/tosca_translator/custom/rwmano/__init__.py
+    rift/mano/tosca_translator/conf/translator.conf
+    rift/mano/tosca_translator/conf/__init__.py
+    rift/mano/tosca_translator/conf/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/utils/__init__.py
+    rift/mano/utils/compare_desc.py
+    rift/mano/utils/juju_api.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/yang_translator/__init__.py
+    rift/mano/yang_translator/translator_logging.conf
+    rift/mano/yang_translator/shell.py
+    rift/mano/yang_translator/compare_desc.py
+    rift/mano/yang_translator/conf/config.py
+    rift/mano/yang_translator/conf/translator.conf
+    rift/mano/yang_translator/conf/__init__.py
+    rift/mano/yang_translator/rwmano/yang_translator.py
+    rift/mano/yang_translator/rwmano/translate_descriptors.py
+    rift/mano/yang_translator/rwmano/__init__.py
+    rift/mano/yang_translator/rwmano/yang/yang_vld.py
+    rift/mano/yang_translator/rwmano/yang/yang_vdu.py
+    rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
+    rift/mano/yang_translator/rwmano/yang/yang_nsd.py
+    rift/mano/yang_translator/rwmano/yang/__init__.py
+    rift/mano/yang_translator/rwmano/syntax/tosca_template.py
+    rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
+    rift/mano/yang_translator/rwmano/syntax/__init__.py
+    rift/mano/yang_translator/custom/__init__.py
+    rift/mano/yang_translator/custom/rwmano/__init__.py
+    rift/mano/yang_translator/common/utils.py
+    rift/mano/yang_translator/common/exception.py
+    rift/mano/yang_translator/common/__init__.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+set(TRANSLATOR_SCRIPTS
+  ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/tosca_translator/tosca-translator
+  ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/yang_translator/yang-translator)
+
+install(
+  FILES ${TRANSLATOR_SCRIPTS}
+    DESTINATION
+      usr/bin
+    COMPONENT ${PKG_LONG_NAME}
+    )
+
+set(subdirs
+  test
+  )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/common/python/rift/mano/__init__.py b/common/python/rift/mano/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/__init__.py
diff --git a/common/python/rift/mano/cloud/__init__.py b/common/python/rift/mano/cloud/__init__.py
new file mode 100644
index 0000000..4317d51
--- /dev/null
+++ b/common/python/rift/mano/cloud/__init__.py
@@ -0,0 +1,30 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .accounts import (
+    CloudAccount,
+    CloudAccountCalError,
+    )
+
+from .config import (
+    CloudAccountConfigSubscriber,
+    CloudAccountConfigCallbacks
+    )
+
+from .operdata import (
+    CloudAccountDtsOperdataHandler,
+)
diff --git a/common/python/rift/mano/cloud/accounts.py b/common/python/rift/mano/cloud/accounts.py
new file mode 100644
index 0000000..57ca55f
--- /dev/null
+++ b/common/python/rift/mano/cloud/accounts.py
@@ -0,0 +1,181 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import sys
+import asyncio
+from gi import require_version
+require_version('RwcalYang', '1.0')
+require_version('RwTypes', '1.0')
+require_version('RwCloudYang', '1.0')
+
+from gi.repository import (
+        RwTypes,
+        RwcalYang,
+        RwCloudYang,
+        )
+import rw_peas
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = getattr(asyncio, "async")
+
+
+class PluginLoadingError(Exception):
+    pass
+
+
+class CloudAccountCalError(Exception):
+    pass
+
+
+class CloudAccount(object):
+    def __init__(self, log, rwlog_hdl, account_msg):
+        self._log = log
+        self._account_msg = account_msg.deep_copy()
+
+        self._cal_plugin = None
+        self._engine = None
+
+        self._cal = self.plugin.get_interface("Cloud")
+        self._cal.init(rwlog_hdl)
+
+        self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                status="unknown",
+                details="Connection status lookup not started"
+                )
+
+        self._validate_task = None
+
+    @property
+    def plugin(self):
+        if self._cal_plugin is None:
+            try:
+                self._cal_plugin = rw_peas.PeasPlugin(
+                        getattr(self._account_msg, self.account_type).plugin_name,
+                        'RwCal-1.0',
+                        )
+
+            except AttributeError as e:
+                raise PluginLoadingError(str(e))
+
+            self._engine, _, _ = self._cal_plugin()
+
+        return self._cal_plugin
+
+    def _wrap_status_fn(self, fn, *args, **kwargs):
+        ret = fn(*args, **kwargs)
+        rw_status = ret[0]
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self._log.error(msg)
+            raise CloudAccountCalError(msg)
+
+        # If there was only one other return value besides rw_status, then just
+        # return that element.  Otherwise return the rest of the return values
+        # as a list.
+        return ret[1] if len(ret) == 2 else ret[1:]
+
+    @property
+    def cal(self):
+        return self._cal
+
+    @property
+    def name(self):
+        return self._account_msg.name
+
+    @property
+    def account_msg(self):
+        return self._account_msg
+
+    @property
+    def cal_account_msg(self):
+        return RwcalYang.CloudAccount.from_dict(
+                self.account_msg.as_dict(),
+                ignore_missing_keys=True,
+                )
+
+    def cloud_account_msg(self, account_dict):
+        self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict)
+
+    @property
+    def account_type(self):
+        return self._account_msg.account_type
+
+    @property
+    def connection_status(self):
+        return self._status
+
+    def update_from_cfg(self, cfg):
+        self._log.debug("Updating parent CloudAccount to %s", cfg)
+
+        # Hack to catch updates triggered from apply_callback when a sdn-account is removed
+        # from a cloud-account. To be fixed properly when updates are handled
+        if (self.account_msg.name == cfg.name
+                and self.account_msg.account_type == cfg.account_type):
+            return
+
+        if cfg.has_field("sdn_account"):
+            self.account_msg.sdn_account = cfg.sdn_account
+        else:
+            raise NotImplementedError("Update cloud account not yet supported")
+
+    def create_image(self, image_info_msg):
+        image_id = self._wrap_status_fn(
+                self.cal.create_image, self.cal_account_msg, image_info_msg
+                )
+
+        return image_id
+
+    def get_image_list(self):
+        self._log.debug("Getting image list from account: %s", self.name)
+        resources = self._wrap_status_fn(
+                self.cal.get_image_list, self.cal_account_msg
+                )
+
+        return resources.imageinfo_list
+
+    @asyncio.coroutine
+    def validate_cloud_account_credentials(self, loop):
+        self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
+        self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                status="validating",
+                details="Cloud account connection validation in progress"
+                )
+        rwstatus, status = yield from loop.run_in_executor(
+                None,
+                self._cal.validate_cloud_creds,
+                self.cal_account_msg,
+                )
+        if rwstatus == RwTypes.RwStatus.SUCCESS:
+            self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+        else:
+            self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error when calling CAL validate cloud creds"
+                    )
+
+        self._log.info("Got cloud account validation response: %s", self._status)
+
+    def start_validate_credentials(self, loop):
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_cloud_account_credentials(loop),
+                loop=loop
+                )
+
diff --git a/common/python/rift/mano/cloud/config.py b/common/python/rift/mano/cloud/config.py
new file mode 100644
index 0000000..1b1847c
--- /dev/null
+++ b/common/python/rift/mano/cloud/config.py
@@ -0,0 +1,249 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rw_peas
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+
+from gi.repository import (
+    RwcalYang as rwcal,
+    RwDts as rwdts,
+    ProtobufC,
+    )
+
+from . import accounts
+
+class CloudAccountNotFound(Exception):
+    pass
+
+
+class CloudAccountError(Exception):
+    pass
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unforunately, it is currently difficult to figure out what has exactly
+    # changed in this xact without Pbdelta support (RIFT-4916)
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class CloudAccountConfigCallbacks(object):
+    def __init__(self,
+                 on_add_apply=None, on_add_prepare=None,
+                 on_delete_apply=None, on_delete_prepare=None):
+
+        @asyncio.coroutine
+        def prepare_noop(*args, **kwargs):
+            pass
+
+        def apply_noop(*args, **kwargs):
+            pass
+
+        self.on_add_apply = on_add_apply
+        self.on_add_prepare = on_add_prepare
+        self.on_delete_apply = on_delete_apply
+        self.on_delete_prepare = on_delete_prepare
+
+        for f in ('on_add_apply', 'on_delete_apply'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, apply_noop)
+                continue
+
+            if asyncio.iscoroutinefunction(ref):
+                raise ValueError('%s cannot be a coroutine' % (f,))
+
+        for f in ('on_add_prepare', 'on_delete_prepare'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, prepare_noop)
+                continue
+
+            if not asyncio.iscoroutinefunction(ref):
+                raise ValueError("%s must be a coroutine" % f)
+
+
+class CloudAccountConfigSubscriber(object):
+    XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
+
+    def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+        self._dts = dts
+        self._log = log
+        self._rwlog_hdl = rwlog_hdl
+        self._reg = None
+
+        self.accounts = {}
+
+        self._cloud_callbacks = cloud_callbacks
+
+    def add_account(self, account_msg):
+        self._log.info("adding cloud account: {}".format(account_msg))
+
+        account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg)
+        self.accounts[account.name] = account
+
+        self._cloud_callbacks.on_add_apply(account)
+
+    def delete_account(self, account_name):
+        self._log.info("deleting cloud account: {}".format(account_name))
+        del self.accounts[account_name]
+
+        self._cloud_callbacks.on_delete_apply(account_name)
+
+    def update_account(self, account_msg):
+        """ Update an existing cloud account
+
+        In order to simplify update, turn an update into a delete followed by
+        an add.  The drawback to this approach is that we will not support
+        updates of an "in-use" cloud account, but this seems like a
+        reasonable trade-off.
+
+
+        Arguments:
+            account_msg - The cloud account config message
+        """
+        self._log.info("updating cloud account: {}".format(account_msg))
+
+        self.delete_account(account_msg.name)
+        self.add_account(account_msg)
+
+    def register(self):
+        @asyncio.coroutine
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
+
+            if xact.xact is None:
+                if action == rwdts.AppconfAction.INSTALL:
+                    curr_cfg = self._reg.elements
+                    for cfg in curr_cfg:
+                        self._log.debug("Cloud account being re-added after restart.")
+                        if not cfg.has_field('account_type'):
+                            raise CloudAccountError("New cloud account must contain account_type field.")
+                        self.add_account(cfg)
+                else:
+                    # When RIFT first comes up, an INSTALL is called with the current config
+                    # Since confd doesn't actally persist data this never has any data so
+                    # skip this for now.
+                    self._log.debug("No xact handle.  Skipping apply config")
+
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self._reg,
+                    xact=xact,
+                    key_name="name",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_account(cfg.name)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_account(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_account(cfg)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for Cloud Account """
+
+            action = xact_info.query_action
+            self._log.debug("Cloud account on_prepare config received (action: %s): %s",
+                            xact_info.query_action, msg)
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                if msg.name in self.accounts:
+                    self._log.debug("Cloud account already exists. Invoking update request")
+
+                    # Since updates are handled by a delete followed by an add, invoke the
+                    # delete prepare callbacks to give clients an opportunity to reject.
+                    yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    self._log.debug("Cloud account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        raise CloudAccountError("New cloud account must contain account_type field.")
+
+                    account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg)
+                    yield from self._cloud_callbacks.on_add_prepare(account)
+
+            elif action == rwdts.QueryAction.DELETE:
+                # Check if the entire cloud account got deleted
+                fref = ProtobufC.FieldReference.alloc()
+                fref.goto_whole_message(msg.to_pbcm())
+                if fref.is_field_deleted():
+                    yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    fref.goto_proto_name(msg.to_pbcm(), "sdn_account")
+                    if fref.is_field_deleted():
+                        # SDN account disassociated from cloud account
+                        account = self.accounts[msg.name]
+                        dict_account = account.account_msg.as_dict()
+                        del dict_account["sdn_account"]
+                        account.cloud_account_msg(dict_account)
+                    else:
+                        self._log.error("Deleting individual fields for cloud account not supported")
+                        xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                        return
+
+            else:
+                self._log.error("Action (%s) NOT SUPPORTED", action)
+                return xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for Cloud Account config using xpath: %s",
+                        CloudAccountConfigSubscriber.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            self._reg = acg.register(
+                    xpath=CloudAccountConfigSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                    on_prepare=on_prepare,
+                    )
diff --git a/common/python/rift/mano/cloud/operdata.py b/common/python/rift/mano/cloud/operdata.py
new file mode 100644
index 0000000..4878691
--- /dev/null
+++ b/common/python/rift/mano/cloud/operdata.py
@@ -0,0 +1,140 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rift.tasklets
+
+from gi.repository import(
+        RwCloudYang,
+        RwDts as rwdts,
+        )
+
+class CloudAccountNotFound(Exception):
+    pass
+
+
+class CloudAccountDtsOperdataHandler(object):
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self.cloud_accounts = {}
+
+    def add_cloud_account(self, account):
+        self.cloud_accounts[account.name] = account
+        account.start_validate_credentials(self._loop)
+
+    def delete_cloud_account(self, account_name):
+        del self.cloud_accounts[account_name]
+
+    def get_saved_cloud_accounts(self, cloud_account_name):
+        ''' Get Cloud Account corresponding to passed name, or all saved accounts if name is None'''
+        saved_cloud_accounts = []
+
+        if cloud_account_name is None or cloud_account_name == "":
+            cloud_accounts = list(self.cloud_accounts.values())
+            saved_cloud_accounts.extend(cloud_accounts)
+        elif cloud_account_name in self.cloud_accounts:
+            account = self.cloud_accounts[cloud_account_name]
+            saved_cloud_accounts.append(account)
+        else:
+            errstr = "Cloud account {} does not exist".format(cloud_account_name)
+            raise KeyError(errstr)
+
+        return saved_cloud_accounts
+
+    @asyncio.coroutine
+    def create_notification(self, account):
+        xpath = "N,/rw-cloud:cloud-notif"
+        ac_status = RwCloudYang.YangNotif_RwCloud_CloudNotif()
+        ac_status.name = account.name
+        ac_status.message = account.connection_status.details
+
+        yield from self._dts.query_create(xpath, rwdts.XactFlag.ADVISE, ac_status)
+        self._log.info("Notification called by creating dts query: %s", ac_status)
+
+
+    def _register_show_status(self):
+        def get_xpath(cloud_name=None):
+            return "D,/rw-cloud:cloud/account{}/connection-status".format(
+                    "[name='%s']" % cloud_name if cloud_name is not None else ''
+                    )
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+            cloud_account_name = path_entry.key00.name
+            self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self.get_saved_cloud_accounts(cloud_account_name)
+                for account in saved_accounts:
+                    connection_status = account.connection_status
+                    self._log.debug("Responding to cloud connection status request: %s", connection_status)
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=account.connection_status,
+                            )
+            except KeyError as e:
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def _register_validate_rpc(self):
+        def get_xpath():
+            return "/rw-cloud:update-cloud-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("cloud_account"):
+                raise CloudAccountNotFound("Cloud account name not provided")
+
+            cloud_account_name = msg.cloud_account
+            try:
+                account = self.cloud_accounts[cloud_account_name]
+            except KeyError:
+                raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+
+            account.start_validate_credentials(self._loop)
+
+            yield from self.create_notification(account)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
diff --git a/common/python/rift/mano/config_agent/__init__.py b/common/python/rift/mano/config_agent/__init__.py
new file mode 100644
index 0000000..5807e8d
--- /dev/null
+++ b/common/python/rift/mano/config_agent/__init__.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .config import (
+    ConfigAgentCallbacks,
+    ConfigAgentSubscriber
+    )
+
+from .operdata import (
+    ConfigAgentJobManager,
+    CfgAgentJobDtsHandler,
+    CfgAgentDtsOperdataHandler
+    )
+
diff --git a/common/python/rift/mano/config_agent/config.py b/common/python/rift/mano/config_agent/config.py
new file mode 100644
index 0000000..7500bac
--- /dev/null
+++ b/common/python/rift/mano/config_agent/config.py
@@ -0,0 +1,228 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rw_peas
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+
+from gi.repository import (
+    RwcalYang as rwcal,
+    RwDts as rwdts,
+    RwConfigAgentYang as rwcfg_agent,
+    ProtobufC,
+    )
+
+class ConfigAccountNotFound(Exception):
+    """Raised when a named config-agent account does not exist."""
+    pass
+
+class ConfigAccountError(Exception):
+    """Raised for invalid config-agent account configuration requests."""
+    pass
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    """Classify the config elements of a transaction into adds, deletes
+    and updates.
+
+    Compares the post-transaction elements against the currently
+    registered elements, keyed by the attribute named key_name.
+
+    Arguments:
+        dts_member_reg - DTS member registration handle
+        xact           - the transaction being applied
+        key_name       - attribute name used as the element key (e.g. "name")
+
+    Returns:
+        A (added_cfgs, deleted_cfgs, updated_cfgs) tuple of lists of
+        config messages.
+    """
+    # Unfortunately, it is currently difficult to figure out what has exactly
+    # changed in this xact without Pbdelta support (RIFT-4916)
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds: keys present after the xact but not before
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes: keys present before the xact but not after
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates: keys present in both whose messages differ
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ConfigAgentCallbacks(object):
+    """Holder for the config-agent subscriber callbacks.
+
+    Apply callbacks (on_add_apply / on_delete_apply) must be plain
+    callables; prepare callbacks (on_add_prepare / on_delete_prepare)
+    must be coroutines.  Missing callbacks are replaced with no-ops of
+    the appropriate flavor, so callers may always invoke them without
+    None checks.
+    """
+    def __init__(self,
+                 on_add_apply=None, on_add_prepare=None,
+                 on_delete_apply=None, on_delete_prepare=None):
+
+        @asyncio.coroutine
+        def prepare_noop(*args, **kwargs):
+            # Default no-op for prepare callbacks (must be a coroutine)
+            pass
+
+        def apply_noop(*args, **kwargs):
+            # Default no-op for apply callbacks (must NOT be a coroutine)
+            pass
+
+        self.on_add_apply = on_add_apply
+        self.on_add_prepare = on_add_prepare
+        self.on_delete_apply = on_delete_apply
+        self.on_delete_prepare = on_delete_prepare
+
+        # Apply callbacks run synchronously inside the DTS apply phase:
+        # reject coroutines, default missing ones to a no-op.
+        for f in ('on_add_apply', 'on_delete_apply'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, apply_noop)
+                continue
+
+            if asyncio.iscoroutinefunction(ref):
+                raise ValueError('%s cannot be a coroutine' % (f,))
+
+        # Prepare callbacks are awaited with "yield from": require
+        # coroutines, default missing ones to a coroutine no-op.
+        for f in ('on_add_prepare', 'on_delete_prepare'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, prepare_noop)
+                continue
+
+            if not asyncio.iscoroutinefunction(ref):
+                raise ValueError("%s must be a coroutine" % f)
+
+
+class ConfigAgentSubscriber(object):
+    XPATH = "C,/rw-config-agent:config-agent/account"
+
+    def __init__(self, dts, log, config_callbacks):
+        self._dts = dts
+        self._log = log
+        self._reg = None
+
+        self.accounts = {}
+
+        self._config_callbacks = config_callbacks
+
+    def add_account(self, account_msg):
+        self._log.info("adding config account: {}".format(account_msg))
+
+        self.accounts[account_msg.name] = account_msg
+
+        self._config_callbacks.on_add_apply(account_msg)
+
+    def delete_account(self, account_msg):
+        self._log.info("deleting config account: {}".format(account_msg.name))
+        del self.accounts[account_msg.name]
+
+        self._config_callbacks.on_delete_apply(account_msg)
+
+    def update_account(self, account_msg):
+        """ Update an existing config-agent account
+
+        In order to simplify update, turn an update into a delete followed by
+        an add.  The drawback to this approach is that we will not support
+        updates of an "in-use" config-agent account, but this seems like a
+        reasonable trade-off.
+
+        Arguments:
+            account_msg - The config-agent account config message
+        """
+
+        self._log.info("updating config-agent account: {}".format(account_msg))
+        self.delete_account(account_msg)
+        self.add_account(account_msg)
+
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actally persist data this never has any data so
+                # skip this for now.
+                self._log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self._reg,
+                    xact=xact,
+                    key_name="name",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_account(cfg)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_account(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_account(cfg)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for Config Account """
+
+            action = xact_info.handle.query_action
+            self._log.debug("Config account on_prepare config received (action: %s): %s",
+                            xact_info.handle.query_action, msg)
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                # If the account already exists, then this is an update.
+                if msg.name in self.accounts:
+                    self._log.debug("Config account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        raise ConfigAccountError("Cannot change config's account-type")
+
+                    # Since updates are handled by a delete followed by an add, invoke the
+                    # delete prepare callbacks to give clients an opportunity to reject.
+                    yield from self._config_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    self._log.debug("Config account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        raise ConfigAccountError("New Config account must contain account_type field.")
+
+                    account = msg
+                    yield from self._config_callbacks.on_add_prepare(account)
+
+            elif action == rwdts.QueryAction.DELETE:
+                # Check if the entire cloud account got deleted
+                fref = ProtobufC.FieldReference.alloc()
+                fref.goto_whole_message(msg.to_pbcm())
+                if fref.is_field_deleted():
+                    yield from self._config_callbacks.on_delete_prepare(msg.name)
+                else:
+                    self._log.error("Deleting individual fields for config account not supported")
+                    xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                    return
+
+            else:
+                self._log.error("Action (%s) NOT SUPPORTED", action)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for Config Account config using xpath: %s",
+                        ConfigAgentSubscriber.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            self._reg = acg.register(
+                    xpath=ConfigAgentSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    on_prepare=on_prepare,
+                    )
diff --git a/common/python/rift/mano/config_agent/operdata.py b/common/python/rift/mano/config_agent/operdata.py
new file mode 100644
index 0000000..b941667
--- /dev/null
+++ b/common/python/rift/mano/config_agent/operdata.py
@@ -0,0 +1,728 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+import time
+
+from gi.repository import (
+    NsrYang,
+    RwTypes,
+    RwcalYang,
+    RwNsrYang,
+    RwConfigAgentYang,
+    RwDts as rwdts)
+
+import rift.tasklets
+
+import rift.mano.utils.juju_api as juju
+
+
+class ConfigAgentAccountNotFound(Exception):
+    """Raised when a named config-agent account cannot be located."""
+    pass
+
+class JujuClient(object):
+    def __init__(self, log, ip, port, user, passwd):
+        self._log = log
+        self._ip = ip
+        self._port = port
+        self._user = user
+        self._passwd = passwd
+
+        self._api = juju.JujuApi(log=log,
+                                 server=ip, port=port,
+                                 user=user, secret=passwd)
+
+
+    def validate_account_creds(self):
+        status = RwcalYang.CloudConnectionStatus()
+        try:
+            env = self._api._get_env()
+        except juju.JujuEnvError as e:
+            msg = "JujuClient: Invalid account credentials: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        except ConnectionRefusedError as e:
+            msg = "JujuClient: Wrong IP or Port: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        except Exception as e:
+            msg = "JujuClient: Connection Failed: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+            self._log.info("JujuClient: Connection Successful")
+
+        return status
+
+
+class ConfigAgentAccount(object):
+    """Wraps a config-agent account config message together with its
+    client plugin (currently only "juju") and its connection status,
+    and drives asynchronous credential validation."""
+    def __init__(self, log, account_msg):
+        self._log = log
+        # Deep copy so later changes to the caller's message don't leak in
+        self._account_msg = account_msg.deep_copy()
+
+        # Only "juju" accounts have a validation-capable client plugin;
+        # other account types keep plugin = None and report "unknown".
+        if account_msg.account_type == "juju":
+            self._cfg_agent_client_plugin = JujuClient(
+                    log,
+                    account_msg.juju.ip_address,
+                    account_msg.juju.port,
+                    account_msg.juju.user,
+                    account_msg.juju.secret)
+        else:
+            self._cfg_agent_client_plugin = None
+
+        self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                status="unknown",
+                details="Connection status lookup not started"
+                )
+
+        # Handle of the in-flight validation task, if any
+        self._validate_task = None
+
+    @property
+    def name(self):
+        """Account name (key)."""
+        return self._account_msg.name
+
+    @property
+    def account_msg(self):
+        """The (copied) account config message."""
+        return self._account_msg
+
+    @property
+    def account_type(self):
+        """Account type string (e.g. "juju")."""
+        return self._account_msg.account_type
+
+    @property
+    def connection_status(self):
+        """Latest ConnectionStatus (unknown|validating|success|failure)."""
+        return self._status
+
+    def update_from_cfg(self, cfg):
+        """Update this account from a new config message (unsupported)."""
+        self._log.debug("Updating parent ConfigAgentAccount to %s", cfg)
+        raise NotImplementedError("Update config agent account not yet supported")
+
+    @asyncio.coroutine
+    def validate_cfg_agent_account_credentials(self, loop):
+        """Validate the account credentials via the client plugin,
+        updating self._status through the validating -> success/failure
+        (or unknown, when no plugin exists) lifecycle.  The blocking
+        plugin call runs in the loop's default executor."""
+        self._log.debug("Validating Config Agent Account %s, credential status %s", self._account_msg, self._status)
+
+        self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                status="validating",
+                details="Config Agent account connection validation in progress"
+                )
+
+        if self._cfg_agent_client_plugin is None:
+            self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                    status="unknown",
+                    details="Config Agent account does not support validation of account creds"
+                    )
+        else:
+            try:
+                status = yield from loop.run_in_executor(
+                    None,
+                    self._cfg_agent_client_plugin.validate_account_creds
+                    )
+                self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus.from_dict(status.as_dict())
+            except Exception as e:
+                self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error - " + str(e)
+                    )
+
+        self._log.info("Got config agent account validation response: %s", self._status)
+
+    def start_validate_credentials(self, loop):
+        """Kick off (or restart) asynchronous credential validation,
+        cancelling any validation task already in flight."""
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_cfg_agent_account_credentials(loop),
+                loop=loop
+                )
+
+class CfgAgentDtsOperdataHandler(object):
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self.cfg_agent_accounts = {}
+
+    def add_cfg_agent_account(self, account_msg):
+        account = ConfigAgentAccount(self._log, account_msg)
+        self.cfg_agent_accounts[account.name] = account
+        self._log.info("ConfigAgent Operdata Handler added. Starting account validation")
+
+        account.start_validate_credentials(self._loop)
+
+    def delete_cfg_agent_account(self, account_name):
+        del self.cfg_agent_accounts[account_name]
+        self._log.info("ConfigAgent Operdata Handler deleted.")
+
+    def get_saved_cfg_agent_accounts(self, cfg_agent_account_name):
+        ''' Get Config Agent Account corresponding to passed name, or all saved accounts if name is None'''
+        saved_cfg_agent_accounts = []
+
+        if cfg_agent_account_name is None or cfg_agent_account_name == "":
+            cfg_agent_accounts = list(self.cfg_agent_accounts.values())
+            saved_cfg_agent_accounts.extend(cfg_agent_accounts)
+        elif cfg_agent_account_name in self.cfg_agent_accounts:
+            account = self.cfg_agent_accounts[cfg_agent_account_name]
+            saved_cfg_agent_accounts.append(account)
+        else:
+            errstr = "Config Agent account {} does not exist".format(cfg_agent_account_name)
+            raise KeyError(errstr)
+
+        return saved_cfg_agent_accounts
+
+
+    def _register_show_status(self):
+        def get_xpath(cfg_agent_name=None):
+            return "D,/rw-config-agent:config-agent/account{}/connection-status".format(
+                    "[name='%s']" % cfg_agent_name if cfg_agent_name is not None else ''
+                    )
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            path_entry = RwConfigAgentYang.ConfigAgentAccount.schema().keyspec_to_entry(ks_path)
+            cfg_agent_account_name = path_entry.key00.name
+            self._log.debug("Got show cfg_agent connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self.get_saved_cfg_agent_accounts(cfg_agent_account_name)
+                for account in saved_accounts:
+                    connection_status = account.connection_status
+                    self._log.debug("Responding to config agent connection status request: %s", connection_status)
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=account.connection_status,
+                            )
+            except KeyError as e:
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def _register_validate_rpc(self):
+        def get_xpath():
+            return "/rw-config-agent:update-cfg-agent-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("cfg_agent_account"):
+                raise ConfigAgentAccountNotFound("Config Agent account name not provided")
+
+            cfg_agent_account_name = msg.cfg_agent_account
+            try:
+                account = self.cfg_agent_accounts[cfg_agent_account_name]
+            except KeyError:
+                raise ConfigAgentAccountNotFound("Config Agent account name %s not found" % cfg_agent_account_name)
+
+            account.start_validate_credentials(self._loop)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
+
+class ConfigAgentJob(object):
+    """A wrapper over the config agent job object, providing some
+    convenience functions.
+
+    YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+    ||
+     ==> VNFRS
+          ||
+           ==> Primitives
+
+    """
+    # The normalizes the state terms from Juju to our yang models
+    # Juju : Yang model
+    STATUS_MAP = {"completed": "success",
+                  "pending"  : "pending",
+                  "running"  : "pending",
+                  "failed"   : "failure"}
+
+    def __init__(self, nsr_id, job, tasks=None):
+        """
+        Args:
+            nsr_id (uuid): ID of NSR record
+            job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+            tasks: List of asyncio.tasks. If provided the job monitor will
+                use it to monitor the tasks instead of the execution IDs
+        """
+        self._job = job
+        self.nsr_id = nsr_id
+        self.tasks = tasks
+
+    @property
+    def id(self):
+        """Job id"""
+        return self._job.job_id
+
+    @property
+    def name(self):
+        """Job name"""
+        return self._job.job_name
+
+    @property
+    def job_status(self):
+        """Status of the job (success|pending|failure)"""
+        return self._job.job_status
+
+    @job_status.setter
+    def job_status(self, value):
+        """Setter for job status"""
+        self._job.job_status = value
+
+    @property
+    def job(self):
+        """Gi object"""
+        return self._job
+
+    @property
+    def xpath(self):
+        """Xpath of the job"""
+        return ("D,/nsr:ns-instance-opdata" +
+                "/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
+                "/nsr:config-agent-job[nsr:job-id='{}']"
+                ).format(self.nsr_id, self.id)
+
+    @staticmethod
+    def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
+        """A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
+        to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+
+        Args:
+            nsr_id (uuid): NSR ID
+            rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): RPC output
+            tasks (list): A list of asyncio.Tasks
+
+        Returns:
+            ConfigAgentJob
+        """
+        # Shortcuts to prevent the HUUGE names.
+        CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
+        CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+        CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+        CfgAgentPrimitiveParam =  NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+
+        # NOTE(review): the conditional below is redundant —
+        # "x if x is not None else None" is always just x.
+        job = CfgAgentJob.from_dict({
+                "job_id": rpc_output.job_id,
+                "job_name" : rpc_output.name,
+                "job_status": "pending",
+                "triggered_by": rpc_output.triggered_by,
+                "create_time": rpc_output.create_time,
+                "job_status_details": rpc_output.job_status_details if rpc_output.job_status_details is not None else None,
+                "parameter": [param.as_dict() for param in rpc_output.parameter],
+                "parameter_group": [pg.as_dict() for pg in rpc_output.parameter_group]
+            })
+
+        # Mirror each VNF's output into a per-VNFR job record, with every
+        # primitive's Juju status normalized through STATUS_MAP.
+        for vnfr in rpc_output.vnf_out_list:
+            vnfr_job = CfgAgentVnfr.from_dict({
+                    "id": vnfr.vnfr_id_ref,
+                    "vnf_job_status": "pending",
+                    })
+
+            for primitive in vnfr.vnf_out_primitive:
+                vnf_primitive = CfgAgentPrimitive.from_dict({
+                        "name": primitive.name,
+                        "execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status],
+                        "execution_id": primitive.execution_id
+                    })
+
+                # Copy over the input param
+                for param in primitive.parameter:
+                    vnf_primitive.parameter.append(
+                            CfgAgentPrimitiveParam.from_dict({
+                                    "name": param.name,
+                                    "value": param.value
+                            }))
+
+                vnfr_job.primitive.append(vnf_primitive)
+
+            job.vnfr.append(vnfr_job)
+
+        return ConfigAgentJob(nsr_id, job, tasks)
+
+
+class ConfigAgentJobMonitor(object):
+    """Job monitor: Polls the Juju controller and get the status.
+    Rules:
+        If all Primitive are success, then vnf & nsr status will be "success"
+        If any one Primitive reaches a failed state then both vnf and nsr will fail.
+    """
+    # Seconds between successive status polls
+    POLLING_PERIOD = 2
+
+    def __init__(self, dts, log, job, executor, loop, config_plugin):
+        """
+        Args:
+            dts : DTS handle
+            log : log handle
+            job (ConfigAgentJob): ConfigAgentJob instance
+            executor (concurrent.futures): Executor for juju status api calls
+            loop (eventloop): Current event loop instance
+            config_plugin : Config plugin to be used.
+        """
+        self.job = job
+        self.log = log
+        self.loop = loop
+        self.executor = executor
+        self.polling_period = ConfigAgentJobMonitor.POLLING_PERIOD
+        self.config_plugin = config_plugin
+        self.dts = dts
+
+    @asyncio.coroutine
+    def _monitor_processes(self, registration_handle):
+        """Await the user-script tasks attached to the job; the job
+        succeeds only when every process returns rc 0.  Publishes the
+        final job state through registration_handle."""
+        result = 0
+        for process in self.job.tasks:
+            rc = yield from process
+            self.log.debug("Process {} returned rc: {}".format(process, rc))
+            # OR the return codes: any non-zero rc fails the whole job
+            result |= rc
+
+        if result == 0:
+            self.job.job_status = "success"
+        else:
+            self.job.job_status = "failure"
+
+        registration_handle.update_element(self.job.xpath, self.job.job)
+
+    def get_error_details(self):
+        '''Get the error details from failed primitives'''
+        errs = ''
+        for vnfr in self.job.job.vnfr:
+            if vnfr.vnf_job_status != "failure":
+                continue
+
+            # Collect each failed primitive's details wrapped in
+            # <error> tags so they can be concatenated into one string
+            for primitive in vnfr.primitive:
+                if primitive.execution_status == "failure":
+                    errs += '<error>'
+                    errs += primitive.execution_error_details
+                    errs += "</error>"
+
+        return errs
+
+    @asyncio.coroutine
+    def publish_action_status(self):
+        """
+        Starts publishing the status for jobs/primitives
+        """
+        registration_handle = yield from self.dts.register(
+                xpath=self.job.xpath,
+                handler=rift.tasklets.DTS.RegistrationHandler(),
+                flags=(rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ),
+                )
+
+        self.log.debug('preparing to publish job status for {}'.format(self.job.xpath))
+
+        try:
+            registration_handle.create_element(self.job.xpath, self.job.job)
+
+            # If the config is done via a user defined script
+            if self.job.tasks is not None:
+                yield from self._monitor_processes(registration_handle)
+                return
+
+            prev = time.time()
+            # Run until pending moves to either failure/success
+            while self.job.job_status == "pending":
+                curr = time.time()
+
+                # Throttle so we poll at most once per polling_period
+                if curr - prev < self.polling_period:
+                    pause = self.polling_period - (curr - prev)
+                    yield from asyncio.sleep(pause, loop=self.loop)
+
+                prev = time.time()
+
+                # One status task per VNFR, polled concurrently
+                tasks = []
+                for vnfr in self.job.job.vnfr:
+                    task = self.loop.create_task(self.get_vnfr_status(vnfr))
+                    tasks.append(task)
+
+                # Exit, if no tasks are found
+                if not tasks:
+                    break
+
+                yield from asyncio.wait(tasks, loop=self.loop)
+
+                job_status = [task.result() for task in tasks]
+
+                # Aggregate: any failure fails the job; any pending keeps
+                # it pending; otherwise success.
+                if "failure" in job_status:
+                    self.job.job_status = "failure"
+                    errs = self.get_error_details()
+                    if len(errs):
+                        self.job.job.job_status_details = errs
+                elif "pending" in job_status:
+                    self.job.job_status = "pending"
+                else:
+                    self.job.job_status = "success"
+
+                # Publish the (possibly unchanged) aggregated status on
+                # every polling iteration.
+                registration_handle.update_element(self.job.xpath, self.job.job)
+
+
+        except Exception as e:
+            self.log.exception(e)
+            raise
+
+
+    @asyncio.coroutine
+    def get_vnfr_status(self, vnfr):
+        """Schedules tasks for all containing primitives and updates it's own
+        status.
+
+        Args:
+            vnfr : Vnfr job record containing primitives.
+
+        Returns:
+            (str): "success|failure|pending"
+        """
+        tasks = []
+        job_status = []
+
+        for primitive in vnfr.primitive:
+            if primitive.execution_id == "":
+                # TODO: For some config data, the id will be empty, check if
+                # mapping is needed.
+                job_status.append(primitive.execution_status)
+                continue
+
+            task = self.loop.create_task(self.get_primitive_status(primitive))
+            tasks.append(task)
+
+        if tasks:
+            yield from asyncio.wait(tasks, loop=self.loop)
+
+        # Same aggregation rule as the job level: failure > pending > success
+        job_status.extend([task.result() for task in tasks])
+        if "failure" in job_status:
+            vnfr.vnf_job_status = "failure"
+            return "failure"
+
+        elif "pending" in job_status:
+            vnfr.vnf_job_status = "pending"
+            return "pending"
+
+        else:
+            vnfr.vnf_job_status = "success"
+            return "success"
+
+    @asyncio.coroutine
+    def get_primitive_status(self, primitive):
+        """
+        Queries the juju api and gets the status of the execution id.
+
+        Args:
+            primitive : Primitive containing the execution ID.
+
+        Returns:
+            (str): normalized execution status ("success|pending|failure")
+        """
+
+        try:
+            # Blocking plugin call runs on the thread-pool executor
+            resp = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.config_plugin.get_action_status,
+                    primitive.execution_id
+                    )
+
+            status = resp['status']
+            if status == 'failed':
+                self.log.warning("Execution of action {} failed: {}".
+                                 format(primitive.execution_id, resp))
+                primitive.execution_error_details = resp['message']
+
+        except Exception as e:
+            # Any plugin/executor error is treated as a failed primitive
+            self.log.exception(e)
+            status = "failed"
+
+        # Handle case status is None
+        if status:
+            primitive.execution_status = ConfigAgentJob.STATUS_MAP[status]
+        else:
+            primitive.execution_status = "failure"
+
+        return primitive.execution_status
+
+
+class CfgAgentJobDtsHandler(object):
+    """Dts Handler for CfgAgent"""
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:config-agent-job"
+
+    def __init__(self, dts, log, loop, nsm, cfgm):
+        """
+        Args:
+            dts  : Dts Handle.
+            log  : Log handle.
+            loop : Event loop.
+            nsm  : NsmManager.
+            cfgm : ConfigManager.
+        """
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cfgm = cfgm
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @property
+    def nsm(self):
+        """ Return the NSManager manager instance """
+        return self._nsm
+
+    @property
+    def cfgm(self):
+        """ Return the ConfigManager manager instance """
+        return self._cfgm
+
+    @staticmethod
+    def cfg_job_xpath(nsr_id, job_id):
+        return ("D,/nsr:ns-instance-opdata" +
+                "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
+                "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for NS monitoring read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
+            if action == rwdts.QueryAction.READ:
+                schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                try:
+                    nsr_id = path_entry.key00.ns_instance_config_ref
+
+                    #print("###>>> self.nsm.nsrs:", self.nsm.nsrs)
+                    nsr_ids = []
+                    if nsr_id is None or nsr_id == "":
+                        nsrs = list(self.nsm.nsrs.values())
+                        nsr_ids = [nsr.id for nsr in nsrs if nsr is not None]
+                    else:
+                        nsr_ids = [nsr_id]
+
+                    for nsr_id in nsr_ids:
+                        job = self.cfgm.get_job(nsr_id)
+
+                        # If no jobs are queued for the NSR
+                        if job is None:
+                            continue
+
+                        xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.job_id),
+                            job)
+
+                except Exception as e:
+                    self._log.exception("Caught exception:%s", str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+            else:
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+class ConfigAgentJobManager(object):
+    """A central class that manages all the Config Agent related data,
+    including updating the job status.
+
+    TODO: Needs to support multiple config agents.
+    """
+    def __init__(self, dts, log, loop, nsm):
+        """
+        Args:
+            dts  : Dts handle
+            log  : Log handler
+            loop : Event loop
+            nsm  : NsmTasklet instance
+        """
+        # ConfigAgentJob instances keyed by NSR id (one job per NSR)
+        self.jobs = {}
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.nsm = nsm
+        self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
+        # Single worker: status polls to the config agent are serialized
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+
+    def add_job(self, rpc_output, tasks=None):
+        """Once an RPC is triggered, add a new job and start a monitor
+        coroutine that publishes its status until completion.
+
+        Args:
+            rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output
+            tasks(list) A list of asyncio.Tasks
+
+        """
+        nsr_id = rpc_output.nsr_id_ref
+
+        # NOTE(review): this overwrites any existing job for the NSR;
+        # presumably only one job per NSR is active at a time — confirm.
+        self.jobs[nsr_id] = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+
+        self.log.debug("Creating a job monitor for Job id: {}".format(
+                rpc_output.job_id))
+
+        # For every Job we will schedule a new monitoring process.
+        job_monitor = ConfigAgentJobMonitor(
+            self.dts,
+            self.log,
+            self.jobs[nsr_id],
+            self.executor,
+            self.loop,
+            self.nsm.config_agent_plugins[0]  # Hack
+            )
+        # NOTE(review): the task reference is not retained anywhere, so the
+        # monitor cannot be cancelled later and may be garbage-collection
+        # sensitive — consider keeping a handle to it.
+        task = self.loop.create_task(job_monitor.publish_action_status())
+
+    def get_job(self, nsr_id):
+        """Get the job associated with the NSR Id, if present."""
+        try:
+            return self.jobs[nsr_id].job
+        except KeyError:
+            return None
+
+    @asyncio.coroutine
+    def register(self):
+        """Register the job operdata DTS handler."""
+        yield from self.handler.register()
diff --git a/common/python/rift/mano/config_data/__init__.py b/common/python/rift/mano/config_data/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/config_data/__init__.py
diff --git a/common/python/rift/mano/config_data/config.py b/common/python/rift/mano/config_data/config.py
new file mode 100644
index 0000000..63a2e48
--- /dev/null
+++ b/common/python/rift/mano/config_data/config.py
@@ -0,0 +1,430 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import abc
+import json
+import os
+import yaml
+
+from gi.repository import NsdYang
+from gi.repository import VnfdYang
+
+
+class InitialConfigReadError(Exception):
+    """Raised when initial config primitive input data is malformed or unreadable."""
+    pass
+
+
+class InitialConfigMethodError(Exception):
+    """Raised when an abstract reader method is invoked on the base class."""
+    pass
+
+
+class InitialConfigPrimitiveReader(object):
+    """ Reader for the VNF Initial Config Input Data
+
+    This class interprets the Initial Config Primitive
+    Input data and constructs initial config primitive
+    protobuf messages.
+
+    The reason for creating a new format is to keep the structure
+    as dead-simple as possible for readability.
+
+    The structure (not serialization format) is defined as the
+    following.
+
+    [
+        {
+          "name": <primitive_name>,
+          "parameter": {
+            "hostname": "pe1"
+            "pass": "6windos"
+            ...
+          }
+        }
+        ...
+    ]
+
+    """
+    def __init__(self, primitive_input):
+        # Accumulates the pb messages built from the input, in input order.
+        self._primitives = []
+
+        self._parse_input_data(primitive_input)
+
+    def _parse_input_data(self, input_dict):
+        # Each entry's position in the list becomes its "seq" number.
+        for seq, cfg in enumerate(input_dict):
+            if "name" not in cfg:
+                raise InitialConfigReadError("Initial config primitive must have a name")
+
+            name = cfg["name"]
+
+            # NOTE(review): the space after "self." is unusual but valid Python.
+            new_primitive = self. get_initial_config_primitive(seq=seq, name=name)
+            self._primitives.append(new_primitive)
+            if "parameter" in cfg:
+                for key, val in cfg["parameter"].items():
+                    new_primitive.parameter.add(name=key, value=val)
+
+    # NOTE(review): this class does not use the ABCMeta metaclass, so
+    # @abc.abstractmethod is not enforced at instantiation time; the runtime
+    # InitialConfigMethodError below is the actual guard.
+    @abc.abstractmethod
+    def get_initial_config_primitive(self, seq, name):
+        '''Override in sub class to provide the correct yang model'''
+        raise InitialConfigMethodError(
+            "InitialConfigPrimitiveReader Calling abstract class method")
+
+    @property
+    def primitives(self):
+        """ Returns a copy of the read initial config primitives"""
+        return [prim.deep_copy() for prim in self._primitives]
+
+    @classmethod
+    def from_yaml_file_hdl(cls, file_hdl):
+        """ Create an instance of this reader class
+        by reading a YAML file handle.
+
+        Arguments:
+            file_hdl - A file handle which contains serialized YAML which
+                       follows the documented structure.
+
+        Returns:
+            A new instance of cls
+
+        Raises:
+            InitialConfigReadError: Input Data was malformed or could not be read
+        """
+        try:
+            input_dict = yaml.safe_load(file_hdl)
+        except yaml.YAMLError as e:
+            raise InitialConfigReadError(e)
+
+        return cls(input_dict)
+
+
+class VnfInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+    '''Class to read the VNF initial config primitives'''
+
+    def __init__(self, primitive_input):
+        super(VnfInitialConfigPrimitiveReader, self).__init__(primitive_input)
+
+    def get_initial_config_primitive(self, seq, name):
+        # Build the VNFD-flavored pb message for one primitive entry.
+        return VnfdYang.InitialConfigPrimitive(seq=seq, name=name)
+
+
+class NsInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+    '''Class to read the NS initial config primitives'''
+
+    def __init__(self, primitive_input):
+        super(NsInitialConfigPrimitiveReader, self).__init__(primitive_input)
+
+    def get_initial_config_primitive(self, seq, name):
+        # Build the NSD-flavored pb message for one primitive entry.
+        return NsdYang.NsdInitialConfigPrimitive(seq=seq, name=name)
+
+
+class ConfigPrimitiveConvertor(object):
+    """Extracts config/initial-config primitive data from NSD/VNFD pb messages
+    into plain dicts (serialized as YAML/JSON), and merges such input data back
+    into pb messages.
+    """
+    # Keys used in the extracted/merged input-data dictionaries.
+    PARAMETER = "parameter"
+    PARAMETER_GROUP = "parameter_group"
+    CONFIG_PRIMITIVE = "service_primitive"
+    INITIAL_CONFIG_PRIMITIVE = "initial_config_primitive"
+
+    def _extract_param(self, param, field="default_value"):
+        # Returns (name, value) for the given field. NOTE: this also CLEARS
+        # the field on the pb message when a value was present -- extraction
+        # is destructive by design.
+        key = param.name
+        value = getattr(param, field, None)
+
+        if value is not None:
+            setattr(param, field, None)
+
+        return key, value
+
+    def _extract_parameters(self, parameters, input_data, field="default_value"):
+        # Collect {name: value} for every parameter that has a value; the
+        # "parameter" key is dropped entirely when nothing was collected.
+        input_data[self.PARAMETER] = {}
+        for param in parameters:
+            key, value = self._extract_param(param, field)
+
+            if value is None:
+                continue
+
+            input_data[self.PARAMETER][key] = value
+
+        if not input_data[self.PARAMETER]:
+            del input_data[self.PARAMETER]
+
+    def _extract_parameter_group(self, param_groups, input_data):
+        # Same as _extract_parameters, but nested one level per group name.
+        input_data[self.PARAMETER_GROUP] = {}
+        for param_group in param_groups:
+            input_data[self.PARAMETER_GROUP][param_group.name] = {}
+            for param in param_group.parameter:
+                key, value = self._extract_param(param)
+
+                if value is None:
+                    continue
+
+                input_data[self.PARAMETER_GROUP][param_group.name][key] = value
+
+        if not input_data[self.PARAMETER_GROUP]:
+            del input_data[self.PARAMETER_GROUP]
+
+    def extract_config(self,
+                       config_primitives=None,
+                       initial_configs=None,
+                       format="yaml"):
+        """Serialize primitive values into YAML or JSON.
+
+        Args:
+            config_primitives: iterable of service-primitive pb messages.
+            initial_configs: iterable of initial-config-primitive pb messages.
+            format: "yaml" or "json".
+
+        Returns:
+            Serialized string, or '' when there is nothing to extract.
+            NOTE(review): an unrecognized format with non-empty data falls
+            through and returns None implicitly -- TODO confirm intended.
+        """
+        input_data = {}
+
+        if config_primitives:
+            input_data[self.CONFIG_PRIMITIVE] = {}
+            for config_primitive in config_primitives:
+                input_data[self.CONFIG_PRIMITIVE][config_primitive.name] = {}
+                self._extract_parameters(
+                    config_primitive.parameter,
+                    input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+
+                # Not every primitive type carries parameter_group.
+                try:
+                    self._extract_parameter_group(
+                        config_primitive.parameter_group,
+                        input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+                except AttributeError:
+                    pass
+
+                if not input_data[self.CONFIG_PRIMITIVE][config_primitive.name]:
+                    del input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+
+            if not input_data[self.CONFIG_PRIMITIVE]:
+                del input_data[self.CONFIG_PRIMITIVE]
+
+
+        if initial_configs:
+            input_data[self.INITIAL_CONFIG_PRIMITIVE] = []
+            for in_config_primitive in initial_configs:
+                primitive = {}
+                # Initial-config values live in "value", not "default_value".
+                self._extract_parameters(
+                    in_config_primitive.parameter,
+                    primitive,
+                    field="value")
+
+                if primitive:
+                    input_data[self.INITIAL_CONFIG_PRIMITIVE].append(
+                        {
+                            "name": in_config_primitive.name,
+                            self.PARAMETER: primitive[self.PARAMETER],
+                        }
+                    )
+
+            if not input_data[self.INITIAL_CONFIG_PRIMITIVE]:
+                del input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+        if len(input_data):
+            if format == "json":
+                return json.dumps(input_data)
+            elif format == "yaml":
+                return yaml.dump(input_data, default_flow_style=False)
+        else:
+            return ''
+
+    def extract_nsd_config(self, nsd, format="yaml"):
+        """Extract service-primitive and initial-config data from an NSD."""
+        config_prim = None
+        try:
+            config_prim = nsd.service_primitive
+        except AttributeError:
+            pass
+
+        initial_conf = None
+        try:
+            initial_conf = nsd.initial_config_primitive
+        except AttributeError:
+            pass
+
+        return self.extract_config(
+            config_primitives=config_prim,
+            initial_configs=initial_conf,
+            format=format)
+
+    def extract_vnfd_config(self, vnfd, format="yaml"):
+        """Extract service-primitive and initial-config data from a VNFD's
+        vnf_configuration."""
+        config_prim = None
+        try:
+            config_prim = vnfd.vnf_configuration.service_primitive
+        except AttributeError:
+            pass
+
+        initial_conf = None
+        try:
+            initial_conf = vnfd.vnf_configuration.initial_config_primitive
+        except AttributeError:
+            pass
+
+        return self.extract_config(
+            config_primitives=config_prim,
+            initial_configs=initial_conf,
+            format=format)
+
+    def merge_params(self, parameters, input_config, field="default_value"):
+        """Copy values from input_config into the matching pb parameters;
+        parameters without an entry in input_config are left untouched."""
+        for param in parameters:
+            try:
+                setattr(param, field, input_config[param.name])
+            except KeyError:
+                pass
+
+    def add_nsd_initial_config(self, nsd_init_cfg_prim_msg, input_data):
+        """ Add initial config primitives from NS Initial Config Input Data
+
+        Arguments:
+            nsd_init_cfg_prim_msg - manotypes:nsd/initial_config_primitive pb msg
+            input_data - NsInitialConfigPrimitiveReader documented input data
+
+        Raises:
+           InitialConfigReadError: NS input data was malformed
+        """
+        if self.INITIAL_CONFIG_PRIMITIVE in input_data:
+            ns_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+            reader = NsInitialConfigPrimitiveReader(ns_input_data)
+            for prim in reader.primitives:
+                nsd_init_cfg_prim_msg.append(prim)
+
+    def merge_nsd_initial_config(self, nsd, input_data):
+        """Merge initial-config parameter values from input_data into nsd."""
+        try:
+            for config_primitive in nsd.initial_config_primitive:
+                for cfg in input_data[self.INITIAL_CONFIG_PRIMITIVE]:
+                    if cfg['name'] == config_primitive.name:
+                        self.merge_params(
+                            config_primitive.parameter,
+                            cfg[self.PARAMETER],
+                            field="value")
+                        break
+
+        except AttributeError as e:
+            # BUG(review): this class never sets self._log (only ConfigStore
+            # does), so reaching this handler raises AttributeError itself --
+            # TODO pass a log handle in or drop the debug call.
+            self._log.debug("Did not find initial-config-primitive for NSD {}: {}".
+                            format(nsd.name, e))
+
+
+    def merge_nsd_config(self, nsd, input_data):
+        """Merge service-primitive parameter values from input_data into nsd.
+
+        NOTE(review): cfg[self.PARAMETER] / cfg[self.PARAMETER_GROUP] lookups
+        below are unguarded; input lacking those keys raises KeyError.
+        """
+        for config_primitive in nsd.service_primitive:
+            try:
+                cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+            except KeyError:
+                continue
+
+            self.merge_params(
+                    config_primitive.parameter,
+                    cfg[self.PARAMETER])
+
+            for param_group in config_primitive.parameter_group:
+                self.merge_params(
+                        param_group.parameter,
+                        cfg[self.PARAMETER_GROUP][param_group.name])
+
+    def add_vnfd_initial_config(self, vnfd_init_cfg_prim_msg, input_data):
+        """ Add initial config primitives from VNF Initial Config Input Data
+
+        Arguments:
+            vnfd_init_cfg_prim_msg - manotypes:vnf-configuration/initial_config_primitive pb msg
+            input_data - VnfInitialConfigPrimitiveReader documented input data
+
+        Raises:
+           InitialConfigReadError: VNF input data was malformed
+        """
+        if self.INITIAL_CONFIG_PRIMITIVE in input_data:
+            vnf_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+            reader = VnfInitialConfigPrimitiveReader(vnf_input_data)
+            for prim in reader.primitives:
+                vnfd_init_cfg_prim_msg.append(prim)
+
+    def merge_vnfd_config(self, vnfd, input_data):
+        """Merge service-primitive parameter values from input_data into the
+        VNFD's vnf_configuration."""
+        for config_primitive in vnfd.vnf_configuration.service_primitive:
+            try:
+                cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+            except KeyError:
+                continue
+
+            self.merge_params(
+                config_primitive.parameter,
+                cfg[self.PARAMETER])
+
+
+class ConfigStore(object):
+    """Convenience class that fetches all the instance related data from the
+    $RIFT_ARTIFACTS/launchpad/libs directory.
+    """
+
+    def __init__(self, log):
+        """
+        Args:
+            log : Log handle.
+        """
+        self._log = log
+        self.converter = ConfigPrimitiveConvertor()
+
+    def merge_vnfd_config(self, nsd_id, vnfd, member_vnf_index):
+        """Merges the vnfd config from the config directory.
+
+        Args:
+            nsd_id (str): Id of the NSD object
+            vnfd : VNFD pb message containing the VNFD id and
+                   the member index ref.
+            member_vnf_index : member index used in the config file name.
+        """
+        # NOTE(review): os.getenv returns None when RIFT_ARTIFACTS is unset,
+        # which makes os.path.join raise TypeError -- TODO confirm the env
+        # var is guaranteed by the deployment.
+        nsd_archive = os.path.join(
+            os.getenv('RIFT_ARTIFACTS'),
+            "launchpad/libs",
+            nsd_id,
+            "config")
+
+        self._log.info("Looking for config from the archive {}".format(nsd_archive))
+
+        if not os.path.exists(nsd_archive):
+            return
+
+        # Per-VNF config file: <vnfd-id>__<member-index>.yaml
+        config_file = os.path.join(nsd_archive,
+                                   "{}__{}.yaml".format(vnfd.id, member_vnf_index))
+
+        if not os.path.exists(config_file):
+            self._log.info("Could not find VNF initial config in archive: %s", config_file)
+            return
+
+        input_data = self.read_from_file(config_file)
+        self._log.info("Loaded VNF config file {}: {}".format(config_file, input_data))
+
+        self.converter.merge_vnfd_config(vnfd, input_data)
+
+        self.converter.add_vnfd_initial_config(
+            vnfd.vnf_configuration.initial_config_primitive,
+            input_data,
+        )
+
+    def read_from_file(self, filename):
+        """Load a YAML file and return the parsed data.
+
+        NOTE(review): bare yaml.load without an explicit Loader is deprecated
+        and unsafe for untrusted files -- prefer yaml.safe_load (as used by
+        InitialConfigPrimitiveReader.from_yaml_file_hdl).
+        """
+        with open(filename) as fh:
+            input_data = yaml.load(fh)
+        return input_data
+
+    def merge_nsd_config(self, nsd):
+        """Merge NS-level config from <archive>/<nsd-id>.yaml into nsd."""
+        nsd_archive = os.path.join(
+            os.getenv('RIFT_ARTIFACTS'),
+            "launchpad/libs",
+            nsd.id,
+            "config")
+
+        self._log.info("Looking for config from the archive {}".format(nsd_archive))
+
+        if not os.path.exists(nsd_archive):
+            return
+
+        config_file = os.path.join(nsd_archive,
+                                   "{}.yaml".format(nsd.id))
+        if not os.path.exists(config_file):
+            self._log.info("Could not find NS config in archive: %s", config_file)
+            return
+
+        input_data = self.read_from_file(config_file)
+        self._log.info("Loaded NS config file {}: {}".format(config_file, input_data))
+
+        self.converter.merge_nsd_config(nsd, input_data)
+
+        self.converter.merge_nsd_initial_config(nsd, input_data)
diff --git a/common/python/rift/mano/config_data/test/__init__.py b/common/python/rift/mano/config_data/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/config_data/test/__init__.py
diff --git a/common/python/rift/mano/config_data/test/test_converter.py b/common/python/rift/mano/config_data/test/test_converter.py
new file mode 100644
index 0000000..1bfd7d7
--- /dev/null
+++ b/common/python/rift/mano/config_data/test/test_converter.py
@@ -0,0 +1,424 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import uuid
+from gi.repository import NsdYang, VnfdYang
+from ..config import ConfigPrimitiveConvertor
+import yaml
+
+@pytest.fixture(scope="function")
+def nsd():
+    """Fresh NSD pb message with a random id, one per test."""
+    catalog = NsdYang.YangData_Nsd_NsdCatalog()
+    nsd = catalog.nsd.add()
+    nsd.id = str(uuid.uuid1())
+    return nsd
+
+@pytest.fixture(scope="function")
+def vnfd():
+    """Fresh VNFD pb message with a random id, one per test."""
+    catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+    vnfd = catalog.vnfd.add()
+    vnfd.id = str(uuid.uuid1())
+    return vnfd
+
+@pytest.fixture(scope="session")
+def convertor():
+    """Single stateless ConfigPrimitiveConvertor shared across the session."""
+    return ConfigPrimitiveConvertor()
+
+def test_nsd_config(nsd, convertor):
+        """Extracting one NSD service primitive yields YAML keyed by
+        primitive name with parameter and parameter_group sections."""
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        expected_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        # Exact string comparison -- relies on yaml.dump's stable key order.
+        assert expected_yaml == \
+               convertor.extract_nsd_config(nsd)
+
+
+def test_nsd_multiple_config(nsd, convertor):
+        """Two service primitives are both extracted, keyed independently."""
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [{
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    }],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [{
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation 2",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        expected_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+Add SP Test Corporation 2:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        # Compared as parsed data (order-insensitive). NOTE(review): bare
+        # yaml.load without Loader is deprecated -- prefer yaml.safe_load.
+        assert yaml.load(expected_yaml) == \
+               yaml.load(convertor.extract_nsd_config(nsd))
+
+
+def test_vnfd_config(vnfd, convertor):
+    """Initial-config primitives extract under initial_config_primitive,
+    keyed by primitive name with their parameter values."""
+    vnf_config = vnfd.vnf_configuration
+
+    # Set the initial-config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    expected_yaml = """initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    assert expected_yaml == convertor.extract_vnfd_config(vnfd)
+
+def test_vnfd_config_prim(vnfd, convertor):
+    """Service primitives and initial-config primitives extract into
+    separate top-level YAML sections."""
+    vnf_config = vnfd.vnf_configuration
+
+    # Set the initial-config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [
+                {"name": "Foo", "default_value": "Bar"}
+        ]
+        })
+
+    expected_yaml = """service_primitive:
+  PE1:
+    parameter:
+      Foo: Bar
+initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    assert expected_yaml == convertor.extract_vnfd_config(vnfd)
+
+
+
+def test_vnfd_merge(vnfd, convertor):
+    """Merging extracted YAML back into a value-less VNFD reproduces the
+    fully-populated VNFD (round-trip check)."""
+    vnf_config = vnfd.vnf_configuration
+
+    # VNFD under test: primitives declared but with no values set.
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [{"name": "vpe-router"},
+                          {"name": "user"},
+                          {"name": "pass"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [{"name": "cidr"}],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [{"name": "Foo",}]
+        })
+
+    ip_yaml = """service_primitive:
+  PE1:
+    parameter:
+      Foo: Bar
+initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    # Expected: the same VNFD with all values filled in.
+    catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+    expected_vnfd = catalog.vnfd.add()
+    vnf_config = expected_vnfd.vnf_configuration
+    expected_vnfd.id = vnfd.id
+
+    # Set the initial-config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [
+                {"name": "Foo", "default_value": "Bar"}
+        ]
+        })
+
+    convertor.merge_vnfd_config(vnfd, yaml.load(ip_yaml))
+
+    assert vnfd.as_dict() == expected_vnfd.as_dict()
+
+
+def test_nsd_merge(nsd, convertor):
+        """Merging extracted YAML back into the NSD reproduces the original
+        (extraction cleared the values; merge restores them)."""
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        ip_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        catalog = NsdYang.YangData_Nsd_NsdCatalog()
+        expected_nsd = catalog.nsd.add()
+        expected_nsd.id = nsd.id
+        expected_nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        convertor.merge_nsd_config(nsd, yaml.load(ip_yaml))
+
+        assert nsd.as_dict() == expected_nsd.as_dict()
+
+
diff --git a/common/python/rift/mano/dts/__init__.py b/common/python/rift/mano/dts/__init__.py
new file mode 100644
index 0000000..e523034
--- /dev/null
+++ b/common/python/rift/mano/dts/__init__.py
@@ -0,0 +1,24 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .core import DtsHandler
+
+# Subscribers
+from .subscriber.core import AbstractOpdataSubscriber, AbstractConfigSubscriber
+from .subscriber.vnf_subscriber import VnfdCatalogSubscriber, VnfrCatalogSubscriber
+from .subscriber.ns_subscriber import NsrCatalogSubscriber, NsdCatalogSubscriber
+from .subscriber.store import SubscriberStore
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/core.py b/common/python/rift/mano/dts/core.py
new file mode 100644
index 0000000..4894e16
--- /dev/null
+++ b/common/python/rift/mano/dts/core.py
@@ -0,0 +1,40 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+class DtsHandler(object):
+    """A common class to hold the barebone objects to build a publisher or
+    subscriber
+    """
+    def __init__(self, log, dts, loop):
+        """Constructor
+
+        Args:
+            log : Log handle
+            dts : DTS handle
+            loop : Asyncio event loop.
+        """
+        # Registration handle; set by subclasses when they register with DTS.
+        self.reg = None
+        self.log = log
+        self.dts = dts
+        self.loop = loop
diff --git a/common/python/rift/mano/dts/subscriber/__init__.py b/common/python/rift/mano/dts/subscriber/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/__init__.py
diff --git a/common/python/rift/mano/dts/subscriber/core.py b/common/python/rift/mano/dts/subscriber/core.py
new file mode 100644
index 0000000..dd2513e
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/core.py
@@ -0,0 +1,215 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import abc
+import collections
+import asyncio
+
+from gi.repository import (RwDts as rwdts, ProtobufC)
+import rift.tasklets
+
+from ..core import DtsHandler
+
+
class SubscriberDtsHandler(DtsHandler):
    """Base class shared by all DTS subscribers."""

    @classmethod
    def from_tasklet(cls, tasklet, callback=None):
        """Build the subscriber directly from a tasklet.

        Args:
            tasklet (rift.tasklets.Tasklet): Tasklet supplying log/dts/loop.
            callback (None, optional): Callable invoked on subscriber
                changes.

        Signature of callback:
            Args:
                msg: The Gi Object msg from DTS
                action(rwdts.QueryAction): Action type

        Returns:
            The constructed subscriber instance.
        """
        return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)

    def __init__(self, log, dts, loop, callback=None):
        super().__init__(log, dts, loop)
        # Optional change-notification callback; may be left as None.
        self.callback = callback

    def get_reg_flags(self):
        """Default set of registration flags; subclasses may override.

        Returns:
            Combined rwdts.Flag bits for a caching subscriber.
        """
        return rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE
+
+
+
class AbstractOpdataSubscriber(SubscriberDtsHandler):
    """Abstract class that simplifies the process of creating subscribers
    for opdata.

    Opdata subscriber can be created in one step by subclassing and implementing
    the MANDATORY get_xpath() method
    
    """
    @abc.abstractmethod
    def get_xpath(self):
        """
        Returns:
           str: xpath
        """
        pass

    @asyncio.coroutine
    def register(self):
        """Triggers the registration
        """
        # Per-transaction staging area: prepare stores (msg, action) here and
        # commit pops it, so the callback only fires for committed changes.
        xacts = {}

        def on_commit(xact_info):
            xact_id = xact_info.handle.get_xact().id
            if xact_id in xacts:
                msg, action = xacts.pop(xact_id)

                if self.callback:
                    self.callback(msg, action)

            return rwdts.MemberRspCode.ACTION_OK

        @asyncio.coroutine
        def on_prepare(xact_info, action, ks_path, msg):
            # NOTE(review): commit reads the id via xact_info.handle.get_xact(),
            # prepare via xact_info.xact -- presumably equivalent; confirm.
            try:
                # Defer all actions till the commit state.
                xacts[xact_info.xact.id] = (msg, action)

            except Exception as e:
                self.log.exception(e)

            finally:
                # Always ACK so the transaction can proceed even on error.
                xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        reg_event = asyncio.Event(loop=self.loop)

        @asyncio.coroutine
        def on_ready(_, status):
            reg_event.set()

        handler = rift.tasklets.DTS.RegistrationHandler(
                on_ready=on_ready,
                on_prepare=on_prepare,
                on_commit=on_commit
                )

        self.reg = yield from self.dts.register(
                xpath=self.get_xpath(),
                flags=self.get_reg_flags(),
                handler=handler)

        # NOTE(review): the wait on reg_event is disabled, so register()
        # returns before the on_ready callback fires -- confirm intentional.
        # yield from reg_event.wait()

        assert self.reg is not None

    def deregister(self):
        # Drop the DTS registration created by register().
        self.reg.deregister()
+
+
class AbstractConfigSubscriber(SubscriberDtsHandler):
    """Abstract class that simplifies the process of creating subscribers
    for config data.

    Config subscriber can be created in one step by subclassing and
    implementing the MANDATORY get_xpath() and key_name() methods.
    """
    KEY = "msgs"

    @abc.abstractmethod
    def get_xpath(self):
        """Returns the config xpath (str) to subscribe to."""
        pass

    @abc.abstractmethod
    def key_name(self):
        """Returns the attribute name (str) used as the unique key."""
        pass

    def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
        """Classify the transaction's configs into adds/deletes/updates.

        Unfortunately, it is currently difficult to figure out what has
        exactly changed in this xact without Pbdelta support (RIFT-4916).
        As a workaround, we can fetch the pre and post xact elements and
        perform a comparison to figure out adds/deletes/updates.

        Args:
            dts_member_reg: DTS member registration handle.
            xact: Current DTS transaction.
            key_name (str): Attribute used as the unique key of each config.

        Returns:
            tuple: (added_cfgs, deleted_cfgs, updated_cfgs) lists.
        """
        xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
        curr_cfgs = list(dts_member_reg.elements)

        xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
        curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}

        # Adds: keys present only in the xact view.
        added_keys = set(xact_key_map) - set(curr_key_map)
        added_cfgs = [xact_key_map[key] for key in added_keys]

        # Deletes: keys present only in the current view.
        deleted_keys = set(curr_key_map) - set(xact_key_map)
        deleted_cfgs = [curr_key_map[key] for key in deleted_keys]

        # Updates: keys in both views whose payload differs.
        updated_keys = set(curr_key_map) & set(xact_key_map)
        updated_cfgs = [xact_key_map[key] for key in updated_keys
                        if xact_key_map[key] != curr_key_map[key]]

        return added_cfgs, deleted_cfgs, updated_cfgs

    @asyncio.coroutine
    def register(self):
        """ Register for configuration changes """

        def on_apply(dts, acg, xact, action, scratch):
            """Apply the  configuration"""
            # NOTE(review): computed but unused; looks intended for
            # recovery-specific handling -- confirm before removing.
            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL

            add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
                    dts_member_reg=self.reg,
                    xact=xact,
                    key_name=self.key_name())

            # Idiom fix: the original used list comprehensions purely for
            # their side effects; plain loops keep the same notification
            # order (deletes, then creates, then updates).
            if self.callback:
                for cfg in delete_cfgs:
                    self.callback(cfg, rwdts.QueryAction.DELETE)

                for cfg in add_cfgs:
                    self.callback(cfg, rwdts.QueryAction.CREATE)

                for cfg in update_cfgs:
                    self.callback(cfg, rwdts.QueryAction.UPDATE)

        @asyncio.coroutine
        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
            """ on prepare callback """
            xact_info.respond_xpath(rwdts.XactRspCode.ACK)

        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
        with self.dts.appconf_group_create(handler=acg_hdl) as acg:
            self.reg = acg.register(
                xpath=self.get_xpath(),
                flags=self.get_reg_flags(),
                on_prepare=on_prepare)

    def deregister(self):
        """Deregister the appconf registration created by register()."""
        self.reg.deregister()
diff --git a/common/python/rift/mano/dts/subscriber/ns_subscriber.py b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
new file mode 100644
index 0000000..1078e85
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
@@ -0,0 +1,52 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file ns_subscriber.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import gi
+gi.require_version('RwDts', '1.0')
+from gi.repository import RwDts as rwdts
+
+from . import core
+
+
class NsrCatalogSubscriber(core.AbstractOpdataSubscriber):
    """Subscriber for NSR opdata."""

    def key_name(self):
        # NSRs are keyed by the config they were instantiated from.
        return "ns_instance_config_ref"

    def get_reg_flags(self):
        # Override the base default (drop CACHE) to work around a DTS
        # issue with NSR and RwNsr.
        return rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY

    def get_xpath(self):
        return "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+
class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
    """Config subscriber for the network service descriptor catalog."""

    def key_name(self):
        return "id"

    def get_xpath(self):
        return "C,/nsd:nsd-catalog/nsd:nsd"
diff --git a/common/python/rift/mano/dts/subscriber/store.py b/common/python/rift/mano/dts/subscriber/store.py
new file mode 100644
index 0000000..88cb79a
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/store.py
@@ -0,0 +1,119 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file store.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import asyncio
+import enum
+
+from gi.repository import RwDts as rwdts
+from . import core, ns_subscriber, vnf_subscriber
+
+
class SubscriberStore(core.SubscriberDtsHandler):
    """A convenience class that holds all the VNF and NS related config and
    opdata subscribers, along with local caches of NSR and VNFR records.
    """
    KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')

    def __init__(self, log, dts, loop, callback=None):
        """
        Args:
            log : Log handle.
            dts : DTS handle.
            loop : Asyncio event loop.
            callback (None, optional): Change callback, stored by the base
                class as self.callback.
        """
        # BUG FIX: the callback argument was previously dropped (the base
        # class was called without it), leaving self.callback always None.
        super().__init__(log, dts, loop, callback=callback)

        params = (self.log, self.dts, self.loop)

        self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
        self._nsrs = {}
        self._nsd_sub = ns_subscriber.NsdCatalogSubscriber(*params)

        self._vnfr_sub = vnf_subscriber.VnfrCatalogSubscriber(*params, callback=self.on_vnfr_change)
        self._vnfrs = {}
        self._vnfd_sub = vnf_subscriber.VnfdCatalogSubscriber(*params)

    @property
    def vnfd(self):
        # Read VNFD configs straight from the registration, not a cache.
        return list(self._vnfd_sub.reg.get_xact_elements())

    @property
    def nsd(self):
        # Read NSD configs straight from the registration, not a cache.
        return list(self._nsd_sub.reg.get_xact_elements())

    @property
    def vnfr(self):
        # VNFR opdata is served from the locally maintained cache.
        return list(self._vnfrs.values())

    @property
    def nsr(self):
        # NSR opdata is served from the locally maintained cache.
        return list(self._nsrs.values())

    def _unwrap(self, values, id_name):
        """Return the first matched object, or None (after logging) when the
        match list is empty.
        """
        try:
            return values[0]
        except IndexError:
            # BUG FIX: indexing an empty list raises IndexError; the original
            # caught KeyError, so lookup failures escaped uncaught.
            self.log.exception("Unable to find the object with the given "
                "ID {}".format(id_name))

    def get_nsr(self, nsr_id):
        values = [nsr for nsr in self.nsr if nsr.ns_instance_config_ref == nsr_id]
        return self._unwrap(values, nsr_id)

    def get_nsd(self, nsd_id):
        values = [nsd for nsd in self.nsd if nsd.id == nsd_id]
        return self._unwrap(values, nsd_id)

    def get_vnfr(self, vnfr_id):
        values = [vnfr for vnfr in self.vnfr if vnfr.id == vnfr_id]
        return self._unwrap(values, vnfr_id)

    def get_vnfd(self, vnfd_id):
        values = [vnfd for vnfd in self.vnfd if vnfd.id == vnfd_id]
        return self._unwrap(values, vnfd_id)

    @asyncio.coroutine
    def register(self):
        """Register all four subscribers with DTS."""
        yield from self._vnfd_sub.register()
        yield from self._nsd_sub.register()
        yield from self._vnfr_sub.register()
        yield from self._nsr_sub.register()

    @asyncio.coroutine
    def refresh_store(self, subsriber, store):
        """Re-read every element at the subscriber's xpath and rebuild store.

        Args:
            subsriber: Subscriber whose xpath/key define the query.
                (NOTE(review): parameter name is a typo for "subscriber";
                kept for interface compatibility.)
            store (dict): Cache to clear and repopulate, keyed by key_name().
        """
        itr = yield from self.dts.query_read(subsriber.get_xpath())

        store.clear()
        for res in itr:
            result = yield from res
            result = result.result
            store[getattr(result, subsriber.key_name())] = result

    def on_nsr_change(self, msg, action):
        """NSR change callback: drop deleted entries, otherwise refresh."""
        if action == rwdts.QueryAction.DELETE:
            if msg.ns_instance_config_ref in self._nsrs:
                del self._nsrs[msg.ns_instance_config_ref]
            return

        # Any non-delete change triggers a full asynchronous re-read.
        self.loop.create_task(self.refresh_store(self._nsr_sub, self._nsrs))

    def on_vnfr_change(self, msg, action):
        """VNFR change callback: drop deleted entries, otherwise refresh."""
        if action == rwdts.QueryAction.DELETE:
            if msg.id in self._vnfrs:
                del self._vnfrs[msg.id]
            return

        self.loop.create_task(self.refresh_store(self._vnfr_sub, self._vnfrs))
diff --git a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
new file mode 100644
index 0000000..a69a00f
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
@@ -0,0 +1,241 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+import types
+import unittest
+import uuid
+
+
+import rift.test.dts
+import rift.mano.dts as store
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfdYang,
+        RwVnfrYang,
+        RwNsrYang,
+        RwNsdYang,
+        VnfrYang
+        )
+
+
class DescriptorPublisher(object):
    """Test helper that publishes descriptor objects onto DTS so the
    subscriber under test can observe them.
    """
    def __init__(self, log, dts, loop):
        self.log = log
        self.loop = loop
        self.dts = dts

        # Registrations created by publish(); released by unpublish_all().
        self._registrations = []

    @asyncio.coroutine
    def publish(self, w_path, path, desc):
        """Register as a publisher at w_path and create desc at path.

        Args:
            w_path: Wildcarded registration xpath.
            path: Keyed xpath of the concrete element to create.
            desc: Gi descriptor object to publish.

        Returns:
            The DTS registration handle (also retained internally).
        """
        ready_event = asyncio.Event(loop=self.loop)

        @asyncio.coroutine
        def on_ready(regh, status):
            self.log.debug("Create element: %s, obj-type:%s obj:%s",
                           path, type(desc), desc)
            # Create the element inside a transaction once registration
            # is ready, then unblock the waiting publish() coroutine.
            with self.dts.transaction() as xact:
                regh.create_element(path, desc, xact.xact)
            self.log.debug("Created element: %s, obj:%s", path, desc)
            ready_event.set()

        # NOTE(review): rift.tasklets is not imported in this file's visible
        # import block -- presumably pulled in via rift.test.dts; confirm.
        handler = rift.tasklets.DTS.RegistrationHandler(
                on_ready=on_ready
                )

        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
        reg = yield from self.dts.register(
                w_path,
                handler,
                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
                )
        self._registrations.append(reg)
        self.log.debug("Registered path : %s", w_path)
        # Block until on_ready has actually created the element.
        yield from ready_event.wait()

        return reg

    def unpublish_all(self):
        """Deregister every registration created by publish()."""
        self.log.debug("Deregistering all published descriptors")
        for reg in self._registrations:
            reg.deregister()
+
class SubscriberStoreDtsTestCase(rift.test.dts.AbstractDTSTest):
    """DTS integration tests for rift.mano.dts.SubscriberStore.

    Each test publishes a descriptor/record through DescriptorPublisher and
    verifies the store's view after create, update and delete.
    """
    @classmethod
    def configure_schema(cls):
       # NOTE(review): 7-space indent (rest of the file uses 8); valid
       # Python but worth normalizing.
       return launchpadyang.get_schema()

    @classmethod
    def configure_timeout(cls):
        # Overall per-test timeout in seconds; generous to absorb the
        # asyncio.sleep() settle delays used below.
        return 240

    def configure_test(self, loop, test_id):
        """Set up two DTS handles, the subscriber store and a publisher."""
        self.log.debug("STARTING - %s", test_id)
        self.tinfo = self.new_tinfo(str(test_id))
        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)

        # Second tasklet/DTS handle available for subscriber-side use.
        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)

        self.store = store.SubscriberStore(self.log, self.dts, self.loop)
        self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)

    def tearDown(self):
        super().tearDown()

    @rift.test.dts.async_test
    def test_vnfd_handler(self):
        """Create/update/delete a VNFD and verify the store's view."""
        yield from self.store.register()

        mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
        mock_vnfd.id = str(uuid.uuid1())

        w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
        xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
        yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)

        # Give DTS time to deliver the subscription callbacks.
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.vnfd) == 1
        assert self.store.get_vnfd(self.store.vnfd[0].id) is not None

        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_vnfd)
        assert len(self.store.vnfd) == 1

        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
        assert len(self.store.vnfd) == 0

    @rift.test.dts.async_test
    def test_vnfr_handler(self):
        """Create/update/delete a VNFR and verify the cached store."""
        yield from self.store.register()

        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
        mock_vnfr.id = str(uuid.uuid1())

        w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
        xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)

        # Opdata is cached asynchronously; wait for the refresh task.
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.vnfr) == 1
        assert self.store.get_vnfr(self.store.vnfr[0].id) is not None

        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_vnfr)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.vnfr) == 1

        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.vnfr) == 0

    @rift.test.dts.async_test
    def test_nsr_handler(self):
        """Create/update/delete an NSR and verify the cached store."""
        yield from self.store.register()

        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
        mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
        mock_nsr.name_ref = "Foo"

        w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
        xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
        yield from self.publisher.publish(w_xpath, xpath, mock_nsr)

        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.nsr) == 1
        assert self.store.get_nsr(self.store.nsr[0].ns_instance_config_ref) is not None

        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_nsr)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.nsr) == 1

        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.nsr) == 0

    @rift.test.dts.async_test
    def test_nsd_handler(self):
        """Create/update/delete an NSD and verify the store's view."""
        yield from self.store.register()

        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
        mock_nsd.id = str(uuid.uuid1())

        w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
        xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
        yield from self.publisher.publish(w_xpath, xpath, mock_nsd)

        yield from asyncio.sleep(2, loop=self.loop)
        assert len(self.store.nsd) == 1
        assert self.store.get_nsd(self.store.nsd[0].id) is not None

        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_nsd)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.nsd) == 1

        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
        yield from asyncio.sleep(5, loop=self.loop)
        assert len(self.store.nsd) == 0

    @rift.test.dts.async_test
    def test_vnfr_crash(self):
        """Regression test: republishing a VNFR with monitoring params."""
        vnf_handler = store.VnfrCatalogSubscriber(self.log, self.dts, self.loop)
        def get_reg_flags(self):
            # Force the CACHE flag back on for this subscriber instance.
            from gi.repository import RwDts as rwdts
            return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY|rwdts.Flag.CACHE

        vnf_handler.get_reg_flags = types.MethodType(get_reg_flags, vnf_handler)

        # publish
        yield from vnf_handler.register()

        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
        mock_vnfr.id = str(uuid.uuid1())

        def mon_xpath(param_id=None):
            """ Monitoring params xpath """
            return("D,/vnfr:vnfr-catalog" +
                   "/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
                   "/vnfr:monitoring-param" +
                   ("[vnfr:id='{}']".format(param_id) if param_id else ""))


        w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
        xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)

        # Republish the same VNFR with a monitoring param attached; the
        # test passes if this does not crash the subscriber.
        mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
                "id": "1"
            })
        mock_vnfr.monitoring_param.append(mock_param)
        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
+
def main(argv=sys.argv[1:]):
    """Run the test suite.

    The unittest framework requires a program name, so this file's name is
    used as argv[0]; callers from the interpreter need not supply one.
    """
    all_args = [__file__] + argv
    # testRunner stays None (unittest default). The XML runner is kept for
    # reference: xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
    unittest.main(argv=all_args, testRunner=None)


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
new file mode 100644
index 0000000..76a58ab
--- /dev/null
+++ b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
@@ -0,0 +1,51 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
@file vnf_subscriber.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import gi
+gi.require_version('RwDts', '1.0')
+from gi.repository import RwDts as rwdts
+
+from . import core
+
+
class VnfrCatalogSubscriber(core.AbstractOpdataSubscriber):
    """Subscriber for VNFR catalog opdata."""

    def key_name(self):
        return "id"

    def get_reg_flags(self):
        # Override the base default: subscribe without the CACHE flag.
        return rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY

    def get_xpath(self):
        return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+
class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
    """Config subscriber for the VNFD catalog."""

    def key_name(self):
        return "id"

    def get_xpath(self):
        return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
diff --git a/common/python/rift/mano/ncclient.py b/common/python/rift/mano/ncclient.py
new file mode 100644
index 0000000..9b87030
--- /dev/null
+++ b/common/python/rift/mano/ncclient.py
@@ -0,0 +1,104 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+
+from gi.repository import RwYang
class ProxyConnectionError(Exception):
    """Raised when the Netconf session cannot be established in time."""
+
+
class NcClient(object):
    '''Class representing a Netconf Session'''

    # Timeout (seconds) applied to individual Netconf operations after
    # the session is established.
    OPERATION_TIMEOUT_SECS = 240

    def __init__(self, host, port, username, password, loop):
        '''Initialize a new Netconf Session instance

        Arguments:
            host - host ip
            port - host port
            username - credentials for accessing the host, username
            password - credentials for accessing the host, password
            loop - asyncio event loop used for the connection

        Returns:
            A newly initialized Netconf session instance
        '''
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.loop = loop
        # ncclient manager; set by connect(), None until then.
        self._nc_mgr = None

        self._model = RwYang.Model.create_libncx()

    @asyncio.coroutine
    def connect(self, timeout=240):
        '''Connect Netconf Session

        Retries every 5 seconds until the session is up or the overall
        timeout is exceeded; raises ProxyConnectionError on timeout.

        Arguments:
            timeout - maximum time allowed before connect fails [default 240s]
        '''
        # logger.info("Connecting to confd (%s) SSH port (%s)", self.host, self.port)
        # Already connected; connect() is idempotent.
        if self._nc_mgr:
            return

        start_time = time.time()
        while (time.time() - start_time) < timeout:
            try:
                self._nc_mgr = yield from ncclient.asyncio_manager.asyncio_connect(
                    loop=self.loop,
                    host=self.host,
                    port=self.port,
                    username=self.username,
                    password=self.password,
                    # Setting allow_agent and look_for_keys to false will skip public key
                    # authentication, and use password authentication.
                    allow_agent=False,
                    look_for_keys=False,
                    hostkey_verify=False)

                # logger.info("Successfully connected to confd (%s) SSH port (%s)", self.host, self.port)
                self._nc_mgr.timeout = NcClient.OPERATION_TIMEOUT_SECS
                return

            # NOTE(review): assumes ncclient exposes NCClientError at the
            # package top level -- confirm against the installed version.
            except ncclient.NCClientError as e:
                # logger.debug("Could not connect to (%s) confd ssh port (%s): %s",
                #         self.host, self.port, str(e))
                pass

            # Back off before retrying the connection attempt.
            yield from asyncio.sleep(5, loop=self.loop)

        raise ProxyConnectionError("Could not connect to Confd ({}) ssh port ({}): within the timeout {} sec.".format(
                self.host, self.port, timeout))

    def convert_to_xml(self, module, yang_obj):
        # Load the module's ypbc schema into the model, then serialize the
        # yang object to XML against that model.
        schema =  getattr(module, "get_schema")
        self._model.load_schema_ypbc(schema())

        get_xml = getattr(yang_obj, "to_xml_v2")

        return get_xml(self._model)

    @property
    def manager(self):
        # Underlying ncclient manager (None until connect() succeeds).
        return self._nc_mgr
diff --git a/common/python/rift/mano/tosca_translator/__init__.py b/common/python/rift/mano/tosca_translator/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/common/__init__.py b/common/python/rift/mano/tosca_translator/common/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/common/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/common/exception.py b/common/python/rift/mano/tosca_translator/common/exception.py
new file mode 100644
index 0000000..554396c
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/common/exception.py
@@ -0,0 +1,51 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+'''
+Exceptions for the TOSCA Translator package.
+'''
+
+from rift.mano.tosca_translator.common.utils import _
+
+from toscaparser.common.exception import TOSCAException
+
+
class ConfFileParseError(TOSCAException):
    """Generic conf-file parse failure; full message supplied by the raiser."""
    msg_fmt = _('%(message)s')
+
+
class ConfOptionNotDefined(TOSCAException):
    """A requested option is missing from its conf-file section."""
    msg_fmt = _('Option %(key)s in section %(section)s '
                'is not defined in conf file')
+
+
class ConfSectionNotDefined(TOSCAException):
    """A requested section is missing from the conf file."""
    msg_fmt = _('Section %(section)s is not defined in conf file')
+
+
class ToscaModImportError(TOSCAException):
    """A translator module could not be imported."""
    msg_fmt = _('Unable to import module %(mod_name)s. '
                'Check to see that it exists and has no '
                'language definition errors.')
+
+
class ToscaClassImportError(TOSCAException):
    """A class within a translator module could not be imported."""
    msg_fmt = _('Unable to import class %(name)s in '
                'module %(mod_name)s. Check to see that it '
                'exists and has no language definition errors.')
+
+
class ToscaClassAttributeError(TOSCAException):
    """A referenced class attribute was not found."""
    msg_fmt = _('Class attribute referenced not found. '
                '%(message)s. Check to see that it is defined.')
diff --git a/common/python/rift/mano/tosca_translator/common/utils.py b/common/python/rift/mano/tosca_translator/common/utils.py
new file mode 100644
index 0000000..c0ed2d0
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/common/utils.py
@@ -0,0 +1,456 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+
+import gettext
+import json
+import logging
+import math
+import numbers
+import os
+import re
+import requests
+from six.moves.urllib.parse import urlparse
+import yaml
+
+from hashlib import md5
+from hashlib import sha256
+
+import toscaparser.utils.yamlparser
+
+# Locale directory may be overridden through the
+# TOSCA-TRANSLATOR_LOCALEDIR environment variable; falls back to the
+# built-in messages when no catalog is found.
+_localedir = os.environ.get('tosca-translator'.upper() + '_LOCALEDIR')
+_t = gettext.translation('tosca-translator', localedir=_localedir,
+                         fallback=True)
+
+
+def _(msg):
+    # gettext shorthand used for all log and error messages in this package.
+    return _t.gettext(msg)
+
+
+# Order-preserving YAML parser: keeps mapping order so dictionary
+# comparisons below stay deterministic.
+YAML_ORDER_PARSER = toscaparser.utils.yamlparser.simple_ordered_parse
+log = logging.getLogger('tosca-translator')
+
+# Required environment variables to create openstackclient object.
+ENV_VARIABLES = ['OS_AUTH_URL', 'OS_PASSWORD', 'OS_USERNAME', 'OS_TENANT_NAME']
+
+
class MemoryUnit(object):
    '''Conversions between memory size units (SI and IEC).'''

    UNIT_SIZE_DEFAULT = 'B'
    # Byte multipliers for each supported unit.
    UNIT_SIZE_DICT = {'B': 1, 'kB': 1000, 'KiB': 1024, 'MB': 1000000,
                      'MiB': 1048576, 'GB': 1000000000,
                      'GiB': 1073741824, 'TB': 1000000000000,
                      'TiB': 1099511627776}

    @staticmethod
    def convert_unit_size_to_num(size, unit=None):
        """Convert given size to a number representing given unit.

        If unit is None, convert to a number representing UNIT_SIZE_DEFAULT
        :param size: unit size e.g. 1 TB
        :param unit: unit to be converted to e.g GB
        :return: converted number e.g. 1000 for 1 TB size and unit GB
        """
        if unit:
            unit = MemoryUnit.validate_unit(unit)
        else:
            unit = MemoryUnit.UNIT_SIZE_DEFAULT
            log.info(_('A memory unit is not provided for size; using the '
                       'default unit %(default)s.') % {'default': 'B'})
        # Bug fix: the original pattern was a non-raw string containing
        # '\d' / '\s' / '\w', which is an invalid escape sequence and a
        # DeprecationWarning (SyntaxWarning on newer Python 3).
        regex = re.compile(r'(\d*)\s*(\w*)')
        result = regex.match(str(size)).groups()
        if result[1]:
            # Size carried its own unit: convert via byte multipliers,
            # truncating to an int like the original implementation.
            unit_size = MemoryUnit.validate_unit(result[1])
            converted = int(str_to_num(result[0])
                            * MemoryUnit.UNIT_SIZE_DICT[unit_size]
                            * math.pow(MemoryUnit.UNIT_SIZE_DICT
                                       [unit], -1))
            log.info(_('Given size %(size)s is converted to %(num)s '
                       '%(unit)s.') % {'size': size,
                     'num': converted, 'unit': unit})
        else:
            # Bare number: assume it is already in the requested unit.
            converted = (str_to_num(result[0]))
        return converted

    @staticmethod
    def validate_unit(unit):
        '''Return the canonical spelling of unit; raise ValueError if unknown.

        Matching is case-insensitive (e.g. 'gib' -> 'GiB').
        '''
        if unit in MemoryUnit.UNIT_SIZE_DICT.keys():
            return unit
        else:
            for key in MemoryUnit.UNIT_SIZE_DICT.keys():
                if key.upper() == unit.upper():
                    return key

            msg = _('Provided unit "{0}" is not valid. The valid units are'
                    ' {1}').format(unit, MemoryUnit.UNIT_SIZE_DICT.keys())
            log.error(msg)
            raise ValueError(msg)
+
+
class CompareUtils(object):
    '''Helpers to compare and diff descriptor dictionaries.'''

    MISMATCH_VALUE1_LABEL = "<Expected>"
    MISMATCH_VALUE2_LABEL = "<Provided>"
    # Keys whose list values are order-insensitive; sorted before diffing.
    ORDERLESS_LIST_KEYS = ['allowed_values', 'depends_on']

    @staticmethod
    def compare_dicts(dict1, dict2):
        """Return False if not equal, True if both are equal.

        The comparison is order-sensitive (items are zipped pairwise),
        which is intentional: callers feed it order-preserving YAML dicts.
        """
        if dict1 is None and dict2 is None:
            return True
        if dict1 is None or dict2 is None:
            return False

        # Bug fix: zip() stops at the shorter dict, so without this check
        # a dict that is a strict prefix of the other compared as equal.
        if len(dict1) != len(dict2):
            log.warning(_("Dictionaries have different sizes: "
                          "%(len1)d vs %(len2)d")
                        % {'len1': len(dict1), 'len2': len(dict2)})
            return False

        both_equal = True
        for dict1_item, dict2_item in zip(dict1.items(), dict2.items()):
            if dict1_item != dict2_item:
                msg = (_("%(label1)s: %(item1)s \n is not equal to \n:"
                         "%(label2)s: %(item2)s")
                       % {'label1': CompareUtils.MISMATCH_VALUE2_LABEL,
                          'item1': dict1_item,
                          'label2': CompareUtils.MISMATCH_VALUE1_LABEL,
                          'item2': dict2_item})
                log.warning(msg)
                both_equal = False
                break
        return both_equal

    @staticmethod
    def compare_mano_yamls(generated_yaml, expected_yaml):
        '''Compare two MANO YAML strings using the order-preserving parser.'''
        mano_translated_dict = YAML_ORDER_PARSER(generated_yaml)
        mano_expected_dict = YAML_ORDER_PARSER(expected_yaml)
        return CompareUtils.compare_dicts(mano_translated_dict,
                                          mano_expected_dict)

    @staticmethod
    def reorder(dic):
        '''Canonicalize list items in the dictionary for ease of comparison.

        For properties whose value is a list in which the order does not
        matter, some pre-processing is required to bring those lists into a
        canonical format. We use sorting just to make sure such differences
        in ordering would not cause to a mismatch.

        Returns None when the input is not a dict (callers rely on this).
        '''
        if type(dic) is not dict:
            return None

        reordered = {}
        for key in dic.keys():
            value = dic[key]
            if type(value) is dict:
                reordered[key] = CompareUtils.reorder(value)
            elif type(value) is list \
                and key in CompareUtils.ORDERLESS_LIST_KEYS:
                reordered[key] = sorted(value)
            else:
                reordered[key] = value
        return reordered

    @staticmethod
    def diff_dicts(dict1, dict2, reorder=True):
        '''Compares two dictionaries and returns their differences.

        Returns a dictionary of mismatches between the two dictionaries.
        An empty dictionary is returned if two dictionaries are equivalent.
        The reorder parameter indicates whether reordering is required
        before comparison or not.
        '''
        if reorder:
            dict1 = CompareUtils.reorder(dict1)
            dict2 = CompareUtils.reorder(dict2)

        if dict1 is None and dict2 is None:
            return {}
        if dict1 is None or dict2 is None:
            return {CompareUtils.MISMATCH_VALUE1_LABEL: dict1,
                    CompareUtils.MISMATCH_VALUE2_LABEL: dict2}

        diff = {}
        keys1 = set(dict1.keys())
        keys2 = set(dict2.keys())
        for key in keys1.union(keys2):
            if key in keys1 and key not in keys2:
                # Present only in the expected dict.
                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: dict1[key],
                             CompareUtils.MISMATCH_VALUE2_LABEL: None}
            elif key not in keys1 and key in keys2:
                # Present only in the provided dict.
                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: None,
                             CompareUtils.MISMATCH_VALUE2_LABEL: dict2[key]}
            else:
                val1 = dict1[key]
                val2 = dict2[key]
                if val1 != val2:
                    if type(val1) is dict and type(val2) is dict:
                        # Recurse without reordering (already canonical).
                        diff[key] = CompareUtils.diff_dicts(val1, val2, False)
                    else:
                        diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: val1,
                                     CompareUtils.MISMATCH_VALUE2_LABEL: val2}
        return diff
+
+
class YamlUtils(object):
    '''Helpers for loading and comparing YAML files.'''

    @staticmethod
    def get_dict(yaml_file):
        '''Return the dictionary representation of the given YAML spec.

        Returns None when the file cannot be opened.
        '''
        try:
            # Fix: use a context manager so the file handle is always
            # closed (the original leaked it), and safe_load so arbitrary
            # Python tags in descriptor files are not instantiated
            # (yaml.load without an explicit Loader is deprecated/unsafe).
            with open(yaml_file) as f:
                return yaml.safe_load(f)
        except IOError:
            return None

    @staticmethod
    def compare_yamls(yaml1_file, yaml2_file):
        '''Returns true if two dictionaries are equivalent, false otherwise.'''
        dict1 = YamlUtils.get_dict(yaml1_file)
        dict2 = YamlUtils.get_dict(yaml2_file)
        return CompareUtils.compare_dicts(dict1, dict2)

    @staticmethod
    def compare_yaml_dict(yaml_file, dic):
        '''Returns true if yaml matches the dictionary, false otherwise.'''
        return CompareUtils.compare_dicts(YamlUtils.get_dict(yaml_file), dic)
+
+
+class TranslationUtils(object):
+    '''Helpers to validate a TOSCA-to-MANO translation against a fixture.'''
+
+    @staticmethod
+    def compare_tosca_translation_with_mano(tosca_file, mano_file, params):
+        '''Verify tosca translation against the given mano specification.
+
+        inputs:
+        tosca_file: relative local path or URL to the tosca input file
+        mano_file: relative path to expected mano output
+        params: dictionary of parameter name value pairs
+
+        Returns as a dictionary the difference between the MANO translation
+        of the given tosca_file and the given mano_file.
+        '''
+
+        # Imported lazily so module import does not require these packages.
+        # NOTE(review): 'tosca_translator.mano.tosca_translator' looks
+        # inconsistent with this package path ('rift.mano.tosca_translator')
+        # -- confirm this import resolves at runtime.
+        from toscaparser.tosca_template import ToscaTemplate
+        from tosca_translator.mano.tosca_translator import TOSCATranslator
+
+        # Resolve tosca_file relative to this module; when that is not an
+        # existing file, fall back to treating tosca_file as a URL/raw path.
+        tosca_tpl = os.path.normpath(os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), tosca_file))
+        a_file = os.path.isfile(tosca_tpl)
+        if not a_file:
+            tosca_tpl = tosca_file
+
+        # Expected fixture is always resolved relative to this module.
+        expected_mano_tpl = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), mano_file)
+
+        tosca = ToscaTemplate(tosca_tpl, params, a_file)
+        translate = TOSCATranslator(tosca, params)
+
+        # Parse the generated output and diff it against the fixture.
+        output = translate.translate()
+        output_dict = toscaparser.utils.yamlparser.simple_parse(output)
+        expected_output_dict = YamlUtils.get_dict(expected_mano_tpl)
+        return CompareUtils.diff_dicts(output_dict, expected_output_dict)
+
+
class UrlUtils(object):
    '''URL-related helpers.'''

    @staticmethod
    def validate_url(path):
        """Return True when the given path looks like an absolute URL.

        A path qualifies only when it carries both a scheme (http, https,
        ftp, ...) and a network location (a domain such as www.github.com).
        """
        parts = urlparse(path)
        return bool(parts.scheme and parts.netloc)
+
+
class ChecksumUtils(object):
    '''Compute file checksums by streaming the file in fixed-size chunks.

    get_md5 and get_sha256 in the original were byte-for-byte duplicates
    except for the hash constructor; the common loop is factored into a
    private helper.
    '''

    CHUNK_SIZE = 1048576  # 1024 * 1024 bytes = 1 MiB

    @staticmethod
    def _checksum(hasher, algo, input_file_name, log=None):
        '''Shared streaming driver for get_md5/get_sha256.

        :param hasher: fresh hashlib hash object to feed the file into
        :param algo: algorithm name, used only in debug log messages
        :param input_file_name: path of the file to checksum
        :param log: optional logger; when given, IOError is logged and None
            is returned instead of raising (original best-effort behavior)
        '''
        try:
            with open(input_file_name, "rb") as f:
                byte_size = 0
                file_read_iterations = 0
                while True:
                    chunk = f.read(ChecksumUtils.CHUNK_SIZE)
                    byte_size += len(chunk)
                    file_read_iterations += 1
                    if not chunk:
                        break
                    hasher.update(chunk)

            cksum = hasher.hexdigest()
            if log:
                log.debug(_("{0} for {1} with size {2} (iter:{3}): {4}").
                          format(algo, input_file_name, byte_size,
                                 file_read_iterations, cksum))
            return cksum
        except IOError:
            if log:
                log.error(_('File could not be opened: {0}').
                          format(input_file_name))
                return None
            # No logger supplied: propagate, as the original did.
            raise

    @staticmethod
    def get_md5(input_file_name, log=None):
        '''Return the hex MD5 digest of a file.

        Returns None on IOError when a logger is supplied; raises otherwise.
        '''
        return ChecksumUtils._checksum(md5(), 'MD5', input_file_name, log)

    @staticmethod
    def get_sha256(input_file_name, log=None):
        '''Return the hex SHA256 digest of a file.

        Returns None on IOError when a logger is supplied; raises otherwise.
        '''
        return ChecksumUtils._checksum(sha256(), 'SHA256',
                                       input_file_name, log)
+
+
def str_to_num(value):
    """Convert a string representation of a number into a numeric type.

    Numeric inputs are returned unchanged. Strings are parsed as int
    first, then float; a non-numeric string raises ValueError (from
    float).
    """
    if not isinstance(value, numbers.Number):
        try:
            value = int(value)
        except ValueError:
            value = float(value)
    return value
+
+
def check_for_env_variables():
    '''Return True when all required OpenStack ENV_VARIABLES are set.

    Bug fix: the original used a strict-subset test (<), which wrongly
    returned False when the environment contained exactly the required
    variables and nothing else; issubset (<=) matches the intent.
    '''
    return set(ENV_VARIABLES).issubset(os.environ.keys())
+
+
+def get_ks_access_dict():
+    '''POST the OS_* environment credentials to keystone's /tokens endpoint.
+
+    Returns the parsed JSON access dict on HTTP 200, or None on any
+    non-200 response or error (best-effort: all exceptions swallowed).
+    Uses the keystone v2 passwordCredentials request body.
+    '''
+    tenant_name = os.getenv('OS_TENANT_NAME')
+    username = os.getenv('OS_USERNAME')
+    password = os.getenv('OS_PASSWORD')
+    auth_url = os.getenv('OS_AUTH_URL')
+
+    auth_dict = {
+        "auth": {
+            "tenantName": tenant_name,
+            "passwordCredentials": {
+                "username": username,
+                "password": password
+            }
+        }
+    }
+    headers = {'Content-Type': 'application/json'}
+    try:
+        # NOTE(review): no request timeout -- a hung keystone endpoint
+        # blocks this call indefinitely; consider adding one.
+        keystone_response = requests.post(auth_url + '/tokens',
+                                          data=json.dumps(auth_dict),
+                                          headers=headers)
+        if keystone_response.status_code != 200:
+            return None
+        return json.loads(keystone_response.content)
+    except Exception:
+        return None
+
+
def get_url_for(access_dict, service_type):
    '''Return the public URL for service_type from a keystone access dict.

    Returns None when access_dict is None and '' when no matching service
    exists in the catalog.
    '''
    if access_dict is None:
        return None
    catalog = access_dict['access']['serviceCatalog']
    urls = (svc['endpoints'][0]['publicURL']
            for svc in catalog if svc['type'] == service_type)
    return next(urls, '')
+
+
def get_token_id(access_dict):
    '''Return the keystone token id, or None when no access dict is given.'''
    if access_dict is not None:
        return access_dict['access']['token']['id']
    return None
+
+
def map_name_to_python(name):
    '''Map a YANG identifier to its Python form ("-" becomes "_").

    The name "type" is special-cased to "type_yang" so generated
    attributes do not shadow the Python builtin.
    '''
    return 'type_yang' if name == 'type' else name.replace('-', '_')
+
def convert_keys_to_python(d):
    '''Recursively rewrite dict keys from YANG style (-) to Python style (_).

    Lists are walked element-wise; non-container leaves pass through
    unchanged.
    '''
    if isinstance(d, dict):
        return {map_name_to_python(key): convert_keys_to_python(val)
                for key, val in d.items()}
    if isinstance(d, list):
        return [convert_keys_to_python(member) for member in d]
    return d
+
def map_name_to_yang(name):
    '''Map a Python identifier to its YANG form ("_" becomes "-").'''
    return name.replace('_', '-')
+
def convert_keys_to_yang(d):
    '''Recursively rewrite dict keys from Python style (_) to YANG style (-).

    Mirror image of convert_keys_to_python.
    '''
    if isinstance(d, dict):
        dic = {}
        for key in d.keys():
            # Bug fix: the original called map_name_to_python here, which
            # converts '-' to '_' -- the opposite of what this function's
            # docstring promises. Use map_name_to_yang instead.
            dic[map_name_to_yang(key)] = convert_keys_to_yang(d[key])
        return dic
    elif isinstance(d, list):
        arr = []
        for memb in d:
            arr.append(convert_keys_to_yang(memb))
        return arr
    else:
        return d
+
+
def dict_convert_values_to_str(d):
    '''Convert all leaf values to str.

    Dicts are updated in place (as in the original); lists are rebuilt;
    every non-container leaf is passed through str().
    '''
    if isinstance(d, dict):
        for key in d:
            d[key] = dict_convert_values_to_str(d[key])
        return d
    if isinstance(d, list):
        return [dict_convert_values_to_str(member) for member in d]
    return str(d)
diff --git a/common/python/rift/mano/tosca_translator/compare_desc.py b/common/python/rift/mano/tosca_translator/compare_desc.py
new file mode 100644
index 0000000..0886b85
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/compare_desc.py
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+
+from deepdiff import DeepDiff
+
+from rift.mano.tosca_translator.common.utils import _
+
+
+class CompareDescShell(object):
+    '''Compare a generated descriptor against an expected one via DeepDiff.'''
+
+    SUPPORTED_TYPES = ['json']
+    INDENT = 2
+    # DeepDiff result keys this tool inspects.
+    DIFF_KEYS = (REMOVED_ITEMS, ADDED_ITEMS, TYPE_CHANGES, VALUES_CHANGED) = \
+                ('dic_item_removed', 'dic_item_added', 'type_changes',
+                 'values_changed')
+    DIFF_MAP = {REMOVED_ITEMS: 'Items removed',
+                ADDED_ITEMS: 'Items added',
+                TYPE_CHANGES: 'Changes in types',
+                VALUES_CHANGED: 'Changes in values'}
+    # Currently considering changes in removed keys or type changes
+    # as error.
+    ERROR_ITEMS = [REMOVED_ITEMS, TYPE_CHANGES]
+
+    def main(self, log, args):
+        '''Load both files, diff them, and raise ValueError on error items.
+
+        :param log: logger for progress/diff output
+        :param args: parsed argparse namespace (generated_file,
+            expected_file, type)
+        :raises ValueError: when the diff contains any ERROR_ITEMS key
+        '''
+        self.log = log
+        print("Args: {}".format(args))
+        self.log.debug(_("Args: {0}").format(args))
+        if args.type not in self.SUPPORTED_TYPES:
+            self.log.error(_("Unsupported file type {0}").
+                           format(args.type))
+            exit(1)
+
+        with open(args.generated_file) as g:
+            gen_data = g.read()
+            json_gen = json.loads(gen_data)
+            self.log.debug(_("Generated: {0}").format(json_gen))
+
+        with open(args.expected_file) as e:
+            exp_data = e.read()
+            json_exp = json.loads(exp_data)
+            self.log.debug(_("Expected: {0}").format(json_exp))
+
+        # Expected first, generated second: DeepDiff reports changes
+        # relative to the expected descriptor.
+        diff = DeepDiff(json_exp, json_gen)
+        self.log.debug(_("Keys in diff: {0}").format(diff.keys()))
+        # NOTE(review): "Differences" is logged twice (here and below);
+        # one of the two log.info calls looks redundant.
+        self.log.info(_("Differences:\n"))
+
+        d = pprint.pformat(diff, indent=self.INDENT)
+        self.log.info("Differences:\n{0}".format(d))
+
+        # Any removed-item or type-change entry is treated as a failure.
+        if len(set(self.ERROR_ITEMS).intersection(diff.keys())):
+            diff_str = pprint.pformat(diff)
+            msg = _("Found item changes: {0}").format(diff_str)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
def main(args=None):
    '''CLI entry point: parse arguments and run the descriptor comparison.

    :param args: optional argument list; sys.argv is used when None or
        empty, matching the original if/else branches (argparse treats a
        None argument list exactly like no argument list).
    '''
    parser = argparse.ArgumentParser(
        description='Validate descriptors by comparing')
    parser.add_argument("-g", "--generated-file", required=True,
                        help="Generated descriptor file")
    parser.add_argument("-e", "--expected-file", required=True,
                        help="Descriptor to compare")
    parser.add_argument("-t", "--type", default='json',
                        help="File type. Default json")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug logging")

    parsed = parser.parse_args(args if args else None)

    level = logging.DEBUG if parsed.debug else logging.ERROR
    logging.basicConfig(level=level)
    log = logging.getLogger("rwmano-translator")

    CompareDescShell().main(log, parsed)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/tosca_translator/conf/__init__.py b/common/python/rift/mano/tosca_translator/conf/__init__.py
new file mode 100644
index 0000000..db99bc7
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/conf/__init__.py
@@ -0,0 +1,39 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Initialize the global configuration for the translator '''
+
+import os
+
+from rift.mano.tosca_translator.conf.config import ConfigProvider
+
+CONF_FILENAME = 'translator.conf'
+
+
+def init_global_conf():
+    '''Initialize the configuration provider.
+
+    Allows the configuration to be shared throughout the translator code.
+    The file used is translator.conf, and is within the conf/ directory. It
+    is a standard ini format, and is processed using the ConfigParser module.
+
+    '''
+    # The conf file lives next to this __init__ module.
+    conf_path = os.path.dirname(os.path.abspath(__file__))
+    conf_file = os.path.join(conf_path, CONF_FILENAME)
+    ConfigProvider._load_config(conf_file)
+
+
+# Runs once at package import time so the config is globally available.
+init_global_conf()
diff --git a/common/python/rift/mano/tosca_translator/conf/config.py b/common/python/rift/mano/tosca_translator/conf/config.py
new file mode 100644
index 0000000..dd80a9c
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/conf/config.py
@@ -0,0 +1,71 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Provide a global configuration for the TOSCA translator'''
+
+from six.moves import configparser
+
+import rift.mano.tosca_translator.common.exception as exception
+
+from rift.mano.tosca_translator.common.utils import _
+
+
class ConfigProvider(object):
    '''Global config proxy that wraps a ConfigParser object.

    Allows for class based access to config values. Should only be initialized
    once using the corresponding translator.conf file in the conf directory.

    '''

    # List that captures all of the conf file sections.
    # Append any new sections to this list.
    _sections = ['DEFAULT']
    # Shared ConfigParser instance, populated once by _load_config().
    _translator_config = None

    @classmethod
    def _load_config(cls, conf_file):
        '''Private method only to be called once from the __init__ module'''

        cls._translator_config = configparser.ConfigParser()
        try:
            cls._translator_config.read(conf_file)
        except configparser.ParsingError:
            msg = _('Unable to parse translator.conf file.'
                    'Check to see that it exists in the conf directory.')
            raise exception.ConfFileParseError(message=msg)

    @classmethod
    def get_value(cls, section, key):
        '''Return the value of key in section, mapping parser errors to
        translator exceptions.'''
        try:
            value = cls._translator_config.get(section, key)
        except configparser.NoOptionError:
            raise exception.ConfOptionNotDefined(key=key, section=section)
        except configparser.NoSectionError:
            raise exception.ConfSectionNotDefined(section=section)

        return value

    @classmethod
    def get_all_values(cls):
        '''Return all (key, value) pairs from every registered section.'''
        values = []
        for section in cls._sections:
            try:
                values.extend(cls._translator_config.items(section=section))
            # Bug fix: a missing section raises NoSectionError, not
            # NoOptionError as the original caught -- the intended
            # ConfSectionNotDefined could never be raised.
            except configparser.NoSectionError:
                raise exception.ConfSectionNotDefined(section=section)

        return values
diff --git a/common/python/rift/mano/tosca_translator/conf/translator.conf b/common/python/rift/mano/tosca_translator/conf/translator.conf
new file mode 100644
index 0000000..95a416a
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/conf/translator.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+# Relative path location for custom types
+custom_types_location=rift/mano/tosca_translator/custom/rwmano
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/custom/__init__.py b/common/python/rift/mano/tosca_translator/custom/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/custom/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py b/common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py
diff --git a/common/python/rift/mano/tosca_translator/rwmano/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py
new file mode 100644
index 0000000..a065da1
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py
@@ -0,0 +1,31 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
class ManoOutput(object):
    '''Attributes for a RIFT.io MANO output section entry.'''

    def __init__(self, log, name, value, description=None):
        self.log = log
        self.name = name
        self.value = value
        self.description = description

    def __str__(self):
        # Render as "name(value)", identical to the original %-format.
        return "{0}({1})".format(self.name, self.value)

    def get_dict_output(self):
        '''Return {name: {value, description}} for serialization.'''
        body = {'value': self.value,
                'description': self.description}
        return {self.name: body}
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
new file mode 100644
index 0000000..aa6b83b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
@@ -0,0 +1,62 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from collections import OrderedDict
+
+from rift.mano.tosca_translator.common.utils import _
+
+
# Keys emitted into a parameter's dict representation.
KEYS = (TYPE, DESCRIPTION, DEFAULT, CONSTRAINTS, HIDDEN, LABEL) = \
       ('type', 'description', 'default', 'constraints', 'hidden', 'label')


class ManoParameter(object):
    '''Attributes for RIFT.io MANO parameter section.'''

    def __init__(self, log, name, type, label=None, description=None,
                 default=None, hidden=None, constraints=None):
        """Capture one input parameter of the translated template.

        Note: the parameter name 'type' shadows the builtin; it is kept
        because the keyword name is part of the public interface.
        """
        self.log = log
        self.name = name
        self.type = type
        self.label = label
        self.description = description
        self.default = default
        self.hidden = hidden
        self.constraints = constraints
        log.info(_('Initialized the input parameters.'))

    def __str__(self):
        return "%s(%s,%s)" % (self.name, self.type, self.label)

    # TODO(Philip): Hardcoding for now, need to make this generic
    def get_xpath(self):
        """Return the NSD-catalog xpath used to wire this parameter in."""
        xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
        return xpath

    def get_dict_output(self):
        """Render as {name: {attrs}}, omitting unset attributes.

        Uses an OrderedDict so the output YAML keeps a stable key order.
        """
        param_sections = OrderedDict()
        param_sections[TYPE] = self.type
        if self.label:
            param_sections[LABEL] = self.label
        if self.description:
            param_sections[DESCRIPTION] = self.description
        # Fix: compare against None so falsy-but-valid defaults
        # (0, False, '') are no longer silently dropped from the output.
        if self.default is not None:
            param_sections[DEFAULT] = self.default
        if self.hidden:
            param_sections[HIDDEN] = self.hidden
        if self.constraints:
            param_sections[CONSTRAINTS] = self.constraints

        return {self.name: param_sections}
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py
new file mode 100644
index 0000000..1606f7f
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py
@@ -0,0 +1,374 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+import uuid
+
+from collections import OrderedDict
+
+import six
+
+from rift.mano.tosca_translator.common.utils import _
+
+from toscaparser.common.exception import ValidationError
+from toscaparser.elements.interfaces import InterfacesDef
+from toscaparser.functions import GetInput
+
+
# Keys emitted into a resource's dict representation.
# NOTE: MEDADATA is a long-standing misspelling of METADATA; the name is
# kept because it is part of this module's importable surface.
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
            DELETION_POLICY) = \
           ('type', 'properties', 'metadata',
            'depends_on', 'update_policy', 'deletion_policy')


class ManoResource(object):
    '''Base class for TOSCA node type translation to RIFT.io MANO type.'''

    def __init__(self,
                 log,
                 nodetemplate,
                 name=None,
                 type_=None,
                 properties=None,
                 metadata=None,
                 artifacts=None,
                 depends_on=None,
                 update_policy=None,
                 deletion_policy=None):
        """Initialize the state shared by all translated MANO resources.

        Args:
            log: logger for translation diagnostics.
            nodetemplate: toscaparser NodeTemplate being translated.
            name: optional override; defaults to nodetemplate.name.
            type_: MANO resource type string (e.g. 'vdu', 'vld', 'vnfd').
            properties: initial property dict (defaults to empty).
            metadata: optional metadata dict (may carry vendor/version).
            artifacts: optional artifacts value, exposed via property.
            depends_on: initial dependency list (see comment below).
            update_policy/deletion_policy: copied through to the output.
        """
        self.log = log
        self.nodetemplate = nodetemplate
        if name:
            self.name = name
        else:
            self.name = nodetemplate.name
        self.type_ = type_
        self._id = None
        self._version = None
        # Fix: _vendor was never initialized, so the first access to the
        # 'vendor' property raised AttributeError.
        self._vendor = None
        self.properties = properties or {}
        self.metadata = metadata
        self._artifacts = artifacts

        # The difference between depends_on and depends_on_nodes is
        # that depends_on defines dependency in the context of the
        # HOT template and it is used during the template output.
        # Depends_on_nodes defines the direct dependency between the
        # tosca nodes and is not used during the output of the
        # HOT template but for internal processing only. When a tosca
        # node depends on another node it will be always added to
        # depends_on_nodes but not always to depends_on. For example
        # if the source of dependency is a server, the dependency will
        # be added as properties.get_resource and not depends_on
        if depends_on:
            self.depends_on = depends_on
            self.depends_on_nodes = depends_on
        else:
            self.depends_on = []
            self.depends_on_nodes = []
        self.update_policy = update_policy
        self.deletion_policy = deletion_policy
        self.group_dependencies = {}
        self.operations = {}
        # if hide_resource is set to true, then this resource will not be
        # generated in the output yaml.
        self.hide_resource = False
        log.debug(_('Translating TOSCA node %(name)s of type %(type)s') %
                  {'name': self.name,
                   'type': self.type_})

    # The property methods below let code work uniformly on both
    # toscaparser.NodeType and translator.ManoResource objects.
    @property
    def type(self):
        return self.type_

    @type.setter
    def type(self, value):
        self.type_ = value

    def get_type(self):
        return self.type_

    @property
    def id(self):
        # Lazily assign a uuid1 so only emitted resources consume ids.
        if self._id is None:
            self._id = str(uuid.uuid1())
        return self._id

    @property
    def description(self):
        return _("Translated from TOSCA")

    @property
    def vendor(self):
        """Vendor from metadata, falling back to 'RIFT.io'; cached."""
        if self._vendor is None:
            if self.metadata and 'vendor' in self.metadata:
                self._vendor = self.metadata['vendor']
            else:
                self._vendor = "RIFT.io"
        return self._vendor

    @property
    def version(self):
        """Version (as string) from metadata, defaulting to '1.0'; cached."""
        if self._version is None:
            if self.metadata and 'version' in self.metadata:
                self._version = str(self.metadata['version'])
            else:
                self._version = '1.0'
        return self._version

    @property
    def artifacts(self):
        return self._artifacts

    @artifacts.setter
    def artifacts(self, value):
        self._artifacts = value

    def __str__(self):
        return "%s(%s)" % (self.name, self.type)

    def map_tosca_name_to_mano(self, name):
        """Convert a TOSCA-style name to MANO style ('_' -> '-')."""
        return name.replace("_", "-")

    def map_keys_to_mano(self, d):
        """Recursively convert every dict key from TOSCA to MANO style,
        descending into nested dicts and lists. Mutates dicts in place."""
        if isinstance(d, dict):
            # Fix: iterate over a snapshot of the keys — popping while
            # iterating the live view raises RuntimeError on Python 3
            # (this package is installed PYTHON3_ONLY).
            for key in list(d.keys()):
                d[self.map_tosca_name_to_mano(key)] = \
                    self.map_keys_to_mano(d.pop(key))
            return d
        elif isinstance(d, list):
            return [self.map_keys_to_mano(memb) for memb in d]
        else:
            return d

    def validate_properties(self, properties, required=None, optional=None):
        """Validate a property dict for this node.

        Raises ValidationError if properties is not a dict or a required
        key is missing; logs a warning for keys outside required+optional.
        """
        if not isinstance(properties, dict):
            err_msg = _("Properties for {0}({1}) is not right type"). \
                      format(self.name, self.type_)
            self.log.error(err_msg)
            raise ValidationError(message=err_msg)

        if required:
            optional = optional or []
            # Check if the required properties are present
            for key in required:
                if key not in properties:
                    err_msg = _("Property {0} is not defined "
                                "for {1}({2})"). \
                              format(key, self.name, self.type_)
                    self.log.error(err_msg)
                    raise ValidationError(message=err_msg)

            # Check for unknown properties.
            # Fix: the original used 'or', which flagged every key since
            # no key can appear in both lists at once.
            for key in properties.keys():
                if (key not in required and
                        key not in optional):
                    self.log.warn(_("Property {0} not supported for {1}({2}), "
                                    "will be ignored.").
                                  format(key, self.name, self.type_))

    def handle_properties(self):
        """Hook for subclasses: translate node properties."""
        pass

    def handle_artifacts(self):
        """Hook for subclasses: translate node artifacts."""
        pass

    def handle_capabilities(self):
        """Hook for subclasses: translate node capabilities."""
        pass

    def handle_requirements(self, nodes):
        """Hook for subclasses: resolve requirements against other nodes."""
        pass

    def handle_interfaces(self):
        """Hook for subclasses: translate node interfaces."""
        pass

    def update_image_checksum(self, in_file):
        """Hook for subclasses: record the checksum of an image file."""
        pass

    def generate_yang_model(self, nsd, vnfds, use_gi=False):
        """Generate yang model for the node"""
        self.log.debug(_("{0}: Not doing anything for YANG model generation").
                       format(self))

    def get_supporting_files(self, files, desc_id=None):
        """Hook for subclasses: collect referenced files into 'files'."""
        pass

    def top_of_chain(self):
        """Follow group_dependencies links to the root of this chain."""
        dependent = self.group_dependencies.get(self)
        if dependent is None:
            return self
        else:
            return dependent.top_of_chain()

    def get_dict_output(self):
        """Render as {name: {sections}}, omitting unset sections and
        preserving section order via OrderedDict."""
        resource_sections = OrderedDict()
        resource_sections[TYPE] = self.type
        if self.properties:
            resource_sections[PROPERTIES] = self.properties
        if self.metadata:
            resource_sections[MEDADATA] = self.metadata
        if self.depends_on:
            # Only names are emitted; dependency objects stay internal.
            resource_sections[DEPENDS_ON] = []
            for depend in self.depends_on:
                resource_sections[DEPENDS_ON].append(depend.name)
        if self.update_policy:
            resource_sections[UPDATE_POLICY] = self.update_policy
        if self.deletion_policy:
            resource_sections[DELETION_POLICY] = self.deletion_policy

        return {self.name: resource_sections}

    def get_tosca_props(self):
        """Return the node's properties, mapping GetInput references to
        {'get_param': <input name>} placeholders."""
        tosca_props = {}
        for prop in self.nodetemplate.get_properties_objects():
            if isinstance(prop.value, GetInput):
                tosca_props[prop.name] = {'get_param': prop.value.input_name}
            else:
                tosca_props[prop.name] = prop.value
        return tosca_props

    def get_tosca_caps(self):
        """Return {capability name: {property name: value}} for every
        capability that defines at least one property."""
        tosca_caps = {}
        for cap in self.nodetemplate.get_capabilities_objects():
            properties = cap.get_properties()
            if len(properties):
                tosca_caps[cap.name] = {}
                for name in properties:
                    tosca_caps[cap.name][name] = properties[name].value
        return tosca_caps

    def get_tosca_reqs(self):
        """Return the node's requirements as a list of
        {endpoint: {'target': ..., 'relation': ..., 'interfaces': ...}}
        dicts (relation/interfaces only when present).

        Fixes vs. original: each endpoint's entry is appended (the
        original appended once per requirement, dropping all but the
        last endpoint), and target/relation/interfaces are merged into
        one dict (each assignment used to overwrite the previous key).
        six usage removed — the package is Python-3 only.
        """
        tosca_reqs = []
        for requirement in self.nodetemplate.requirements:
            for endpoint, details in requirement.items():
                relation = None
                interfaces = None
                if isinstance(details, dict):
                    target = details.get('node')
                    relation = details.get('relationship')
                else:
                    target = details
                if (target and relation and
                        not isinstance(relation, str)):
                    interfaces = relation.get('interfaces')
                entry = {'target': target}
                if relation:
                    entry['relation'] = relation
                if interfaces:
                    entry['interfaces'] = interfaces
                tosca_reqs.append({endpoint: entry})
        return tosca_reqs

    def get_property(self, args):
        """Resolve a ['SELF', <prop>] reference against self.properties.

        Logs an error and returns None for anything else.
        """
        # TODO(Philip): Should figure out how to get this resolved
        # by tosca-parser using GetProperty
        if isinstance(args, list):
            if len(args) == 2 and \
               args[0] == 'SELF':
                if args[1] in self.properties:
                    return self.properties[args[1]]
                else:
                    # Fix: "{0} ... {}" mixed manual and automatic field
                    # numbering, which raises ValueError in str.format.
                    self.log.error(_("{0}, property {1} not defined").
                                   format(self.name, args[1]))
                    return
        self.log.error(_("Get property for {0} of type {1} not supported").
                       format(self.name, args))

    def get_node_with_name(self, name, nodes):
        """Get the node instance with specified name"""
        for node in nodes:
            if node.name == name:
                return node

    def get_nodes_related(self, target, type_, nodes):
        """Get list of nodes of the given type related to target node"""
        dep_nodes = []
        for node in nodes:
            if (node.name == target.name or
                    type_ != node.type):
                continue
            for rel in node.nodetemplate.related_nodes:
                if rel.name == target.name:
                    dep_nodes.append(node)
                    break
        return dep_nodes

    def get_mano_attribute(self, attribute, args):
        # This is a placeholder and should be implemented by the subclass
        # if translation is needed for the particular attribute.
        raise Exception(_("No translation in TOSCA type {0} for attribute "
                          "{1}").format(self.nodetemplate.type, attribute))

    @staticmethod
    def _get_all_operations(node):
        """Collect the node's 'Standard' lifecycle operations, walking up
        the type hierarchy; concrete operations override inherited ones."""
        operations = {}
        for operation in node.interfaces:
            operations[operation.name] = operation

        node_type = node.type_definition
        if (isinstance(node_type, str) or
                node_type.type == "tosca.policies.Placement"):
            return operations

        while True:
            type_operations = ManoResource._get_interface_operations_from_type(
                node_type, node, 'Standard')
            # Operations found lower in the hierarchy take precedence.
            type_operations.update(operations)
            operations = type_operations

            if node_type.parent_type is not None:
                node_type = node_type.parent_type
            else:
                return operations

    @staticmethod
    def _get_interface_operations_from_type(node_type, node, lifecycle_name):
        """Extract the named lifecycle's operations declared on a type."""
        operations = {}
        if (isinstance(node_type, str) or
                node_type.type == "tosca.policies.Placement"):
            return operations
        if node_type.interfaces and lifecycle_name in node_type.interfaces:
            for name, elems in node_type.interfaces[lifecycle_name].items():
                # ignore empty operations (only type)
                # ignore global interface inputs,
                # concrete inputs are on the operations themselves
                if name != 'type' and name != 'inputs':
                    operations[name] = InterfacesDef(node_type,
                                                     lifecycle_name,
                                                     node, name, elems)
        return operations

    @staticmethod
    def get_parent_type(node_type):
        """Return the parent type, or None at the top of the hierarchy."""
        if node_type.parent_type is not None:
            return node_type.parent_type
        else:
            return None

    @staticmethod
    def get_base_type(node_type):
        """Return the most-derived ancestor whose parent is a *.Root type
        (or node_type itself when it has no parent)."""
        parent_type = ManoResource.get_parent_type(node_type)
        if parent_type is not None:
            if parent_type.type.endswith('.Root'):
                return node_type
            else:
                return ManoResource.get_base_type(parent_type)
        else:
            return node_type
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
new file mode 100644
index 0000000..d263e6f
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
@@ -0,0 +1,262 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+import uuid
+
+import yaml
+
+from rift.mano.tosca_translator.common.utils import _
+
+from rift.mano.tosca_translator.common.utils import dict_convert_values_to_str
+
+try:
+    import gi
+    gi.require_version('RwYang', '1.0')
+    gi.require_version('RwNsdYang', '1.0')
+    gi.require_version('NsdYang', '1.0')
+
+    from gi.repository import NsdYang
+    from gi.repository import RwNsdYang
+    from gi.repository import RwYang
+except ImportError:
+    pass
+except ValueError as e:
+    pass
+
+
class ManoTemplate(object):
    '''Container for full RIFT.io MANO template.

    The translator fills in resources/parameters/policies/groups and
    metadata, then calls output_to_yang() to produce the NSD and VNFD
    descriptors as YAML strings.
    '''

    # Namespace prefixes used when emitting the yang descriptors.
    YANG_NS = (NSD, VNFD) = ('nsd', 'vnfd')
    # Keys of the per-descriptor dicts returned by output_to_yang().
    OUTPUT_FIELDS = (NAME, ID, YANG, FILES) = ('name', 'id', 'yang', 'files')

    def __init__(self, log):
        self.log = log
        self.resources = []
        self.outputs = []
        self.parameters = []
        self.description = "Translated from TOSCA"
        self.metadata = None
        self.policies = []
        self.groups = []

    def output_to_yang(self, use_gi=False, indent=4):
        """Convert the accumulated translation into NSD/VNFD descriptors.

        use_gi: when True, build descriptors through the RIFT YANG GI
            bindings (RwNsdYang/NsdYang); on any GI failure this falls
            back to the plain-dict path below.
        indent: currently unused — presumably intended for YAML
            formatting; TODO confirm with callers.

        Returns {'nsd': [desc], 'vnfd': [desc, ...]} where each desc has
        NAME/ID/YANG keys (YANG is a YAML string) and optionally FILES.

        NOTE(review): assumes self.metadata carries 'name', 'vendor' and
        'version' keys — set by the translator beforehand; confirm
        against callers.
        """
        self.log.debug(_('Converting translated output to yang model.'))

        nsd_cat = None
        nsd_id = str(uuid.uuid1())
        vnfds = []

        if use_gi:
            try:
                nsd_cat = RwNsdYang.YangData_Nsd_NsdCatalog()
                nsd = nsd_cat.nsd.add()
                nsd.id = nsd_id
                nsd.name = self.metadata['name']
                nsd.description = self.description
                nsd.vendor = self.metadata['vendor']
                nsd.short_name = self.metadata['name']
                nsd.version = self.metadata['version']
            except Exception as e:
                # GI bindings unavailable or incompatible: degrade to the
                # dict-based descriptor path rather than failing.
                self.log.warning(_("Unable to use YANG GI to generate "
                                   "descriptors, falling back to alternate "
                                   "method: {}").format(e))
                self.log.exception(e)
                use_gi = False

        if not use_gi:
            nsd = {
                'id': nsd_id,
                'name': self.metadata['name'],
                'description': self.description,
                'vendor': self.metadata['vendor'],
                'short-name': self.metadata['name'],
                'version': self.metadata['version'],
            }

        # Each resource contributes to the nsd/vnfds in place; ordering
        # matters: vlds, then vnfds, then everything else.
        for resource in self.resources:
            # Do the vlds first
            if resource.type == 'vld':
                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)

        for resource in self.resources:
            # Do the vnfds next
            if resource.type == 'vnfd':
                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)

        for resource in self.resources:
            # Do the other nodes
            if resource.type != 'vnfd' and resource.type != 'vld':
                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)

        for group in self.groups:
            group.generate_yang_model(nsd, vnfds, use_gi=use_gi)

        for policy in self.policies:
            policy.generate_yang_model(nsd, vnfds, use_gi=use_gi)

        # Add input params to nsd
        if use_gi:
            for param in self.parameters:
                nsd.input_parameter_xpath.append(
                 NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
                    xpath=param.get_xpath(),
                    )
                )
        else:
            nsd['input-parameter-xpath'] = []
            for param in self.parameters:
                nsd['input-parameter-xpath'].append(
                    {'xpath': param.get_xpath()})

        # Get list of supporting files referred in template
        # Returned format is {desc_id: [{type: type, name: filename}]}
        # TODO (pjoseph): Currently only images and scripts are retrieved.
        # Need to add support to get script names, charms, etc.
        other_files = {}
        for resource in self.resources:
            resource.get_supporting_files(other_files)

        for policy in self.policies:
            policy.get_supporting_files(other_files, desc_id=nsd_id)

        self.log.debug(_("List of other files: {}".format(other_files)))

        # Do the final processing and convert each descriptor to yaml string
        tpl = {}

        # Add the NSD
        if use_gi:
            nsd_pf = self.get_yaml(['nsd', 'rw-nsd'], nsd_cat)
            # Re-read id/name from the catalog: generate_yang_model may
            # have modified them.
            nsd_id = nsd_cat.nsd[0].id
            nsd_name = nsd_cat.nsd[0].name
        else:
            nsd_id = nsd['id']
            nsd_name = nsd['name']

            # In case of non gi processing,
            # - convert all values to string
            # - enclose in a catalog dict
            # - prefix all keys with nsd or vnfd
            # - Convert to YAML string
            nsd_pf = yaml.dump(
                self.prefix_dict(
                    self.add_cat(dict_convert_values_to_str(nsd),
                                 self.NSD),
                    self.NSD),
                default_flow_style=False)

        nsd_out = {
            self.NAME: nsd_name,
            self.ID: nsd_id,
            self.YANG: nsd_pf,
        }

        # Attach supporting files (images, scripts) keyed by descriptor id.
        if nsd_id in other_files:
            nsd_out[self.FILES] = other_files[nsd_id]

        tpl[self.NSD] = [nsd_out]

        # Add the VNFDs
        tpl[self.VNFD] = []

        for vnfd in vnfds:
            if use_gi:
                vnfd_pf = self.get_yaml(['vnfd', 'rw-vnfd'], vnfd)
                vnfd_id = vnfd.vnfd[0].id
                vnfd_name = vnfd.vnfd[0].name

            else:
                vnfd_id = vnfd['id']
                vnfd_name = vnfd['name']

                # In case of non gi processing,
                # - convert all values to string
                # - enclose in a catalog dict
                # - prefix all keys with nsd or vnfd
                # - Convert to YAML string
                vnfd_pf = yaml.dump(
                    self.prefix_dict(
                        self.add_cat(dict_convert_values_to_str(vnfd),
                                     self.VNFD),
                        self.VNFD),
                    default_flow_style=False)

            vnfd_out = {
                self.NAME: vnfd_name,
                self.ID: vnfd_id,
                self.YANG: vnfd_pf,
            }

            if vnfd_id in other_files:
                vnfd_out[self.FILES] = other_files[vnfd_id]

            tpl[self.VNFD].append(vnfd_out)

        self.log.debug(_("NSD: {0}").format(tpl[self.NSD]))
        self.log.debug(_("VNFDs:"))
        for vnfd in tpl[self.VNFD]:
            self.log.debug(_("{0}").format(vnfd))

        return tpl

    def _get_field(self, d, pf, field='name'):
        '''Get the name given for the descriptor.

        Depth-first search of nested dicts/lists for the first key of
        the form "<pf>:<field>"; returns its value, or None (implicitly)
        when not found.
        '''
        # Search within the desc for a key pf:name
        key = pf+':'+field
        if isinstance(d, dict):
            # If it is a dict, search for pf:name
            if key in d:
                return d[key]
            else:
                for k, v in d.items():
                    result = self._get_field(v, pf, field)
                    if result:
                        return result
        elif isinstance(d, list):
            for memb in d:
                result = self._get_field(memb, pf, field)
                if result:
                        # NOTE(review): over-indented but syntactically valid.
                        return result

    def prefix_dict(self, d, pf):
        '''Prefix all keys of a dict with a specific prefix:'''
        if isinstance(d, dict):
            dic = {}
            for key in d.keys():
                # Only prefix keys without any prefix
                # so later we can do custom prefixing
                # which will not get overwritten here
                if ':' not in key:
                    dic[pf+':'+key] = self.prefix_dict(d[key], pf)
                else:
                    dic[key] = self.prefix_dict(d[key], pf)
            return dic
        elif isinstance(d, list):
            arr = []
            for memb in d:
                arr.append(self.prefix_dict(memb, pf))
            return arr
        else:
            return d

    def add_cat(self, desc, pf):
        '''Wrap a descriptor in its catalog dict: {pf-catalog: {pf: [desc]}}.'''
        return {pf+'-catalog': {pf: [desc]}}

    def get_yaml(self, module_list, desc):
        '''Serialize a GI descriptor to YAML using the given yang modules.'''
        model = RwYang.Model.create_libncx()
        for module in module_list:
            model.load_module(module)
        return desc.to_yaml(model)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py
new file mode 100755
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
new file mode 100755
index 0000000..2b244d7
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
@@ -0,0 +1,269 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+import os
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+from toscaparser.elements.scalarunit import ScalarUnit_Size
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaCompute'
+
+
+class ToscaCompute(ManoResource):
+    '''Translate TOSCA node type RIFT.io VDUs.'''
+
+    # Keys expected to be present in the generated VDU properties.
+    REQUIRED_PROPS = ['name', 'id', 'image', 'count', 'vm-flavor']
+    OPTIONAL_PROPS = [
+        'external-interface',
+        'image-checksum',
+        'cloud-init',
+        'cloud-init-file',]
+    # Properties stripped before emitting the model (currently none).
+    IGNORE_PROPS = []
+
+    toscatype = 'tosca.nodes.Compute'
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        """Wrap one tosca.nodes.Compute node template as a VDU resource."""
+        super(ToscaCompute, self).__init__(log,
+                                           nodetemplate,
+                                           type_='vdu',
+                                           metadata=metadata)
+        # List with associated port resources with this server
+        self.assoc_port_resources = []
+        self._image = None  # Image to bring up the VDU
+        self._image_cksum = None  # MD5 of the image file, when computable
+        self._cloud_init = None # Cloud init file
+        self._vnf = None  # Owning VNF; set exactly once via the vnf setter
+        self._yang = None
+        self._id = self.name
+
+    @property
+    def image(self):
+        # Image file path picked up from the create-operation artifact
+        # in handle_interfaces(); None until then.
+        return self._image
+
+    @property
+    def cloud_init(self):
+        # Relative path ("../cloud_init/<file>") when the template supplied
+        # a cloud-init-file property; otherwise None.
+        return self._cloud_init
+
+    @property
+    def vnf(self):
+        # The VNF resource this VDU belongs to (None until assigned).
+        return self._vnf
+
+    @vnf.setter
+    def vnf(self, vnf):
+        # A VDU may be associated with exactly one VNF; re-assignment
+        # indicates a malformed template.
+        if self._vnf:
+            err_msg = (_('VDU {0} already has a VNF {1} associated').
+                       format(self, self._vnf))
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+        self._vnf = vnf
+
+    def handle_properties(self):
+        """Copy TOSCA node properties into the VDU properties dict.
+
+        Renames cloud_init to 'cloud-init', records cloud-init-file
+        separately (as a ../cloud_init/ relative path), and defaults
+        name, id and count when the template omits them.
+        """
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("VDU {0} tosca properties: {1}").
+                       format(self.name, tosca_props))
+        vdu_props = {}
+        for key, value in tosca_props.items():
+            if key == 'cloud_init':
+                vdu_props['cloud-init'] = value
+            elif key == 'cloud-init-file':
+                # NOTE(review): this key is hyphenated while cloud_init above
+                # uses an underscore -- confirm the template spelling.
+                self._cloud_init = "../cloud_init/{}".format(value)
+            else:
+                vdu_props[key] = value
+
+        if 'name' not in vdu_props:
+            vdu_props['name'] = self.name
+
+        if 'id' not in vdu_props:
+            vdu_props['id'] = self.id
+
+        if 'count' not in vdu_props:
+            # Default to a single instance of this VDU
+            vdu_props['count'] = 1
+
+        self.log.debug(_("VDU {0} properties: {1}").
+                       format(self.name, vdu_props))
+        self.properties = vdu_props
+
+    def handle_capabilities(self):
+        """Derive the vm-flavor property from the node's 'host' capability."""
+
+        def get_vm_flavor(specs):
+            # Map TOSCA host-capability fields onto a MANO vm-flavor dict.
+            # Defaults: 1 vCPU, 512 MB memory, 4 GB storage.
+            vm_flavor = {}
+            if 'num_cpus' in specs:
+                vm_flavor['vcpu-count'] = specs['num_cpus']
+            else:
+                vm_flavor['vcpu-count'] = 1
+
+            if 'mem_size' in specs:
+                vm_flavor['memory-mb'] = (ScalarUnit_Size(specs['mem_size']).
+                                          get_num_from_scalar_unit('MB'))
+            else:
+                vm_flavor['memory-mb'] = 512
+
+            if 'disk_size' in specs:
+                vm_flavor['storage-gb'] = (ScalarUnit_Size(specs['disk_size']).
+                                           get_num_from_scalar_unit('GB'))
+            else:
+                vm_flavor['storage-gb'] = 4
+
+            return vm_flavor
+
+        tosca_caps = self.get_tosca_caps()
+        self.log.debug(_("VDU {0} tosca capabilites: {1}").
+                       format(self.name, tosca_caps))
+
+        if 'host' in tosca_caps:
+            self.properties['vm-flavor'] = get_vm_flavor(tosca_caps['host'])
+            self.log.debug(_("VDU {0} properties: {1}").
+                           format(self.name, self.properties))
+
+    def handle_artifacts(self):
+        """Normalize the node's artifacts into {key: details} form.
+
+        Only QCOW2 image artifacts are accepted; any other image type
+        raises ValidationError.  Unknown attribute names are logged and
+        dropped.  Non-dict artifact entries are passed through unchanged.
+        """
+        if self.artifacts is None:
+            return
+        self.log.debug(_("VDU {0} tosca artifacts: {1}").
+                       format(self.name, self.artifacts))
+        arts = {}
+        for key in self.artifacts:
+            props = self.artifacts[key]
+            if isinstance(props, dict):
+                details = {}
+                for name, value in props.items():
+                    if name == 'type':
+                        # Artifact types look like '<prefix>.QCOW2'; only the
+                        # last component is inspected.
+                        prefix, type_ = value.rsplit('.', 1)
+                        if type_ == 'QCOW2':
+                            details['type'] = 'qcow2'
+                        else:
+                            err_msg = _("VDU {0}, Currently only QCOW2 images "
+                                        "are supported in artifacts ({1}:{2})"). \
+                                        format(self.name, key, value)
+                            self.log.error(err_msg)
+                            raise ValidationError(message=err_msg)
+                    elif name == 'file':
+                        details['file'] = value
+                    elif name == 'image_checksum':
+                        details['image_checksum'] = value
+                    else:
+                        self.log.warn(_("VDU {0}, unsuported attribute {1}").
+                                      format(self.name, name))
+                if len(details):
+                    arts[key] = details
+            else:
+                arts[key] = self.artifacts[key]
+
+        self.log.debug(_("VDU {0} artifacts: {1}").
+                       format(self.name, arts))
+        self.artifacts = arts
+
+    def handle_interfaces(self):
+        """Record supported operations and capture the image file.
+
+        Only the 'create' operation is supported; when its implementation
+        names an artifact, that artifact's 'file' becomes the VDU image.
+        """
+        # Currently, we support only create operation
+        operations_deploy_sequence = ['create']
+
+        operations = ManoResource._get_all_operations(self.nodetemplate)
+
+        # use the current ManoResource for the first operation in this order
+        # Currently we only support image in create operation
+        for operation in operations.values():
+            if operation.name in operations_deploy_sequence:
+                self.operations[operation.name] = None
+                try:
+                    self.operations[operation.name] = operation.implementation
+                    for name, details in self.artifacts.items():
+                        if name == operation.implementation:
+                            self._image = details['file']
+                except KeyError as e:
+                    # Artifact without a 'file' entry, or no artifacts at all;
+                    # logged and skipped rather than failing translation.
+                    self.log.exception(e)
+        return None
+
+    def update_image_checksum(self, in_file):
+        """Compute and store the MD5 checksum of the VDU image.
+
+        in_file is the TOSCA yaml file location; the image path is resolved
+        relative to its directory.  No-op when there is no image, the path
+        does not resolve, or the image file is empty (dummy placeholder).
+        """
+        # Create image checksum
+        # in_file is the TOSCA yaml file location
+        if self._image is None:
+            return
+        self.log.debug("Update image: {}".format(in_file))
+        if os.path.exists(in_file):
+            in_dir = os.path.dirname(in_file)
+            img_dir = os.path.dirname(self._image)
+            abs_dir = os.path.normpath(
+                os.path.join(in_dir, img_dir))
+            self.log.debug("Abs path: {}".format(abs_dir))
+            if os.path.isdir(abs_dir):
+                img_path = os.path.join(abs_dir,
+                                        os.path.basename(self._image))
+                self.log.debug(_("Image path: {0}").
+                               format(img_path))
+                if os.path.exists(img_path):
+                    # TODO (pjoseph): To be fixed when we can retrieve
+                    # the VNF image in Launchpad.
+                    # Check if the file is not size 0
+                    # else it is a dummy file and to be ignored
+                    if os.path.getsize(img_path) != 0:
+                        self._image_cksum = ChecksumUtils.get_md5(img_path,
+                                                                  log=self.log)
+
+    def get_mano_attribute(self, attribute, args):
+        """Map a TOSCA attribute name to a MANO get_attr expression."""
+        attr = {}
+        # Convert from a TOSCA attribute for a nodetemplate to a MANO
+        # attribute for the matching resource.  Unless there is additional
+        # runtime support, this should be a one to one mapping.
+
+        # Note: We treat private and public IP  addresses equally, but
+        # this will change in the future when TOSCA starts to support
+        # multiple private/public IP addresses.
+        self.log.debug(_('Converting TOSCA attribute for a nodetemplate to a MANO \
+                  attriute.'))
+        if attribute == 'private_address' or \
+           attribute == 'public_address':
+                attr['get_attr'] = [self.name, 'networks', 'private', 0]
+
+        return attr
+
+    def _update_properties_for_model(self):
+        # Fold the resolved image name/checksum into properties and drop
+        # any keys listed in IGNORE_PROPS before emitting the model.
+        if self._image:
+            self.properties['image'] = os.path.basename(self._image)
+            if self._image_cksum:
+                self.properties['image-checksum'] = self._image_cksum
+
+        for key in ToscaCompute.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+    def generate_yang_submodel_gi(self, vnfd):
+        """Add this VDU to the GI VNFD object; re-raises on conversion error."""
+        if vnfd is None:
+            return None
+        self._update_properties_for_model()
+        props = convert_keys_to_python(self.properties)
+        try:
+            vnfd.vdu.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vdu from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_submodel(self):
+        """Generate yang model for the VDU"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        self._update_properties_for_model()
+
+        vdu = self.properties
+
+        return vdu
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py
new file mode 100644
index 0000000..b1a6ca2
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py
@@ -0,0 +1,102 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaConfigPrimitives'
+
+
+class ToscaConfigPrimitives(ManoResource):
+    '''Translate TOSCA node type tosca.groups.riftio.config_primitives.'''
+
+    toscatype = 'tosca.groups.riftio.ConfigPrimitives'
+
+    def __init__(self, log, name, details, metadata=None):
+        # TODO(Philip):Not inheriting for ManoResource, as there is no
+        # instance from parser
+        self.log = log
+        self.name = name
+        self.details = details
+        self.type_ = 'config-prim'
+        self.metadata = metadata
+        self.nodes = []
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes):
+        tosca_props = self.details['properties']
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+
+        members = self.details['members']
+        for member in members:
+            found = False
+            for node in nodes:
+                if member == node.name:
+                    self.nodes.append(node)
+                    found = True
+                    break
+            if not found:
+                err_msg = _("{0}: Did not find the member node {1} in "
+                            "resources list"). \
+                          format(self, node)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.primitives = tosca_props['primitives']
+
+    def get_primitive(self, primitive):
+        if primitive in self.primitives:
+            return self.primitives[primitive]
+
+    def validate_primitive(self, primitive):
+        if primitive in self.primitives:
+            return True
+        return False
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        for name, value in self.primitives.items():
+            prim = {'name': name}
+            props = convert_keys_to_python(value)
+            try:
+                prim.update(props)
+            except Exception as e:
+                err_msg = _("{0} Exception nsd config primitives {1}: {2}"). \
+                          format(self, props, e)
+                self.log.error(err_msg)
+                raise e
+            nsd.service_primitive.add().from_dict(prim)
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        nsd['service-primitive'] = []
+        for name, value in self.primitives.items():
+            prim = {'name': name}
+            prim.update(self.map_keys_to_mano(value))
+            nsd['service-primitive'].append(prim)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
new file mode 100644
index 0000000..7c03d56
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaInitialConfig'
+
+
+class ToscaInitialConfig(ManoResource):
+    '''Translate TOSCA node type tosca.policies.InitialConfigPrimitive.'''
+
+    toscatype = 'tosca.policies.riftio.InitialConfigPrimitive'
+
+    IGNORE_PROPS = []
+
+    def __init__(self, log, primitive, metadata=None):
+        # TODO(Philip):Not inheriting for ManoResource, as there is no
+        # instance from parser
+        self.log = log
+        for name, details in primitive.items():
+            self.name = name
+            self.details = details
+            break
+        self.type_ = 'initial-cfg'
+        self.metadata = metadata
+        self.properties = {}
+        self.scripts = []
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        tosca_props = self.details
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+        self.properties['name'] = tosca_props['name']
+        self.properties['seq'] = \
+                                tosca_props['seq']
+        self.properties['user-defined-script'] = \
+                                tosca_props['user_defined_script']
+        self.scripts.append('../scripts/{}'. \
+                            format(tosca_props['user_defined_script']))
+
+        if 'parameter' in tosca_props:
+            self.properties['parameter'] = []
+            for name, value in tosca_props['parameter'].items():
+                self.properties['parameter'].append({
+                    'name': name,
+                    'value': value,
+                })
+
+        self.log.debug(_("{0} properties: {1}").format(self, self.properties))
+
+    def get_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.initial_config_primitive.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception nsd initial config from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaInitialConfig.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.get_yang_model_gi(nsd, vnfds)
+
+        if 'initial-config-primitive' not in nsd:
+            nsd['initial-config-primitive'] = []
+        prim = {}
+        for key, value in self.properties.items():
+            prim[key] = value
+        nsd['initial-config-primitive'].append(prim)
+
+    def get_supporting_files(self, files, desc_id=None):
+        if not len(self.scripts):
+            return
+
+        if desc_id not in files:
+            files[desc_id] = []
+
+        for script in self.scripts:
+            files[desc_id].append({
+                'type': 'script',
+                'name': script,
+            },)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
new file mode 100644
index 0000000..b446e51
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
@@ -0,0 +1,136 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNetwork'
+
+
+class ToscaNetwork(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.network.Network.'''
+
+    toscatype = 'tosca.nodes.network.Network'
+    NETWORK_PROPS = ['network_name', 'network_id']
+    REQUIRED_PROPS = ['name', 'id', 'type', 'version', 'short-name',
+                      'description', 'vendor']
+    OPTIONAL_PROPS = ['vnfd-connection-point-ref']
+    IGNORE_PROPS = ['ip_version', 'dhcp_enabled']
+    VALID_TYPES = ['ELAN']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaNetwork, self).__init__(log,
+                                           nodetemplate,
+                                           type_='vld',
+                                           metadata=metadata)
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+
+        if 'cidr' in tosca_props.keys():
+            self.log.warn(_("Support for subnet not yet "
+                            "available. Ignoring it"))
+        net_props = {}
+        for key, value in tosca_props.items():
+            if key in self.NETWORK_PROPS:
+                if key == 'network_name':
+                    net_props['name'] = value
+                elif key == 'network_id':
+                    net_props['id'] = value
+            else:
+                net_props[key] = value
+
+        net_props['type'] = self.get_type()
+
+        if 'name' not in net_props:
+            # Use the node name as network name
+            net_props['name'] = self.name
+
+        if 'short_name' not in net_props:
+            # Use the node name as network name
+            net_props['short-name'] = self.name
+
+        if 'id' not in net_props:
+            net_props['id'] = self.id
+
+        if 'description' not in net_props:
+            net_props['description'] = self.description
+
+        if 'vendor' not in net_props:
+            net_props['vendor'] = self.vendor
+
+        if 'version' not in net_props:
+            net_props['version'] = self.version
+
+        self.log.debug(_("Network {0} properties: {1}").
+                       format(self.name, net_props))
+        self.properties = net_props
+
+    def get_type(self):
+        """Get the network type based on propery or type derived from"""
+        node = self.nodetemplate
+        tosca_props = self.get_tosca_props()
+        try:
+            if tosca_props['network_type'] in ToscaNetwork.VALID_TYPES:
+                return tosca_props['network_type']
+        except KeyError:
+            pass
+
+        node_type = node.type_definition
+
+        while node_type.type:
+            self.log.debug(_("Node name {0} with type {1}").
+                           format(self.name, node_type.type))
+            prefix, nw_type = node_type.type.rsplit('.', 1)
+            if nw_type in ToscaNetwork.VALID_TYPES:
+                return nw_type
+            else:
+                # Get the parent
+                node_type = ManoResource.get_parent_type(node_type)
+
+        return "ELAN"
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.vld.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vld from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        # Remove the props to be ignroed:
+        for key in ToscaNetwork.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        vld = self.properties
+
+        if 'vld' not in nsd:
+            nsd['vld'] = []
+        nsd['vld'].append(vld)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
new file mode 100644
index 0000000..04e3a59
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
@@ -0,0 +1,145 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNetworkPort'
+TOSCA_LINKS_TO = 'tosca.relationships.network.LinksTo'
+TOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo'
+
+
+class ToscaNetworkPort(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.network.Port.'''
+
+    toscatype = 'tosca.nodes.network.Port'
+
+    # Accepted values for both cp_type and vdu_intf_type.
+    VALID_TYPES = ['VIRTIO', 'VPORT']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        """Wrap one port node template as a connection-point resource."""
+        super(ToscaNetworkPort, self).__init__(log,
+                                               nodetemplate,
+                                               type_='port',
+                                               metadata=metadata)
+        # Default order
+        self.order = 0
+        pass
+
+    def handle_properties(self):
+        """Copy TOSCA properties, defaulting cp_type to VPORT and
+        vdu_intf_type to VIRTIO; reject values outside VALID_TYPES.
+
+        Raises ValidationError on an invalid type.
+        """
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("Port {0} with tosca properties: {1}").
+                       format(self.name, tosca_props))
+        port_props = {}
+        for key, value in tosca_props.items():
+            port_props[key] = value
+
+        if 'cp_type' not in port_props:
+            port_props['cp_type'] = 'VPORT'
+        else:
+            if not port_props['cp_type'] in ToscaNetworkPort.VALID_TYPES:
+                err_msg = _("Invalid port type, {0}, specified for {1}"). \
+                          format(port_props['cp_type'], self.name)
+                self.log.warn(err_msg)
+                raise ValidationError(message=err_msg)
+
+        if 'vdu_intf_type' not in port_props:
+            port_props['vdu_intf_type'] = 'VIRTIO'
+        else:
+            if not port_props['vdu_intf_type'] in ToscaNetworkPort.VALID_TYPES:
+                err_msg = _("Invalid port type, {0}, specified for {1}"). \
+                          format(port_props['vdu_intf_type'], self.name)
+                self.log.warn(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.properties = port_props
+
+    def handle_requirements(self, nodes):
+        """Wire this connection point into its VDU/VNF and VLD.
+
+        Expects exactly two requirements: a virtualBinding naming the VDU
+        and a virtualLink naming the VLD.  Side effects: appends a
+        connection-point entry to the VNF's properties, an
+        external-interface entry to the VDU's properties, and a
+        vnfd-connection-point-ref entry to the VLD's properties.
+
+        Raises ValidationError when the requirement count is wrong or a
+        referenced node cannot be resolved.
+        """
+        tosca_reqs = self.get_tosca_reqs()
+        self.log.debug("VNF {0} requirements: {1}".
+                       format(self.name, tosca_reqs))
+
+        vnf = None  # Need vnf ref to generate cp refs in vld
+        vld = None
+        if len(tosca_reqs) != 2:
+            err_msg = _("Invalid configuration as incorrect number of "
+                        "requirements for CP {0} are specified"). \
+                        format(self)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+        for req in tosca_reqs:
+            if 'virtualBinding' in req:
+                target = req['virtualBinding']['target']
+                node = self.get_node_with_name(target, nodes)
+                if node:
+                    vnf = node.vnf
+                    if not vnf:
+                        err_msg = _("No vnfs linked to a VDU {0}"). \
+                                    format(node)
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+                    # Register this port as a connection point on the VNF.
+                    cp = {}
+                    cp['name'] = self.properties['name']
+                    cp['type'] = self.properties['cp_type']
+                    self.log.debug(_("Connection Point entry for VNF {0}:{1}").
+                                   format(vnf, cp))
+                    if 'connection-point' not in vnf.properties:
+                        vnf.properties['connection-point'] = []
+                    vnf.properties['connection-point'].append(cp)
+                    # And as an external interface on the VDU, referencing
+                    # the connection point created above.
+                    ext_intf = {}
+                    ext_intf['name'] = self.properties['vdu_intf_name']
+                    ext_intf['virtual-interface'] = \
+                                    {'type': self.properties['vdu_intf_type']}
+                    ext_intf['vnfd-connection-point-ref'] = \
+                                    self.properties['name']
+                    if 'external-interface' not in node.properties:
+                        node.properties['external-interface'] = []
+                    node.properties['external-interface'].append(ext_intf)
+                else:
+                    err_msg = _("Connection point {0}, VDU {1} "
+                                "specified not found"). \
+                                format(self.name, target)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
+            elif 'virtualLink' in req:
+                target = req['virtualLink']['target']
+                node = self.get_node_with_name(target, nodes)
+                if node:
+                    vld = node
+                else:
+                    err_msg = _("CP {0}, VL {1} specified not found"). \
+                              format(self, target)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
+
+        # Both ends resolved: cross-reference the CP in the VLD so the NSD
+        # can stitch the VNF's connection point onto this virtual link.
+        if vnf and vld:
+            cp_ref = {}
+            cp_ref['vnfd-connection-point-ref'] = self.properties['name']
+            cp_ref['vnfd-id-ref'] = vnf.properties['id']
+            cp_ref['member-vnf-index-ref'] = \
+                            vnf._const_vnfd['member-vnf-index']
+            if 'vnfd-connection-point-ref' not in vld.properties:
+                vld.properties['vnfd-connection-point-ref'] = []
+            vld.properties['vnfd-connection-point-ref'].append(cp_ref)
+        else:
+            err_msg = _("CP {0}, VNF {1} or VL {2} not found"). \
+                      format(self, vnf, vld)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
new file mode 100644
index 0000000..3e52967
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
@@ -0,0 +1,305 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+try:
+    import gi
+    gi.require_version('RwVnfdYang', '1.0')
+
+    from gi.repository import RwVnfdYang
+except ImportError:
+    pass
+except ValueError:
+    pass
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNfvVnf'
+
+
+class ToscaNfvVnf(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.nfv.vnf.'''
+
+    toscatype = 'tosca.nodes.nfv.VNF'
+
+    REQUIRED_PROPS = ['name', 'short-name', 'id', 'short-name', 'description',
+                      'mgmt-interface']
+    OPTIONAL_PROPS = ['version', 'vendor', 'http-endpoint', 'monitoring-param',
+                      'connection-point']
+    IGNORE_PROPS = ['port']
+    TOSCA_CAPS = ['mgmt_interface', 'http_endpoint', 'monitoring_param_0',
+                  'monitoring_param_1', 'connection_point']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaNfvVnf, self).__init__(log,
+                                          nodetemplate,
+                                          type_="vnfd",
+                                          metadata=metadata)
+        self._const_vnfd = {}
+        self._vnf_config = {}
+        self._vdus = []
+
+    def map_tosca_name_to_mano(self, name):
+        new_name = super().map_tosca_name_to_mano(name)
+        if new_name.startswith('monitoring-param'):
+            new_name = 'monitoring-param'
+        if new_name == 'polling-interval':
+            new_name = 'polling_interval_secs'
+        return new_name
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("VNF {0} with tosca properties: {1}").
+                       format(self.name, tosca_props))
+
+        def get_vnf_config(config):
+            vnf_config = {}
+            for key, value in config.items():
+                new_key = self.map_tosca_name_to_mano(key)
+                if isinstance(value, dict):
+                    sub_config = {}
+                    for subkey, subvalue in value.items():
+                        sub_config[self.map_tosca_name_to_mano(subkey)] = \
+                                        subvalue
+                    vnf_config[new_key] = sub_config
+                else:
+                    vnf_config[new_key] = value
+
+            if vnf_config['config-type'] != 'script':
+                err_msg = _("{}, Only script config supported "
+                             "for now: {}"). \
+                           format(self, vnf_config['config-type'])
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+            # Replace config-details with actual name (config-type)
+            if ('config-type' in vnf_config and
+                'config-details' in vnf_config):
+                vnf_config[vnf_config['config-type']] = \
+                                    vnf_config.pop('config-details')
+                vnf_config.pop('config-type')
+
+            # Update config-delay and confgig-priortiy to correct struct
+            vnf_config['config-attributes'] = {}
+            if 'config-delay' in vnf_config:
+                vnf_config['config-attributes']['config-delay'] = \
+                            vnf_config.pop('config-delay')
+            else:
+                vnf_config['config-attributes']['config-delay'] = 0
+            if 'config-priority' in vnf_config:
+                vnf_config['config-attributes']['config-priority'] = \
+                            vnf_config.pop('config-priority')
+            return vnf_config
+
+        vnf_props = {}
+        for key, value in tosca_props.items():
+            if key == 'id':
+                self._const_vnfd['member-vnf-index'] = int(value)
+                self._const_vnfd['vnfd-id-ref'] = self.id
+            elif key == 'vnf_configuration':
+                self._vnf_config = get_vnf_config(value)
+            else:
+                vnf_props[key] = value
+
+        if 'name' not in vnf_props:
+            vnf_props['name'] = self.name
+
+        if 'short-name' not in vnf_props:
+            vnf_props['short-name'] = self.name
+
+        if 'id' not in vnf_props:
+            vnf_props['id'] = self.id
+
+        if 'vendor' not in vnf_props:
+            vnf_props['vendor'] = self.vendor
+
+        if 'description' not in vnf_props:
+            vnf_props['description'] = self.description
+
+        if 'start_by_default' in vnf_props:
+            self._const_vnfd['start-by-default'] = \
+                                        vnf_props.pop('start_by_default')
+
+        self.log.debug(_("VNF {0} with constituent vnf: {1}").
+                       format(self.name, self._const_vnfd))
+        self.log.debug(_("VNF {0} with properties: {1}").
+                       format(self.name, vnf_props))
+        self.properties = vnf_props
+
+    def handle_capabilities(self):
+        tosca_caps = self.get_tosca_caps()
+        self.log.debug(_("VDU {0} tosca capabilites: {1}").
+                       format(self.name, tosca_caps))
+
+        def get_props(props):
+            properties = {}
+            for key in props.keys():
+                value = props[key]
+                if isinstance(value, dict):
+                    if 'get_property' in value:
+                        val = self.get_property(value['get_property'])
+                        value = val
+                properties[self.map_tosca_name_to_mano(key)] = value
+            return properties
+
+        for key, value in tosca_caps.items():
+            if key in ToscaNfvVnf.TOSCA_CAPS:
+                new_key = self.map_tosca_name_to_mano(key)
+                props = get_props(value)
+                if 'id' in props:
+                    props['id'] = str(props['id'])
+                if 'protocol' in props:
+                    props.pop('protocol')
+
+                # There is only one instance of mgmt interface, but others
+                # are a list
+                if key == 'mgmt_interface':
+                    self.properties[new_key] = props
+                elif key == 'http_endpoint':
+                    if new_key not in self.properties:
+                        self.properties[new_key] = []
+                    self.properties[new_key].append(props)
+                else:
+                    if new_key not in self.properties:
+                        self.properties[new_key] = []
+                    self.properties[new_key].append(props)
+
+        self.log.debug(_("VDU {0} properties: {1}").
+                       format(self.name, self.properties))
+
+    def handle_requirements(self, nodes):
+        tosca_reqs = self.get_tosca_reqs()
+        self.log.debug("VNF {0} requirements: {1}".
+                       format(self.name, tosca_reqs))
+
+        try:
+            for req in tosca_reqs:
+                if 'vdus' in req:
+                    target = req['vdus']['target']
+                    node = self.get_node_with_name(target, nodes)
+                    if node:
+                        self._vdus.append(node)
+                        node._vnf = self
+                        # Add the VDU id to mgmt-intf
+                        if 'mgmt-interface' in self.properties:
+                            self.properties['mgmt-interface']['vdu-id'] = \
+                                            node.id
+                            if 'vdu' in self.properties['mgmt-interface']:
+                                # Older yang
+                                self.properties['mgmt-interface'].pop('vdu')
+                    else:
+                        err_msg = _("VNF {0}, VDU {1} specified not found"). \
+                                  format(self.name, target)
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+
+        except Exception as e:
+            err_msg = _("Exception getting VDUs for VNF {0}: {1}"). \
+                      format(self.name, e)
+            self.log.error(err_msg)
+            raise e
+
+        self.log.debug(_("VNF {0} properties: {1}").
+                       format(self.name, self.properties))
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        vnfd_cat = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+        vnfd = vnfd_cat.vnfd.add()
+        props = convert_keys_to_python(self.properties)
+        try:
+            vnfd.from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception updating vnfd from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+        vnfds.append(vnfd_cat)
+
+        # Update the VDU properties
+        for vdu in self._vdus:
+            vdu.generate_yang_submodel_gi(vnfd)
+
+        # Update constituent vnfd in nsd
+        try:
+            props = convert_keys_to_python(self._const_vnfd)
+            nsd.constituent_vnfd.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception constituent vnfd from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+        # Update the vnf configuration info in mgmt_interface
+        props = convert_keys_to_python(self._vnf_config)
+        try:
+            vnfd.vnf_configuration.from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vnfd mgmt intf from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaNfvVnf.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        vnfd = {}
+        vnfd.update(self.properties)
+        # Update vnf configuration on mgmt interface
+        vnfd['mgmt-interface']['vnf-configuration'] = self._vnf_config
+
+        # Update the VDU properties
+        vnfd['vdu'] = []
+        for vdu in self._vdus:
+            vnfd['vdu'].append(vdu.generate_yang_submodel())
+
+        vnfds.append(vnfd)
+
+        # Update constituent vnfd in nsd
+        if 'constituent-vnfd' not in nsd:
+            nsd['constituent-vnfd'] = []
+        nsd['constituent-vnfd'].append(self._const_vnfd)
+
+    def get_member_vnf_index(self):
+        return self._const_vnfd['member-vnf-index']
+
+    def get_supporting_files(self, files, desc_id=None):
+        files[self.id] = []
+        for vdu in self._vdus:
+            if vdu.image:
+                files[self.id].append({
+                    'type': 'image',
+                    'name': vdu.image,
+                },)
+            if vdu.cloud_init:
+                files[self.id].append({
+                    'type': 'cloud_init',
+                    'name': vdu.cloud_init,
+                },)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
new file mode 100644
index 0000000..25246af
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
@@ -0,0 +1,130 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaScalingGroup'
+
+
+class ToscaScalingGroup(ManoResource):
+    '''Translate TOSCA node type tosca.policies.Scaling.'''
+
+    toscatype = 'tosca.policies.riftio.ScalingGroup'
+
+    # Properties dropped before generating the yang model (none today).
+    IGNORE_PROPS = []
+
+    def __init__(self, log, policy, metadata=None):
+        # TODO(Philip):Not inheriting for ManoResource, as there is no
+        # instance from parser
+        self.log = log
+        # policy is expected to be a single-entry dict {name: details};
+        # only the first pair is kept.
+        for name, details in policy.items():
+            self.name = name
+            self.details = details
+            break
+        self.type_ = 'scale-grp'
+        self.metadata = metadata
+        self.properties = {}
+
+    def __str__(self):
+        # NOTE(review): __init__ sets self.type_, not self.type; this
+        # relies on ManoResource providing a 'type' attribute/property.
+        # Verify, otherwise this raises AttributeError.
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        """Build the scaling-group properties from the TOSCA policy details.
+
+        Every vnfd member must map to a node in 'nodes' and every config
+        action to a primitive known by some group in 'groups'; otherwise
+        a ValidationError is raised.
+        """
+        tosca_props = self.details
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+        self.properties['name'] = tosca_props['name']
+        self.properties['max-instance-count'] = \
+                                tosca_props['max_instance_count']
+        self.properties['min-instance-count'] = \
+                                tosca_props['min_instance_count']
+        self.properties['vnfd-member'] = []
+
+        def _get_node(name):
+            # Returns the node with a matching name, or None implicitly.
+            for node in nodes:
+                if node.name == name:
+                    return node
+
+        for member, count in tosca_props['vnfd_members'].items():
+            node = _get_node(member)
+            if node:
+                memb = {}
+                memb['member-vnf-index-ref'] = node.get_member_vnf_index()
+                memb['count'] = count
+                self.properties['vnfd-member'].append(memb)
+            else:
+                err_msg = _("{0}: Did not find the member node {1} in "
+                            "resources list"). \
+                          format(self, member)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        def _validate_action(action):
+            # True if any group defines the named config primitive.
+            for group in groups:
+                if group.validate_primitive(action):
+                    return True
+            return False
+
+        self.properties['scaling-config-action'] = []
+        for action, value in tosca_props['config_actions'].items():
+            conf = {}
+            if _validate_action(value):
+                conf['trigger'] = action
+                conf['ns-config-primitive-name-ref'] = value
+                self.properties['scaling-config-action'].append(conf)
+            else:
+                err_msg = _("{0}: Did not find the action {1} in "
+                            "config primitives"). \
+                          format(self, action)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.log.debug(_("{0} properties: {1}").format(self, self.properties))
+
+    def get_yang_model_gi(self, nsd, vnfds):
+        """Add this scaling group to the GI nsd object."""
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.scaling_group_descriptor.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception nsd scaling group from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaScalingGroup.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.get_yang_model_gi(nsd, vnfds)
+
+        # Non-GI path: append a copy of the properties to the plain-dict nsd.
+        if 'scaling-group-descriptor' not in nsd:
+            nsd['scaling-group-descriptor'] = []
+        scale = {}
+        for key, value in self.properties.items():
+            scale[key] = value
+        nsd['scaling-group-descriptor'].append(scale)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py b/common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py
new file mode 100644
index 0000000..9c70a8a
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py
@@ -0,0 +1,83 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_template import ManoTemplate
+from rift.mano.tosca_translator.rwmano.translate_inputs import TranslateInputs
+from rift.mano.tosca_translator.rwmano.translate_node_templates \
+    import TranslateNodeTemplates
+from rift.mano.tosca_translator.rwmano.translate_outputs \
+    import TranslateOutputs
+
+
+class TOSCATranslator(object):
+    '''Invokes translation methods.'''
+
+    def __init__(self, log, tosca, parsed_params, deploy=None, use_gi=False):
+        super(TOSCATranslator, self).__init__()
+        self.log = log
+        self.tosca = tosca
+        self.mano_template = ManoTemplate(log)
+        self.parsed_params = parsed_params
+        self.deploy = deploy
+        self.use_gi = use_gi
+        self.node_translator = None
+        log.info(_('Initialized parmaters for translation.'))
+
+    def translate(self):
+        self._resolve_input()
+        self.mano_template.description = self.tosca.description
+        self.mano_template.parameters = self._translate_inputs()
+        self.node_translator = TranslateNodeTemplates(self.log,
+                                                      self.tosca,
+                                                      self.mano_template)
+        self.mano_template.resources = self.node_translator.translate()
+        # TODO(Philip): Currently doing groups and policies seperately
+        # due to limitations with parser
+        self.mano_template.groups = self.node_translator.translate_groups()
+        self.mano_template.policies = self.node_translator.translate_policies()
+        self.mano_template.metadata = self.node_translator.metadata
+        # Currently we do not use outputs, so not processing them
+        # self.mano_template.outputs = self._translate_outputs()
+        return self.mano_template.output_to_yang(use_gi=self.use_gi)
+
+    def _translate_inputs(self):
+        translator = TranslateInputs(self.log,
+                                     self.tosca.inputs,
+                                     self.parsed_params,
+                                     self.deploy)
+        return translator.translate()
+
+    def _translate_outputs(self):
+        translator = TranslateOutputs(self.log,
+                                      self.tosca.outputs,
+                                      self.node_translator)
+        return translator.translate()
+
+    # check all properties for all node and ensure they are resolved
+    # to actual value
+    def _resolve_input(self):
+        for n in self.tosca.nodetemplates:
+            for node_prop in n.get_properties_objects():
+                if isinstance(node_prop.value, dict):
+                    if 'get_input' in node_prop.value:
+                        try:
+                            self.parsed_params[node_prop.value['get_input']]
+                        except Exception:
+                            msg = (_('Must specify all input values in '
+                                     'TOSCA template, missing %s.') %
+                                   node_prop.value['get_input'])
+                        self.log.error(msg)
+                        raise ValueError(msg)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py b/common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py
new file mode 100644
index 0000000..e5583d5
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py
@@ -0,0 +1,172 @@
+# STANDARD_RIFT_IO_COPYRIGHT
+
+# Modified from https://github.com/openstack/heat-translator (APL 2.0)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_parameter import ManoParameter
+
+from toscaparser.dataentity import DataEntity
+from toscaparser.elements.scalarunit import ScalarUnit_Size
+from toscaparser.parameters import Input
+from toscaparser.utils.validateutils import TOSCAVersionProperty
+
+
+# TOSCA input schema keywords alongside the MANO constraint keywords
+# they translate to.
+INPUT_CONSTRAINTS = (CONSTRAINTS, DESCRIPTION, LENGTH, RANGE,
+                     MIN, MAX, ALLOWED_VALUES, ALLOWED_PATTERN) = \
+                    ('constraints', 'description', 'length', 'range',
+                     'min', 'max', 'allowed_values', 'allowed_pattern')
+
+# Constraint operator names as defined by the TOSCA spec; the name
+# constants (EQUAL, GREATER_THAN, ...) are compared against
+# constraint.constraint_key in _translate_constraints().
+TOSCA_CONSTRAINT_OPERATORS = (EQUAL, GREATER_THAN, GREATER_OR_EQUAL, LESS_THAN,
+                              LESS_OR_EQUAL, IN_RANGE, VALID_VALUES, LENGTH,
+                              MIN_LENGTH, MAX_LENGTH, PATTERN) = \
+                             ('equal', 'greater_than', 'greater_or_equal',
+                              'less_than', 'less_or_equal', 'in_range',
+                              'valid_values', 'length', 'min_length',
+                              'max_length', 'pattern')
+
+# Maps each TOSCA constraint operator to the MANO constraint attribute
+# that carries its translated value.
+TOSCA_TO_MANO_CONSTRAINTS_ATTRS = {'equal': 'allowed_values',
+                                   'greater_than': 'range',
+                                   'greater_or_equal': 'range',
+                                   'less_than': 'range',
+                                   'less_or_equal': 'range',
+                                   'in_range': 'range',
+                                   'valid_values': 'allowed_values',
+                                   'length': 'length',
+                                   'min_length': 'length',
+                                   'max_length': 'length',
+                                   'pattern': 'allowed_pattern'}
+
+# Maps TOSCA input (parameter) types to MANO parameter types.
+TOSCA_TO_MANO_INPUT_TYPES = {'string': 'string',
+                             'integer': 'number',
+                             'float': 'number',
+                             'boolean': 'boolean',
+                             'timestamp': 'string',
+                             'scalar-unit.size': 'number',
+                             'version': 'string',
+                             'null': 'string',
+                             'PortDef': 'number'}
+
+
+class TranslateInputs(object):
+
+    '''Translate TOSCA Inputs to RIFT MANO input Parameters.'''
+
+    def __init__(self, log, inputs, parsed_params, deploy=None):
+        self.log = log
+        self.inputs = inputs
+        self.parsed_params = parsed_params
+        self.deploy = deploy
+
+    def translate(self):
+        return self._translate_inputs()
+
+    def _translate_inputs(self):
+        mano_inputs = []
+        if 'key_name' in self.parsed_params and 'key_name' not in self.inputs:
+            name = 'key_name'
+            type = 'string'
+            default = self.parsed_params[name]
+            schema_dict = {'type': type, 'default': default}
+            input = Input(name, schema_dict)
+            self.inputs.append(input)
+
+        self.log.info(_('Translating TOSCA input type to MANO input type.'))
+        for input in self.inputs:
+            mano_default = None
+            mano_input_type = TOSCA_TO_MANO_INPUT_TYPES[input.type]
+
+            if input.name in self.parsed_params:
+                mano_default = DataEntity.validate_datatype(
+                    input.type, self.parsed_params[input.name])
+            elif input.default is not None:
+                mano_default = DataEntity.validate_datatype(input.type,
+                                                            input.default)
+            else:
+                if self.deploy:
+                    msg = _("Need to specify a value "
+                            "for input {0}.").format(input.name)
+                    self.log.error(msg)
+                    raise Exception(msg)
+            if input.type == "scalar-unit.size":
+                # Assumption here is to use this scalar-unit.size for size of
+                # cinder volume in heat templates and will be in GB.
+                # should add logic to support other types if needed.
+                input_value = mano_default
+                mano_default = (ScalarUnit_Size(mano_default).
+                                get_num_from_scalar_unit('GiB'))
+                if mano_default == 0:
+                    msg = _('Unit value should be > 0.')
+                    self.log.error(msg)
+                    raise Exception(msg)
+                elif int(mano_default) < mano_default:
+                    mano_default = int(mano_default) + 1
+                    self.log.warning(_("Cinder unit value should be in"
+                                       " multiples of GBs. So corrected"
+                                       " %(input_value)s to %(mano_default)s"
+                                       " GB.")
+                                     % {'input_value': input_value,
+                                        'mano_default': mano_default})
+            if input.type == 'version':
+                mano_default = TOSCAVersionProperty(mano_default).get_version()
+
+            mano_constraints = []
+            if input.constraints:
+                for constraint in input.constraints:
+                    if mano_default:
+                        constraint.validate(mano_default)
+                    hc, hvalue = self._translate_constraints(
+                        constraint.constraint_key, constraint.constraint_value)
+                    mano_constraints.append({hc: hvalue})
+
+            mano_inputs.append(ManoParameter(self.log,
+                                             name=input.name,
+                                             type=mano_input_type,
+                                             description=input.description,
+                                             default=mano_default,
+                                             constraints=mano_constraints))
+        return mano_inputs
+
+    def _translate_constraints(self, name, value):
+        mano_constraint = TOSCA_TO_MANO_CONSTRAINTS_ATTRS[name]
+
+        # Offset used to support less_than and greater_than.
+        # TODO(anyone):  when parser supports float, verify this works
+        offset = 1
+
+        if name == EQUAL:
+            mano_value = [value]
+        elif name == GREATER_THAN:
+            mano_value = {"min": value + offset}
+        elif name == GREATER_OR_EQUAL:
+            mano_value = {"min": value}
+        elif name == LESS_THAN:
+            mano_value = {"max": value - offset}
+        elif name == LESS_OR_EQUAL:
+            mano_value = {"max": value}
+        elif name == IN_RANGE:
+            # value is list type here
+            min_value = min(value)
+            max_value = max(value)
+            mano_value = {"min": min_value, "max": max_value}
+        elif name == LENGTH:
+            mano_value = {"min": value, "max": value}
+        elif name == MIN_LENGTH:
+            mano_value = {"min": value}
+        elif name == MAX_LENGTH:
+            mano_value = {"max": value}
+        else:
+            mano_value = value
+        return mano_constraint, mano_value
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
new file mode 100644
index 0000000..dbfaa62
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
@@ -0,0 +1,328 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import importlib
+import os
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.exception import ToscaClassAttributeError
+from rift.mano.tosca_translator.common.exception import ToscaClassImportError
+from rift.mano.tosca_translator.common.exception import ToscaModImportError
+from rift.mano.tosca_translator.conf.config import ConfigProvider as translatorConfig
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+
+class TranslateNodeTemplates(object):
+    '''Translate TOSCA NodeTemplates to RIFT.io MANO Resources.'''
+
+    ##################
+    # Module constants
+    ##################
+
+    TOSCA_TO_MANO_REQUIRES = {'container': 'server',
+                              'host': 'server',
+                              'dependency': 'depends_on',
+                              'connects': 'depends_on'}
+
+    TOSCA_TO_MANO_PROPERTIES = {'properties': 'input'}
+
+    TOSCA_TO_MANO_TYPE = None
+
+    ###########################
+    # Module utility Functions
+    # for dynamic class loading
+    ###########################
+
+    def _load_classes(log, locations, classes):
+        '''Dynamically load all the classes from the given locations.'''
+
+        for cls_path in locations:
+            # Use the absolute path of the class path
+            abs_path = os.path.dirname(os.path.abspath(__file__))
+            abs_path = abs_path.replace('rift/mano/tosca_translator/rwmano', cls_path)
+            log.debug(_("Loading classes from %s") % abs_path)
+
+            # Grab all the tosca type module files in the given path
+            mod_files = [f for f in os.listdir(abs_path) if (
+                f.endswith('.py') and
+                not f.startswith('__init__') and
+                f.startswith('tosca_'))]
+
+            # For each module, pick out the target translation class
+            for f in mod_files:
+                # NOTE: For some reason the existing code does not use
+                # the map to instantiate
+                # ToscaBlockStorageAttachment. Don't add it to the map
+                # here until the dependent code is fixed to use the
+                # map.
+                if f == 'tosca_block_storage_attachment.py':
+                    continue
+
+                # mod_name = cls_path + '/' + f.rstrip('.py')
+                # Above have an issue if the mod name ends with p or y
+                f_name, ext = f.rsplit('.', 1)
+                mod_name = cls_path + '/' + f_name
+                mod_name = mod_name.replace('/', '.')
+                try:
+                    mod = importlib.import_module(mod_name)
+                    target_name = getattr(mod, 'TARGET_CLASS_NAME')
+                    clazz = getattr(mod, target_name)
+                    classes.append(clazz)
+                except ImportError:
+                    raise ToscaModImportError(mod_name=mod_name)
+                except AttributeError:
+                    if target_name:
+                        raise ToscaClassImportError(name=target_name,
+                                                    mod_name=mod_name)
+                    else:
+                        # TARGET_CLASS_NAME is not defined in module.
+                        # Re-raise the exception
+                        raise
+
+    def _generate_type_map(log):
+        '''Generate TOSCA translation types map.
+
+        Load user defined classes from location path specified in conf file.
+        Base classes are located within the tosca directory.
+        '''
+
+        # Base types directory
+        BASE_PATH = 'rift/mano/tosca_translator/rwmano/tosca'
+
+        # Custom types directory defined in conf file
+        custom_path = translatorConfig.get_value('DEFAULT',
+                                                 'custom_types_location')
+
+        # First need to load the parent module, for example 'contrib.mano',
+        # for all of the dynamically loaded classes.
+        classes = []
+        TranslateNodeTemplates._load_classes(log,
+                                             (BASE_PATH, custom_path),
+                                             classes)
+        try:
+            types_map = {clazz.toscatype: clazz for clazz in classes}
+            log.debug(_("Type maps loaded: {}").format(types_map.keys()))
+        except AttributeError as e:
+            raise ToscaClassAttributeError(message=e.message)
+
+        return types_map
+
+    def __init__(self, log, tosca, mano_template):
+        self.log = log
+        self.tosca = tosca
+        self.nodetemplates = self.tosca.nodetemplates
+        self.mano_template = mano_template
+        # list of all MANO resources generated
+        self.mano_resources = []
+        self.mano_policies = []
+        self.mano_groups = []
+        # mapping between TOSCA nodetemplate and MANO resource
+        log.debug(_('Mapping between TOSCA nodetemplate and MANO resource.'))
+        self.mano_lookup = {}
+        self.policies = self.tosca.topology_template.policies
+        self.groups = self.tosca.topology_template.groups
+        self.metadata = {}
+
+    def translate(self):
+        if TranslateNodeTemplates.TOSCA_TO_MANO_TYPE is None:
+            TranslateNodeTemplates.TOSCA_TO_MANO_TYPE = \
+                TranslateNodeTemplates._generate_type_map(self.log)
+        # Translate metadata
+        self.translate_metadata()
+        return self._translate_nodetemplates()
+
+    def translate_metadata(self):
+        """Translate and store the metadata in instance"""
+        FIELDS_MAP = {
+            'ID': 'name',
+            'vendor': 'vendor',
+            'version': 'version',
+        }
+        metadata = {}
+        # Initialize to default values
+        metadata['name'] = 'tosca_to_mano'
+        metadata['vendor'] = 'RIFT.io'
+        metadata['version'] = '1.0'
+        if 'metadata' in self.tosca.tpl:
+            tosca_meta = self.tosca.tpl['metadata']
+            for key in FIELDS_MAP:
+                if key in tosca_meta.keys():
+                    metadata[FIELDS_MAP[key]] = str(tosca_meta[key])
+        self.log.debug(_("Metadata {0}").format(metadata))
+        self.metadata = metadata
+
+    def _recursive_handle_properties(self, resource):
+        '''Recursively handle the properties of the depends_on_nodes nodes.'''
+        # Use of hashtable (dict) here should be faster?
+        if resource in self.processed_resources:
+            return
+        self.processed_resources.append(resource)
+        for depend_on in resource.depends_on_nodes:
+            self._recursive_handle_properties(depend_on)
+
+        if resource.type == "OS::Nova::ServerGroup":
+            resource.handle_properties(self.mano_resources)
+        else:
+            resource.handle_properties()
+
+    def _get_policy_type(self, policy):
+        if isinstance(policy, dict):
+            for key, details in policy.items():
+                if 'type' in details:
+                    return details['type']
+
+    def _translate_nodetemplates(self):
+
+        self.log.debug(_('Translating the node templates.'))
+        # Copy the TOSCA graph: nodetemplate
+        tpl = self.tosca.tpl['topology_template']['node_templates']
+        for node in self.nodetemplates:
+            base_type = ManoResource.get_base_type(node.type_definition)
+            self.log.debug(_("Translate node %(name)s of type %(type)s with "
+                             "base %(base)s") %
+                           {'name': node.name,
+                            'type': node.type,
+                            'base': base_type.type})
+            mano_node = TranslateNodeTemplates. \
+                        TOSCA_TO_MANO_TYPE[base_type.type](
+                            self.log,
+                            node,
+                            metadata=self.metadata)
+            # Currently tosca-parser does not add the artifacts
+            # to the node
+            if mano_node.name in tpl:
+                tpl_node = tpl[mano_node.name]
+                self.log.debug("Check artifacts for {}".format(tpl_node))
+                if 'artifacts' in tpl_node:
+                    mano_node.artifacts = tpl_node['artifacts']
+            self.mano_resources.append(mano_node)
+            self.mano_lookup[node] = mano_node
+
+        # The parser currently do not generate the objects for groups
+        if 'groups' in self.tosca.tpl['topology_template']:
+            tpl = self.tosca.tpl['topology_template']['groups']
+            self.log.debug("Groups: {}".format(tpl))
+            for group, details in tpl.items():
+                self.log.debug(_("Translate group {}: {}").
+                               format(group, details))
+                group_type = details['type']
+                if group_type:
+                    group_node = TranslateNodeTemplates. \
+                                 TOSCA_TO_MANO_TYPE[group_type](
+                                     self.log,
+                                     group,
+                                     details,
+                                     metadata=self.metadata)
+                    self.mano_groups.append(group_node)
+
+        # The parser currently do not generate the objects for policies
+        if 'policies' in self.tosca.tpl['topology_template']:
+            tpl = self.tosca.tpl['topology_template']['policies']
+            # for policy in self.policies:
+            for policy in tpl:
+                self.log.debug(_("Translate policy {}").
+                               format(policy))
+                policy_type = self._get_policy_type(policy)
+                if policy_type:
+                    policy_node = TranslateNodeTemplates. \
+                                  TOSCA_TO_MANO_TYPE[policy_type](
+                                      self.log,
+                                      policy,
+                                      metadata=self.metadata)
+                    self.mano_policies.append(policy_node)
+
+        for node in self.mano_resources:
+            self.log.debug(_("Handle properties for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_properties()
+
+            self.log.debug(_("Handle capabilites for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_capabilities()
+
+            self.log.debug(_("Handle aritfacts for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_artifacts()
+
+            self.log.debug(_("Handle interfaces for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_interfaces()
+
+            self.log.debug(_("Update image checksum for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.update_image_checksum(self.tosca.path)
+
+        for node in self.mano_resources:
+            # Handle vnf and vdu dependencies first
+            if node.type == "vnfd":
+                try:
+                    self.log.debug(_("Handle requirements for {0} of "
+                                     "type {1}").
+                                   format(node.name, node.type_))
+                    node.handle_requirements(self.mano_resources)
+                except Exception as e:
+                    self.log.error(_("Exception for {0} in requirements {1}").
+                                   format(node.name, node.type_))
+                    self.log.exception(e)
+
+        for node in self.mano_resources:
+            # Now handle other dependencies
+            if node.type != "vnfd":
+                try:
+                    self.log.debug(_("Handle requirements for {0} of type {1}").
+                                   format(node.name, node.type_))
+                    node.handle_requirements(self.mano_resources)
+                except Exception as e:
+                    self.log.error(_("Exception for {0} in requirements {1}").
+                                   format(node.name, node.type_))
+                    self.log.exception(e)
+
+        return self.mano_resources
+
+    def translate_groups(self):
+        for group in self.mano_groups:
+            group.handle_properties(self.mano_resources)
+        return self.mano_groups
+
+    def translate_policies(self):
+        for policy in self.mano_policies:
+            policy.handle_properties(self.mano_resources, self.mano_groups)
+        return self.mano_policies
+
+    def find_mano_resource(self, name):
+        for resource in self.mano_resources:
+            if resource.name == name:
+                return resource
+
+    def _find_tosca_node(self, tosca_name):
+        for node in self.nodetemplates:
+            if node.name == tosca_name:
+                return node
+
+    def _find_mano_resource_for_tosca(self, tosca_name,
+                                      current_mano_resource=None):
+        if tosca_name == 'SELF':
+            return current_mano_resource
+        if tosca_name == 'HOST' and current_mano_resource is not None:
+            for req in current_mano_resource.nodetemplate.requirements:
+                if 'host' in req:
+                    return self._find_mano_resource_for_tosca(req['host'])
+
+        for node in self.nodetemplates:
+            if node.name == tosca_name:
+                return self.mano_lookup[node]
+
+        return None
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py b/common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py
new file mode 100644
index 0000000..d684492
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py
@@ -0,0 +1,47 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_output import ManoOutput
+
+
+class TranslateOutputs(object):
+    '''Translate TOSCA Outputs to Heat Outputs.'''
+
+    def __init__(self, log, outputs, node_translator):
+        log.debug(_('Translating TOSCA outputs to MANO outputs.'))
+        self.log = log
+        self.outputs = outputs
+        self.nodes = node_translator
+
+    def translate(self):
+        return self._translate_outputs()
+
+    def _translate_outputs(self):
+        mano_outputs = []
+        for output in self.outputs:
+            if output.value.name == 'get_attribute':
+                get_parameters = output.value.args
+                mano_target = self.nodes.find_mano_resource(get_parameters[0])
+                mano_value = mano_target.get_mano_attribute(get_parameters[1],
+                                                            get_parameters)
+                mano_outputs.append(ManoOutput(output.name,
+                                               mano_value,
+                                               output.description))
+            else:
+                mano_outputs.append(ManoOutput(output.name,
+                                               output.value,
+                                               output.description))
+        return mano_outputs
diff --git a/common/python/rift/mano/tosca_translator/shell.py b/common/python/rift/mano/tosca_translator/shell.py
new file mode 100644
index 0000000..9221c79
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/shell.py
@@ -0,0 +1,515 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import logging.config
+import os
+import shutil
+import stat
+import subprocess
+import tempfile
+import zipfile
+
+import magic
+
+import yaml
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+from rift.mano.tosca_translator.rwmano.syntax.mano_template import ManoTemplate
+from rift.mano.tosca_translator.rwmano.tosca_translator import TOSCATranslator
+
+from toscaparser.tosca_template import ToscaTemplate
+
+
+"""
+Test the tosca translation from command line as:
+#translator
+  --template-file=<path to the YAML template or CSAR>
+  --template-type=<type of template e.g. tosca>
+  --parameters="purpose=test"
+  --output-dir=<output directory>
+  --archive
+  --validate-only
+Takes following user arguments,
+. Path to the file that needs to be translated (required)
+. Input parameters (optional)
+. Write to output files in a dir (optional), else print on screen
+. Create archive or not
+
+In order to use translator to only validate template,
+without actual translation, pass --validate-only along with
+other required arguments.
+
+"""
+
+
+class ToscaShellError(Exception):
+    '''Base exception for TOSCA translator shell errors.'''
+    pass
+
+
+class ToscaEntryFileError(ToscaShellError):
+    '''Raised when the input archive cannot be extracted or its metadata read.'''
+    pass
+
+
+class ToscaNoEntryDefinitionError(ToscaShellError):
+    '''Raised when TOSCA.meta has no Entry-Definitions key.'''
+    pass
+
+
+class ToscaEntryFileNotFoundError(ToscaShellError):
+    '''Raised when the entry file named in TOSCA.meta does not exist.'''
+    pass
+
+
+class TranslatorShell(object):
+    '''Command-line and programmatic driver for the TOSCA-to-MANO translator.'''
+
+    SUPPORTED_TYPES = ['tosca']
+    # Directories copied verbatim from an archive into the output
+    COPY_DIRS = ['images']
+    # Recognized input file kinds; also binds self.YAML / self.ZIP
+    SUPPORTED_INPUTS = (YAML, ZIP) = ('yaml', 'zip')
+
+    def __init__(self, log=None):
+        # When None, main() installs a default logger
+        self.log = log
+
+    def main(self, raw_args=None):
+        '''CLI entry point: parse arguments, then validate or translate.
+
+        With --validate-only the template is parsed and a success message
+        printed; otherwise the template is translated and written out.
+        '''
+        args = self._parse_args(raw_args)
+
+        # Install a default logger if the caller did not provide one
+        if self.log is None:
+            if args.debug:
+                logging.basicConfig(level=logging.DEBUG)
+            else:
+                logging.basicConfig(level=logging.ERROR)
+            self.log = logging.getLogger("tosca-translator")
+
+        self.template_file = args.template_file
+
+        parsed_params = {}
+        if args.parameters:
+            parsed_params = self._parse_parameters(args.parameters)
+
+        self.archive = False
+        if args.archive:
+            self.archive = True
+
+        # Temp dir used when the input is an archive (see get_entry_file)
+        self.tmpdir = None
+
+        if args.validate_only:
+            # Parse only; no translation is performed
+            a_file = os.path.isfile(args.template_file)
+            tpl = ToscaTemplate(self.template_file, parsed_params, a_file)
+            self.log.debug(_('Template = {}').format(tpl.__dict__))
+            msg = (_('The input {} successfully passed ' \
+                     'validation.').format(self.template_file))
+            print(msg)
+        else:
+            self.use_gi = not args.no_gi
+            tpl = self._translate("tosca", parsed_params)
+            if tpl:
+                return self._write_output(tpl, args.output_dir)
+
+    def translate(self,
+                  template_file,
+                  output_dir=None,
+                  use_gi=True,
+                  archive=False,):
+        '''Programmatic entry point: translate a TOSCA template file.
+
+        Arguments:
+          template_file -- path to a TOSCA YAML file or zip archive
+          output_dir    -- directory for generated descriptors
+          use_gi        -- use the YANG GI to generate descriptors
+          archive       -- create an archive of the translated files
+
+        Raises ValueError if template_file is not a regular file.
+        '''
+        self.template_file = template_file
+
+        # Check the input file
+        path = os.path.abspath(template_file)
+        self.in_file = path
+        a_file = os.path.isfile(path)
+        if not a_file:
+            msg = _("The path {0} is not a valid file.").format(template_file)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+        # Get the file type (yaml or zip)
+        self.ftype = self._get_file_type()
+        self.log.debug(_("Input file {0} is of type {1}").
+                       format(path, self.ftype))
+
+        self.archive = archive
+
+        # Temp dir for archive extraction (set in get_entry_file)
+        self.tmpdir = None
+
+        self.use_gi = use_gi
+
+        tpl = self._translate("tosca", {})
+        if tpl:
+            return self._write_output(tpl, output_dir)
+
+    def _parse_args(self, raw_args=None):
+        parser = argparse.ArgumentParser(
+            description='RIFT TOSCA translator for descriptors')
+
+        parser.add_argument(
+            "-f",
+            "--template-file",
+            required=True,
+            help="Template file to translate")
+
+        parser.add_argument(
+            "-o",
+            "--output-dir",
+            help="Directory to output")
+
+        parser.add_argument(
+            "-p", "--parameters",
+            help="Input parameters")
+
+        parser.add_argument(
+            "-a", "--archive",
+            action="store_true",
+            help="Archive the translated files")
+
+        parser.add_argument(
+            "--no-gi",
+            help="Do not use the YANG GI to generate descriptors",
+            action="store_true")
+
+        parser.add_argument(
+            "--validate-only",
+            help="Validate template, no translation",
+            action="store_true")
+
+        parser.add_argument(
+            "--debug",
+            help="Enable debug logging",
+            action="store_true")
+
+        if raw_args:
+            args = parser.parse_args(raw_args)
+        else:
+            args = parser.parse_args()
+        return args
+
+    def _parse_parameters(self, parameter_list):
+        parsed_inputs = {}
+        if parameter_list:
+            # Parameters are semi-colon separated
+            inputs = parameter_list.replace('"', '').split(';')
+            # Each parameter should be an assignment
+            for param in inputs:
+                keyvalue = param.split('=')
+                # Validate the parameter has both a name and value
+                msg = _("'%(param)s' is not a well-formed parameter.") % {
+                    'param': param}
+                if keyvalue.__len__() is 2:
+                    # Assure parameter name is not zero-length or whitespace
+                    stripped_name = keyvalue[0].strip()
+                    if not stripped_name:
+                        self.log.error(msg)
+                        raise ValueError(msg)
+                    # Add the valid parameter to the dictionary
+                    parsed_inputs[keyvalue[0]] = keyvalue[1]
+                else:
+                    self.log.error(msg)
+                    raise ValueError(msg)
+        return parsed_inputs
+
+    def get_entry_file(self):
+        '''Return the path of the TOSCA entry definition file.
+
+        For plain YAML input this is the input file itself. For a zip
+        archive the archive is extracted to a temp dir and the entry
+        file is looked up via TOSCA-Metadata/TOSCA.meta.
+
+        Raises ToscaEntryFileError, ToscaEntryFileNotFoundError or
+        ToscaNoEntryDefinitionError on failure.
+        '''
+        # Extract the archive and get the entry file
+        if self.ftype == self.YAML:
+            return self.in_file
+
+        # NOTE(review): the code below assumes self.ftype is ZIP here;
+        # if _get_file_type can return anything else, self.tmpdir stays
+        # None (os.chdir below fails) and prevdir is unbound in the
+        # finally clause -- TODO confirm
+        self.prefix = ''
+        if self.ftype == self.ZIP:
+            self.tmpdir = tempfile.mkdtemp()
+            prevdir = os.getcwd()
+            try:
+                with zipfile.ZipFile(self.in_file) as zf:
+                    self.prefix = os.path.commonprefix(zf.namelist())
+                    self.log.debug(_("Zipfile prefix is {0}").
+                                   format(self.prefix))
+                    zf.extractall(self.tmpdir)
+
+                    # Set the execute bits on scripts as zipfile
+                    # does not restore the permissions bits
+                    os.chdir(self.tmpdir)
+                    for fname in zf.namelist():
+                        if (fname.startswith('scripts/') and
+                            os.path.isfile(fname)):
+                            # Assume this is a script file
+                            # Give all permissions to owner and read+execute
+                            # for group and others
+                            os.chmod(fname,
+                                     stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH)
+
+                # TODO (pjoseph): Use the below code instead of extract all
+                # once unzip is installed on launchpad VMs
+                # zfile = os.path.abspath(self.in_file)
+                # os.chdir(self.tmpdir)
+                # zip_cmd = "unzip {}".format(zfile)
+                # subprocess.check_call(zip_cmd,
+                #                       #stdout=subprocess.PIPE,
+                #                       #stderr=subprocess.PIPE,
+                #                       shell=True,)
+
+            except Exception as e:
+                # Clean up the temp dir and restore the cwd on any failure
+                msg = _("Exception extracting input file {0}: {1}"). \
+                      format(self.in_file, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                os.chdir(prevdir)
+                shutil.rmtree(self.tmpdir)
+                self.tmpdir = None
+                raise ToscaEntryFileError(msg)
+
+        os.chdir(self.tmpdir)
+
+        try:
+            # Go to the TOSCA Metadata file
+            prefix_dir = os.path.join(self.tmpdir, self.prefix)
+            meta_file = os.path.join(prefix_dir, 'TOSCA-Metadata',
+                                     'TOSCA.meta')
+            self.log.debug(_("Checking metadata file {0}").format(meta_file))
+            if not os.path.exists(meta_file):
+                self.log.error(_("Not able to find metadata file in archive"))
+                return
+
+            # Open the metadata file and get the entry file
+            # NOTE(review): yaml.load without an explicit Loader can
+            # construct arbitrary objects from archive content; consider
+            # yaml.safe_load -- TODO confirm no custom tags are needed
+            with open(meta_file, 'r') as f:
+                meta = yaml.load(f)
+
+                if 'Entry-Definitions' in meta:
+                    entry_file = os.path.join(prefix_dir,
+                                              meta['Entry-Definitions'])
+                    if os.path.exists(entry_file):
+                        self.log.debug(_("TOSCA entry file is {0}").
+                                       format(entry_file))
+                        return entry_file
+
+                    else:
+                        msg = _("Unable to get the entry file: {0}"). \
+                              format(entry_file)
+                        self.log.error(msg)
+                        raise ToscaEntryFileNotFoundError(msg)
+
+                else:
+                    msg = _("Did not find entry definition " \
+                            "in metadata: {0}").format(meta)
+                    self.log.error(msg)
+                    raise ToscaNoEntryDefinitionError(msg)
+
+        except Exception as e:
+            msg = _('Exception parsing metadata file {0}: {1}'). \
+                  format(meta_file, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise ToscaEntryFileError(msg)
+
+        finally:
+            # Always restore the original working directory
+            os.chdir(prevdir)
+
+    def _translate(self, sourcetype, parsed_params):
+        output = None
+
+        # Check the input file
+        path = os.path.abspath(self.template_file)
+        self.in_file = path
+        a_file = os.path.isfile(path)
+        if not a_file:
+            msg = _("The path {} is not a valid file."). \
+                  format(self.template_file)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+        # Get the file type
+        self.ftype = self._get_file_type()
+        self.log.debug(_("Input file {0} is of type {1}").
+                       format(path, self.ftype))
+
+        if sourcetype == "tosca":
+            entry_file = self.get_entry_file()
+            if entry_file:
+                self.log.debug(_('Loading the tosca template.'))
+                tosca = ToscaTemplate(entry_file, parsed_params, True)
+                self.log.debug(_('TOSCA Template: {}').format(tosca.__dict__))
+                translator = TOSCATranslator(self.log, tosca, parsed_params,
+                                         use_gi=self.use_gi)
+                self.log.debug(_('Translating the tosca template.'))
+                output = translator.translate()
+        return output
+
+    def _copy_supporting_files(self, output_dir, files):
+        '''Copy supporting files from the extracted archive into output_dir.
+
+        'files' is a list of dicts with 'name' (path relative to the
+        archive's Definitions directory) and 'type' ('image', 'script'
+        or 'cloud_init') keys. Copy failures are logged, not raised.
+        '''
+        # Copy supporting files, if present in archive
+        if self.tmpdir:
+            # The files are refered relative to the definitions directory
+            arc_dir = os.path.join(self.tmpdir,
+                                   self.prefix,
+                                   'Definitions')
+            prevdir = os.getcwd()
+            try:
+                os.chdir(arc_dir)
+                for fn in files:
+                    fname = fn['name']
+                    fpath = os.path.abspath(fname)
+                    ty = fn['type']
+                    # Choose the destination subdir by file type
+                    if ty == 'image':
+                        dest = os.path.join(output_dir, 'images')
+                    elif ty == 'script':
+                        dest = os.path.join(output_dir, 'scripts')
+                    elif ty == 'cloud_init':
+                        dest = os.path.join(output_dir, 'cloud_init')
+                    else:
+                        self.log.warn(_("Unknown file type {0} for {1}").
+                                      format(ty, fname))
+                        continue
+
+                    self.log.debug(_("File type {0} copy from {1} to {2}").
+                                   format(ty, fpath, dest))
+                    if os.path.exists(fpath):
+                        # Copy the files to the appropriate dir
+                        self.log.debug(_("Copy file(s) {0} to {1}").
+                                         format(fpath, dest))
+                        if os.path.isdir(fpath):
+                            # Copy directory structure like charm dir
+                            # NOTE(review): shutil.copytree raises if dest
+                            # already exists, so a second directory of the
+                            # same type would fail (caught below) -- verify
+                            shutil.copytree(fpath, dest)
+                        else:
+                            # Copy a single file
+                            os.makedirs(dest, exist_ok=True)
+                            shutil.copy2(fpath, dest)
+
+                    else:
+                        self.log.warn(_("Could not find file {0} at {1}").
+                                      format(fname, fpath))
+
+            except Exception as e:
+                # Best-effort: log and continue rather than abort the write
+                self.log.error(_("Exception copying files {0}: {1}").
+                               format(arc_dir, e))
+                self.log.exception(e)
+
+            finally:
+                os.chdir(prevdir)
+
+    def _create_checksum_file(self, output_dir):
+        # Create checkum for all files
+        flist = {}
+        for root, dirs, files in os.walk(output_dir):
+            rel_dir = root.replace(output_dir, '').lstrip('/')
+
+            for f in files:
+                fpath = os.path.join(root, f)
+                # TODO (pjoseph): To be fixed when we can
+                # retrieve image files from Launchpad
+                if os.path.getsize(fpath) != 0:
+                    flist[os.path.join(rel_dir, f)] = \
+                                                ChecksumUtils.get_md5(fpath)
+                    self.log.debug(_("Files in output_dir: {}").format(flist))
+
+                chksumfile = os.path.join(output_dir, 'checksums.txt')
+                with open(chksumfile, 'w') as c:
+                    for key in sorted(flist.keys()):
+                        c.write("{}  {}\n".format(flist[key], key))
+
+    def _create_archive(self, desc_id, output_dir):
+        """Create a tar.gz archive for the descriptor"""
+        aname = desc_id + '.tar.gz'
+        apath = os.path.join(output_dir, aname)
+        self.log.debug(_("Generating archive: {}").format(apath))
+
+        prevdir = os.getcwd()
+        os.chdir(output_dir)
+
+        # Generate the archive
+        tar_cmd = "tar zcvf {} {}".format(apath, desc_id)
+        self.log.debug(_("Generate archive: {}").format(tar_cmd))
+
+        try:
+            subprocess.check_call(tar_cmd,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE,
+                                  shell=True)
+            return apath
+
+        except subprocess.CalledProcessError as e:
+            msg = _("Error creating archive with {}: {}"). \
+                           format(tar_cmd, e)
+            self.log.error(msg)
+            raise ToscaCreateArchiveError(msg)
+
+        finally:
+            os.chdir(prevdir)
+
+    def _write_output(self, output, output_dir=None):
+        """Write the translated descriptors to disk or stdout.
+
+        Arguments:
+            output: dict keyed by ManoTemplate.VNFD/ManoTemplate.NSD;
+                    each entry holds id, name, yang text and optionally
+                    a list of supporting files
+            output_dir: directory to write into; when None, descriptors
+                        are printed to stdout instead
+
+        Returns the list of archive paths when output_dir is given and
+        self.archive is set; otherwise returns None.
+        """
+        out_files = []
+
+        if output_dir:
+            output_dir = os.path.abspath(output_dir)
+
+        if output:
+            # Do the VNFDs first and then NSDs as later when
+            # loading in launchpad, VNFDs need to be loaded first
+            for key in [ManoTemplate.VNFD, ManoTemplate.NSD]:
+                for desc in output[key]:
+                    if output_dir:
+                        desc_id = desc[ManoTemplate.ID]
+                        # Create separate directories for each descriptors
+                        # Use the descriptor id to avoid name clash
+                        subdir = os.path.join(output_dir, desc_id)
+                        os.makedirs(subdir)
+
+                        output_file = os.path.join(subdir,
+                                            desc[ManoTemplate.NAME]+'.yml')
+                        self.log.debug(_("Writing file {0}").
+                                       format(output_file))
+                        with open(output_file, 'w+') as f:
+                            f.write(desc[ManoTemplate.YANG])
+
+                        # Copy supporting files (images, scripts, charms)
+                        # referenced by the descriptor, if any
+                        if ManoTemplate.FILES in desc:
+                            self._copy_supporting_files(subdir,
+                                                desc[ManoTemplate.FILES])
+
+                        if self.archive:
+                            # Create checksum file
+                            self._create_checksum_file(subdir)
+                            out_files.append(self._create_archive(desc_id,
+                                                                  output_dir))
+                            # Remove the desc directory
+                            shutil.rmtree(subdir)
+                    else:
+                        # No output directory given: dump to stdout
+                        print(_("Descriptor {0}:\n{1}").
+                              format(desc[ManoTemplate.NAME],
+                                     desc[ManoTemplate.YANG]))
+
+            if output_dir and self.archive:
+                # Return the list of archive files
+                return out_files
+
+    def _get_file_type(self):
+        m = magic.open(magic.MAGIC_MIME)
+        m.load()
+        typ = m.file(self.in_file)
+        if typ.startswith('text/plain'):
+            # Assume to be yaml
+            return self.YAML
+        elif typ.startswith('application/zip'):
+            return self.ZIP
+        else:
+            msg = _("The file {0} is not a supported type: {1}"). \
+                  format(self.in_file, typ)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
+def main(args=None, log=None):
+    TranslatorShell(log=log).main(raw_args=args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip
new file mode 100644
index 0000000..e91aecd
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip
Binary files differ
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
new file mode 100644
index 0000000..9a68023
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
@@ -0,0 +1,390 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: Toy NS
+metadata:
+  ID: ping_pong_nsd
+  vendor: RIFT.io
+  version: 1.0
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_delay:
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: no
+        type: integer
+      config_details:
+        type: map
+      config_priority:
+        constraints:
+        - greater_than: 0
+        type: integer
+      config_template:
+        required: no
+        type: string
+      config_type:
+        type: string
+capability_types:
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+      vdu:
+        type: string
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      path:
+        type: string
+      polling_interval:
+        type: integer
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      description:
+        type: string
+      group_tag:
+        default: Group1
+        type: string
+      http_endpoint_ref:
+        type: string
+      id:
+        type: integer
+      json_query_method:
+        default: NAMEKEY
+        type: string
+      name:
+        type: string
+      units:
+        type: string
+      value_type:
+        default: INT
+        type: string
+      widget_type:
+        default: COUNTER
+        type: string
+node_types:
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+      name:
+        description: Name of the connection point
+        type: string
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: Type of the interface on VDU
+        type: string
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      port:
+        constraints:
+        - in_range: [1, 65535]
+        type: integer
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+      start_by_default:
+        type: boolean
+        default: true
+    capabilities:
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+    - vdus:
+        node: tosca.nodes.riftio.VDU1
+        occurrences: [1, UNBOUND]
+        relationship: tosca.relationships.nfv.VirtualLinksTo
+        type: tosca.capabilities.nfv.VirtualLinkable
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      cloud_init:
+        default: "#cloud-config"
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.policies.Root
+    properties:
+      primitive: map
+policy_types:
+  tosca.policies.riftio.InitialConfigPrimitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      parameter:
+        type: map
+      seq:
+        type: integer
+      user_defined_script:
+        type: string
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      config_actions:
+        type: map
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      name:
+        type: string
+      vnfd_members:
+        type: map
+topology_template:
+  policies:
+  - scaling_group_descriptor:
+      config_actions:
+        post_scale_out: ping config
+      max_instance_count: 10
+      min_instance_count: 1
+      name: ping_group
+      type: tosca.policies.riftio.ScalingGroup
+      vnfd_members:
+        ping_vnfd: 1
+  - initial_config_primitive:
+      name: start traffic
+      seq: 1
+      type: tosca.policies.riftio.InitialConfigPrimitive
+      user_defined_script: start_traffic.py
+  groups:
+    config_primitive:
+      type: tosca.groups.riftio.ConfigPrimitives
+      members:
+      - ping_vnfd
+      - pong_vnfd
+      properties:
+        primitives:
+          ping config:
+            user_defined_script: ping_config.py
+  inputs:
+    vendor:
+      type: string
+      description: Translated from YANG
+  node_templates:
+    ping_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 1
+        port: 18888
+        start_by_default: false
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 0
+          config_details:
+            script_type: bash
+          config_priority: 2
+          config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
+            ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
+            \ to configure VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server info for\
+            \ ping!\"\n    exit $rc\nfi\n\ncurl -D /dev/stdout \\\n    -H \"Accept:\
+            \ application/vnd.yang.data+xml\" \\\n    -H \"Content-Type: application/vnd.yang.data+json\"\
+            \ \\\n    -X POST \\\n    -d \"{\\\"rate\\\":$ping_rate}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set ping rate!\"\n\
+            \    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/ping/stats
+            polling_interval: 2
+            port: 18888
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/ping/stats
+              port: 18888
+            port: 18888
+            protocol: tcp
+            vdu: ping_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: ping_vnfd_iovdu_0
+    pong_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, pong.service ]\n  - [ systemctl, start, --no-block, pong.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        pong_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: pong_vnfd_iovdu_0_vm_image
+    pong_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: pong_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: pong_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    ping_pong_vld:
+      type: tosca.nodes.riftio.VL1
+      properties:
+        description: Toy VL
+        vendor: RIFT.io
+    ping_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: ping_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: ping_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    pong_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 2
+        port: 18889
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 60
+          config_details:
+            script_type: bash
+          config_priority: 1
+          config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
+            pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
+            \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
+            \ VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server(own) info\
+            \ for pong!\"\n    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/pong/stats
+            polling_interval: 2
+            port: 18889
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/pong/stats
+              port: 18889
+            port: 18889
+            protocol: tcp
+            vdu: pong_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: pong_vnfd_iovdu_0
+    ping_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, ping.service ]\n  - [ systemctl, start, --no-block, ping.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        ping_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: ping_vnfd_iovdu_0_vm_image
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml
new file mode 100644
index 0000000..494a16d
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml
@@ -0,0 +1,156 @@
+tosca_definitions_version:      tosca_simple_profile_for_nfv_1_0_0
+#tosca_default_namespace :    # Optional. default namespace (schema, types version)
+
+description: Define RIFT.io custom types
+
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_type:
+        type: string
+      config_delay:
+        type: integer
+        default: 0
+        required: no
+        constraints:
+          - greater_or_equal: 0
+      config_priority:
+        type: integer
+        constraints:
+          - greater_than: 0
+      config_details:
+        type: map
+      config_template:
+        type: string
+        required: no
+
+capability_types:
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      polling_interval:
+        type: integer
+        #type: scalar_unit.time
+      path:
+        type: string
+
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      id:
+        type: integer
+      name:
+        type: string
+      value_type:
+        type: string
+        default: INT
+      group_tag:
+        type: string
+        default: Group1
+      units:
+        type: string
+      description:
+        type: string
+      json_query_method:
+        type: string
+        default: NAMEKEY
+      http_endpoint_ref:
+        type: string
+      widget_type:
+        type: string
+        default: COUNTER
+
+node_types:
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      #vdu_ref:
+      #  type: list
+      #  description: VDUs this VNF references
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+      port:
+        type: integer
+        constraints:
+          - in_range: [1, 65535]
+    capabilities:
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      # Have not figured out how to do a list for capabilities
+      # If you specify multiple cpabilites of same type the
+      # last one is only available in parser
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+      - vdus:
+          type: tosca.capabilities.nfv.VirtualLinkable
+          relationship: tosca.relationships.nfv.VirtualLinksTo
+          node: tosca.nodes.riftio.VDU1
+          occurrences: [ 1, UNBOUND ]
+
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      count:
+        type: integer
+        default: 1
+      cloud_init :
+        type: string
+        default: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ ifup, eth1 ]\n"
+    capabilities:
+      virtual_linkable:
+        type: tosca.capabilities.nfv.VirtualLinkable
+
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      name:
+        type: string
+        description: Name of the connection point
+      cp_type:
+        type: string
+        description: Type of connection point
+      vdu_intf_name:
+        type: string
+        description: Name of interface on VDU
+      vdu_intf_type:
+        type: string
+        description: Type of interface on VDU
+
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.groups.Root
+    properties:
+      primitive:
+        type: map
+
+policy_types:
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      vnfd_members:
+        type: map
+      config_actions:
+        type: map
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta
new file mode 100644
index 0000000..2351efd
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta
@@ -0,0 +1,4 @@
+TOSCA-Meta-File-Version: 1.0
+CSAR-Version: 1.1
+Created-By: RIFT.io
+Entry-Definitions: Definitions/ping_pong_nsd.yaml
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2 b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2 b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README
new file mode 100644
index 0000000..16356a0
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README
@@ -0,0 +1 @@
+Dummy images for unit testing
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml
new file mode 100644
index 0000000..5b913ff
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+topology_template:
+  node_templates:
+    my_server:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 2
+           disk_size: 10 GB
+           mem_size: 512 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: Linux
+            distribution: RHEL
+            version: 6.5
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml
new file mode 100644
index 0000000..ea60733
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml
@@ -0,0 +1,23 @@
+tosca_definitions: tosca_simple_yaml_1_0
+
+description: Template with invalid version and topology_template section.
+
+topology_template:
+  node_temp:
+    my_server:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 2
+           disk_size: 10 GB
+           mem_size: 512 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: Linux
+            distribution: RHEL
+            version: 6.5
diff --git a/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py b/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
new file mode 100755
index 0000000..1b5b156
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Unittest for TOSCA translator to RIFT.io YANG model
+'''
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import xmlrunner
+
+import unittest
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+from rift.mano.tosca_translator.common.utils import _
+import rift.mano.tosca_translator.shell as shell
+
+from rift.mano.utils.compare_desc import CompareDescShell
+
+from rift.package import convert
+
+from toscaparser.common.exception import TOSCAException
+
+
+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+
+
+class PingPongDescriptors(object):
+    def __init__(self):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                )
+        self.ping_pong_nsd = nsd.descriptor.nsd[0]
+        self.ping_vnfd = ping_vnfd.descriptor.vnfd[0]
+        self.pong_vnfd = pong_vnfd.descriptor.vnfd[0]
+
+class TestToscaTranslator(unittest.TestCase):
+
+    # Path of the hello world TOSCA template used by most tests
+    tosca_helloworld = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "data/tosca_helloworld.yaml")
+    # Pre-built command-line arguments for the translator shell
+    template_file = '--template-file=' + tosca_helloworld
+    template_validation = "--validate-only"
+    debug="--debug"
+    failure_msg = _('The program raised an exception unexpectedly.')
+
+    log_level = logging.WARN
+    # Logger, created in setUpClass
+    log = None
+
+    # Expected descriptors (PingPongDescriptors), built in setUpClass
+    exp_descs = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s  " \
+                "(%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('tosca-translator-ut')
+        cls.log.addHandler(stderr_handler)
+        cls.exp_descs = PingPongDescriptors()
+
+    def test_missing_arg(self):
+       self.assertRaises(SystemExit, shell.main, '')
+
+    def test_invalid_file_arg(self):
+        self.assertRaises(SystemExit, shell.main, 'translate me')
+
+    def test_invalid_file_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          ('--template-file=template.txt'))
+
+    def test_invalid_type_value(self):
+        self.assertRaises(SystemExit, shell.main,
+                          (self.template_file, '--template-type=xyz'))
+
+    def test_invalid_parameters(self):
+        self.assertRaises(ValueError, shell.main,
+                          (self.template_file,
+                           '--parameters=key'))
+
+    def test_valid_template(self):
+        try:
+            shell.main([self.template_file])
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(self.failure_msg)
+
+    def test_validate_only(self):
+        try:
+            shell.main([self.template_file,
+                        self.template_validation])
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(self.failure_msg)
+
+        template = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            "data/tosca_helloworld_invalid.yaml")
+        invalid_template = '--template-file=' + template
+        self.assertRaises(TOSCAException, shell.main,
+                          [invalid_template,
+                           self.template_validation])
+
+    def compare_dict(self, gen_d, exp_d):
+        gen = "--generated="+str(gen_d)
+        exp = "--expected="+str(exp_d)
+        CompareDescShell.compare_dicts(gen, exp, log=self.log)
+
+    def check_output(self, out_dir, archive=False):
+        """Validate the descriptors produced by a translation run.
+
+        Scans out_dir for descriptor archives (archive=True) or
+        descriptor directories, locates/extracts the yaml/yml file in
+        each, deserializes it and compares it against the expected
+        ping/pong VNFDs and NSD. Raises unless exactly 3 descriptors
+        are found and all comparisons pass.
+        """
+        prev_dir = os.getcwd()
+        os.chdir(out_dir)
+        # Check the archives or directories are present
+        dirs = os.listdir(out_dir)
+        # The desc dirs are using uuid, so cannot match name
+        # Check there are 3 dirs or files
+        self.assertTrue(len(dirs) >= 3)
+
+        try:
+            count = 0
+            for a in dirs:
+                desc = None
+                if archive:
+                    # Archive mode: extract the first yaml/yml member
+                    # into the current directory (out_dir)
+                    if os.path.isfile(a):
+                        self.log.debug("Checking archive: {}".format(a))
+                        with tarfile.open(a, 'r') as t:
+                            for m in t.getnames():
+                                if m.endswith('.yaml')  or m.endswith('.yml'):
+                                    # Descriptor file
+                                    t.extract(m)
+                                    self.log.debug("Extracted file: {}".format(m))
+                                    desc = m
+                                    break
+                    else:
+                        continue
+
+                else:
+                    # Directory mode: find the first yaml/yml file
+                    if os.path.isdir(a):
+                        self.log.debug("Checking directory: {}".format(a))
+                        for m in os.listdir(a):
+                            if m.endswith('.yaml')  or m.endswith('.yml'):
+                                desc = os.path.join(a, m)
+                                break
+
+                if desc:
+                    self.log.debug("Checking descriptor: {}".format(desc))
+                    with open(desc, 'r') as d:
+                        rest, ext = os.path.splitext(desc)
+                        # Descriptor kind is inferred from the file name
+                        if '_vnfd.y' in desc:
+                            vnfd = convert.VnfdSerializer().from_file_hdl(d, ext)
+                            gen_desc = vnfd.as_dict()
+                            if 'ping_vnfd.y' in desc:
+                                exp_desc = self.exp_descs.ping_vnfd.as_dict()
+                            elif 'pong_vnfd.y' in desc:
+                                exp_desc = self.exp_descs.pong_vnfd.as_dict()
+                            else:
+                                raise Exception("Unknown VNFD descriptor: {}".
+                                                format(desc))
+                        elif '_nsd.y' in desc:
+                            nsd = convert.NsdSerializer().from_file_hdl(d, ext)
+                            gen_desc = nsd.as_dict()
+                            exp_desc = self.exp_descs.ping_pong_nsd.as_dict()
+                        else:
+                            raise Exception("Unknown file: {}".format(desc))
+
+                        # Compare the descriptors
+                        self.compare_dict(gen_desc, exp_desc)
+
+                        # Increment the count of descriptors found
+                        count += 1
+
+            if count != 3:
+                raise Exception("Did not find expected number of descriptors: {}".
+                                format(count))
+        except Exception as e:
+            self.log.exception(e)
+            raise e
+
+        finally:
+            os.chdir(prev_dir)
+
+    def test_output_dir(self):
+        """Translate the ping_pong NSD yaml and verify the generated output.
+
+        The temp output dir is removed unless debug logging is enabled.
+        """
+        test_base_dir = os.path.join(os.path.dirname(
+            os.path.abspath(__file__)), 'data')
+        template_file = os.path.join(test_base_dir,
+                            "ping_pong_csar/Definitions/ping_pong_nsd.yaml")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+        try:
+            shell.main([template, output_dir], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception in test_output_dir: {}".format(e))
+
+        else:
+            self.check_output(temp_dir)
+
+        finally:
+            # Keep the generated output for inspection when debugging
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+    def test_input_csar(self):
+        """Translate a CSAR zip with --archive and verify the archives.
+
+        The temp output dir is removed unless debug logging is enabled.
+        """
+        test_base_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            'data')
+        template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+
+        try:
+            shell.main([template, output_dir, '--archive'], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            # Fixed copy-paste: message previously named test_output_dir
+            self.fail("Exception in test_input_csar: {}".format(e))
+
+        else:
+            self.check_output(temp_dir, archive=True)
+
+        finally:
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+    def test_input_csar_no_gi(self):
+        """Translate a CSAR zip with --no-gi and --archive and verify output.
+
+        The temp output dir is removed unless debug logging is enabled.
+        """
+        test_base_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            'data')
+        template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+        no_gi = '--no-gi'
+
+        try:
+            shell.main([template, output_dir, no_gi, '--archive'], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception in input_csar_no_gi: {}".format(e))
+
+        else:
+            self.check_output(temp_dir, archive=True)
+
+        finally:
+            # Keep the generated output for inspection when debugging
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+def main():
+    """Entry point: run tests under the XML runner unless -n/--no-runner.
+
+    RIFT_MODULE_TEST is only read when the XML runner is actually used,
+    so running with --no-runner no longer requires the environment var.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+
+    runner = None
+    if not args.no_runner:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    TestToscaTranslator.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/tosca_translator/tosca-translator b/common/python/rift/mano/tosca_translator/tosca-translator
new file mode 100755
index 0000000..6895d5d
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/tosca-translator
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator import shell as translator_shell
+
+if __name__ == '__main__':
+    translator_shell.main()
diff --git a/common/python/rift/mano/tosca_translator/translator_logging.conf b/common/python/rift/mano/tosca_translator/translator_logging.conf
new file mode 100644
index 0000000..e55b02b
--- /dev/null
+++ b/common/python/rift/mano/tosca_translator/translator_logging.conf
@@ -0,0 +1,43 @@
+
+[loggers]
+keys=root,tosca-translator
+
+[handlers]
+keys=RotatingFileHandler,SysLogHandler,NullHandler
+
+[formatters]
+keys=form01
+
+[logger_root]
+level=DEBUG
+handlers=NullHandler
+
+[logger_tosca-translator]
+level=INFO
+#one can be removed based on requirements
+handlers=SysLogHandler, RotatingFileHandler
+qualname=tosca-translator
+propagate=1
+
+[handler_RotatingFileHandler]
+class=handlers.RotatingFileHandler
+level=INFO
+formatter=form01
+#rotation happens after 100MB
+args=('/var/log/rift-translator.log', 'a', 100000000, 5, 'utf8')
+
+[handler_SysLogHandler]
+class=handlers.SysLogHandler
+formatter=form01
+level=INFO
+args=('/dev/log', handlers.SysLogHandler.LOG_SYSLOG)
+
+[handler_NullHandler]
+class=NullHandler
+formatter=form01
+level=DEBUG
+args=()
+
+[formatter_form01]
+format = %(asctime)s - %(name)s - %(levelname)s - %(filename)s : %(message)s
+datefmt =
diff --git a/common/python/rift/mano/utils/__init__.py b/common/python/rift/mano/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/utils/__init__.py
diff --git a/common/python/rift/mano/utils/compare_desc.py b/common/python/rift/mano/utils/compare_desc.py
new file mode 100644
index 0000000..09b4fcd
--- /dev/null
+++ b/common/python/rift/mano/utils/compare_desc.py
@@ -0,0 +1,152 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+
+from deepdiff import DeepDiff
+
+from rift.mano.yang_translator.common.utils import _
+
+import yaml
+
+
+class CompareDescShell(object):
+    """Compare two descriptor files (YAML or JSON) using DeepDiff.
+
+    Differences whose DeepDiff category is listed in ERROR_ITEMS raise
+    ValueError; pure value changes are logged but tolerated.
+    """
+
+    SUPPORTED_TYPES = ['yaml', 'json']
+    INDENT = 2
+
+    # DeepDiff result keys of interest, bound to readable constant names
+    DIFF_KEYS = (
+        REMOVED_ITEMS,
+        ADDED_ITEMS,
+        ITER_ITEM_ADDED,
+        ITER_ITEM_REM,
+        TYPE_CHANGES,
+        VALUES_CHANGED,
+    ) = (
+        'dic_item_removed',
+        'dic_item_added',
+        'iterable_item_added',
+        'iterable_item_removed',
+        'type_changes',
+        'values_changed',
+    )
+
+    DIFF_MAP = {
+        REMOVED_ITEMS: 'Items removed',
+        ADDED_ITEMS: 'Items added',
+        ITER_ITEM_ADDED: 'Items added to list',
+        ITER_ITEM_REM: 'Items removed from list',
+        TYPE_CHANGES: 'Change in types',
+        VALUES_CHANGED: 'Change in values',
+    }
+
+    # Changes in following items are error
+    ERROR_ITEMS = [REMOVED_ITEMS, ADDED_ITEMS, ITER_ITEM_ADDED,
+                   ITER_ITEM_REM, TYPE_CHANGES, ]
+
+    @classmethod
+    def compare_dicts(cls, generated, expected, log=None):
+        """Compare two dictionaries; raise ValueError on error-class diffs."""
+        if log:
+            log.debug(_("Generated: {0}").format(generated))
+            log.debug(_("Expected: {0}").format(expected))
+
+        diff = DeepDiff(expected, generated)
+        if log:
+            log.debug(_("Keys in diff: {0}").format(diff.keys()))
+            # Log the full formatted diff once (previously the
+            # "Differences:" header was logged twice)
+            d = pprint.pformat(diff, indent=cls.INDENT)
+            log.info("Differences:\n{0}".format(d))
+
+        if len(set(cls.ERROR_ITEMS).intersection(diff.keys())):
+            diff_str = pprint.pformat(diff)
+            msg = _("Found item changes: {0}").format(diff_str)
+            if log:
+                log.error(msg)
+            raise ValueError(msg)
+
+    def main(self, log, args):
+        """Load the generated and expected files and compare them."""
+        self.log = log
+        self.log.debug(_("Args: {0}").format(args))
+        if args.type not in self.SUPPORTED_TYPES:
+            self.log.error(_("Unsupported file type {0}").
+                           format(args.type))
+            # SystemExit works even when the site-provided exit() is absent
+            raise SystemExit(1)
+
+        with open(args.generated) as g:
+            gen_data = g.read()
+            if args.type == 'yaml':
+                # NOTE(review): yaml.load without an explicit Loader can run
+                # arbitrary constructors on untrusted input; consider
+                # yaml.safe_load if these files are not trusted.
+                y_gen = yaml.load(gen_data)
+            else:
+                y_gen = json.loads(gen_data)
+
+        with open(args.expected) as e:
+            exp_data = e.read()
+            if args.type == 'yaml':
+                y_exp = yaml.load(exp_data)
+            else:
+                y_exp = json.loads(exp_data)
+
+        self.compare_dicts(y_gen, y_exp, log=self.log)
+
+
+def main(args=None, log=None):
+    """CLI entry point for descriptor comparison.
+
+    args: optional argument list (defaults to sys.argv via argparse).
+    log: optional logger; when None a basicConfig logger is created,
+         at DEBUG level if --debug was given, else ERROR.
+    """
+    parser = argparse.ArgumentParser(
+        description='Validate descriptors by comparing')
+    parser.add_argument(
+        "-g",
+        "--generated",
+        required=True,
+        help="Generated descriptor file")
+    parser.add_argument(
+        "-e",
+        "--expected",
+        required=True,
+        help="Descriptor file to compare")
+    parser.add_argument(
+        "-t",
+        "--type",
+        default='yaml',
+        help="File type. Default yaml")
+    parser.add_argument(
+        "--debug",
+        help="Enable debug logging",
+        action="store_true")
+
+    if args:
+        args = parser.parse_args(args)
+    else:
+        args = parser.parse_args()
+
+    if log is None:
+        if args.debug:
+            logging.basicConfig(level=logging.DEBUG)
+        else:
+            logging.basicConfig(level=logging.ERROR)
+        log = logging.getLogger("rwmano-translator")
+
+    CompareDescShell().main(log, args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/utils/juju_api.py b/common/python/rift/mano/utils/juju_api.py
new file mode 100644
index 0000000..3f3b086
--- /dev/null
+++ b/common/python/rift/mano/utils/juju_api.py
@@ -0,0 +1,1071 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+import argparse
+import asyncio
+from functools import partial
+import logging
+import os
+import ssl
+import sys
+import time
+
+try:
+    from jujuclient.juju1.environment import Environment as Env1
+    from jujuclient.juju2.environment import Environment as Env2
+except ImportError as e:
+    # Try importing older jujuclient
+    from jujuclient import Environment as Env1
+
+try:
+    ssl._create_default_https_context = ssl._create_unverified_context
+except AttributeError:
+    # Legacy Python doesn't verify by default (see pep-0476)
+    #   https://www.python.org/dev/peps/pep-0476/
+    pass
+
+
+# Raised when the Juju client/controller API version cannot be determined
+# or does not match expectations.
+class JujuVersionError(Exception):
+    pass
+
+
+# Base class for all Juju API failures; the specific subclasses below map
+# to the operation that failed.
+class JujuApiError(Exception):
+    pass
+
+
+class JujuEnvError(JujuApiError):
+    pass
+
+
+class JujuModelError(JujuApiError):
+    pass
+
+
+class JujuStatusError(JujuApiError):
+    pass
+
+
+class JujuUnitsError(JujuApiError):
+    pass
+
+
+class JujuWaitUnitsError(JujuApiError):
+    pass
+
+
+class JujuSrvNotDeployedError(JujuApiError):
+    pass
+
+
+class JujuAddCharmError(JujuApiError):
+    pass
+
+
+class JujuDeployError(JujuApiError):
+    pass
+
+
+class JujuDestroyError(JujuApiError):
+    pass
+
+
+class JujuResolveError(JujuApiError):
+    pass
+
+
+# Action-related failures; ApiError/InfoError/ExecError distinguish the
+# phase (API call, action lookup, action execution).
+class JujuActionError(JujuApiError):
+    pass
+
+
+class JujuActionApiError(JujuActionError):
+    pass
+
+
+class JujuActionInfoError(JujuActionError):
+    pass
+
+
+class JujuActionExecError(JujuActionError):
+    pass
+
+
+class JujuApi(object):
+    '''
+    JujuApi wrapper on jujuclient library
+
+    There should be one instance of JujuApi for each VNF manged by Juju.
+
+    Assumption:
+        Currently we use one unit per service/VNF. So once a service
+        is deployed, we store the unit name and reuse it
+'''
+    log = None
+
+    def __init__ (self,
+                  log=None,
+                  loop=None,
+                  server='127.0.0.1',
+                  port=17070,
+                  user='admin',
+                  secret=None,
+                  version=None):
+        '''Initialize with the Juju credentials.
+
+        log: logger to use (a stderr logger is created when None).
+        loop: asyncio event loop used by the coroutine wrappers.
+        server/port: Juju controller websocket endpoint.
+        user/secret: login credentials; "user-" prefix added if missing.
+        version: force Juju API version (1 or 2); auto-detected when None.
+        '''
+        self.server = server
+        self.port = port
+
+        self.secret = secret
+        if user.startswith('user-'):
+            self.user = user
+        else:
+            self.user = 'user-{}'.format(user)
+
+        self.loop = loop
+
+        if log is not None:
+            self.log = log
+        else:
+            self.log = JujuApi._get_logger()
+
+        if self.log is None:
+            raise JujuApiError("Logger not defined")
+
+        self.version = None
+        if version:
+            self.version = version
+        else:
+            # Probe whether the Juju 2.x client class was imported at module
+            # load; NameError means only the legacy 1.x client is available.
+            try:
+                if Env2:
+                    pass
+            except NameError:
+                self.log.warn("Using older version of Juju client, which " \
+                              "supports only Juju 1.x")
+                self.version = 1
+
+        endpoint = 'wss://%s:%d' % (server, int(port))
+        self.endpoint = endpoint
+
+        self.charm = None  # Charm used
+        self.service = None  # Service deployed
+        self.units = []  # Storing as list to support more units in future
+
+        self.destroy_retries = 25 # Number retries to destroy service
+        self.retry_delay = 5 # seconds
+
+    def __str__(self):
+        # Human-readable identity used throughout the log messages
+        return ("JujuApi-{}".format(self.endpoint))
+
+    @classmethod
+    def _get_logger(cls):
+        """Return the class-level logger, creating a stderr one on demand."""
+        if cls.log is not None:
+            return cls.log
+
+        fmt = logging.Formatter(
+            '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:' \
+            '%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        # NOTE(review): basicConfig() also installs a root stderr handler, so
+        # records may be emitted twice (once here, once via propagation to
+        # root) -- confirm the duplication is intended.
+        logging.basicConfig(level=logging.DEBUG)
+        cls.log = logging.getLogger('juju-api')
+        cls.log.addHandler(stderr_handler)
+
+        return cls.log
+
+    @staticmethod
+    def format_charm_name(name):
+        '''Return a valid charm service name derived from name.
+
+        Charm service names accept only a-z and "-": digits are mapped to
+        letters (0 -> a, 1 -> b, ...) and any other non-alphabetic
+        character becomes "-"; the result is lower-cased.
+        '''
+        chars = []
+        for ch in name:
+            if ch.isdigit():
+                chars.append(chr(ord('a') + int(ch)))
+            elif ch.isalpha():
+                chars.append(ch)
+            else:
+                chars.append('-')
+        return ''.join(chars).lower()
+
+    def _get_version_tag(self, tag):
+        """Map a canonical status key to the Juju 1.x or 2.x specific key.
+
+        Juju 1.x status payloads use CamelCase keys while 2.x uses
+        lower-case dashed keys; self.version selects which is returned.
+        """
+        version_tag_map = {
+            'applications': {
+                1: 'Services',
+                2: 'applications',
+            },
+            'units': {
+                1: 'Units',
+                2: 'units',
+            },
+            'status': {
+                1: 'Status',
+                2: 'status',
+            },
+            'workload-status': {
+                1: 'Workload',
+                2: 'workload-status',
+            },
+            'charm-url': {
+                1: 'CharmURL',
+                2: 'charm-url',
+            },
+        }
+
+        return version_tag_map[tag][self.version]
+
+    def _get_env1(self):
+        """Connect and log in to a Juju 1.x controller (blocking).
+
+        Returns the logged-in Environment.  ConnectionRefusedError is
+        re-raised as-is so callers can tell the controller is unreachable;
+        any other failure is wrapped in JujuEnvError.
+        """
+        try:
+            env = Env1(self.endpoint)
+            # Login result is not needed; dropped the unused binding
+            env.login(self.secret, user=self.user)
+            return env
+
+        except ConnectionRefusedError as e:
+            msg = "{}: Failed Juju 1.x connect: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise e
+
+        except Exception as e:
+            msg = "{}: Failed Juju 1.x connect: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuEnvError(msg)
+
+    def _get_env2(self):
+        """Connect and log in to a Juju 2.x controller's 'default' model.
+
+        Raises JujuVersionError when the endpoint does not speak the 2.x
+        API (so the caller can fall back to 1.x) and JujuModelError when
+        the 'default' model cannot be found or logged into.
+        """
+        try:
+            env = Env2(self.endpoint)
+            env.login(self.secret, user=self.user)
+        except KeyError as e:
+            msg = "{}: Failed Juju 2.x connect: {}".format(self, e)
+            self.log.debug(msg)
+            raise JujuVersionError(msg)
+
+        try:
+            # Fixed: 'model' could be unbound, and the endpoint/credentials
+            # referenced undefined names (endpoint, args.password, args.user)
+            model = None
+            models = env.models.list()
+            for m in models['user-models']:
+                if m['model']['name'] == 'default':
+                    # The model API is served under the model's uuid
+                    mep = '{}/model/{}/api'.format(self.endpoint,
+                                                   m['model']['uuid'])
+                    model = Env2(mep, env_uuid=m['model']['uuid'])
+                    model.login(self.secret, user=self.user)
+                    break
+
+            if model is None:
+                raise JujuModelError("{}: Did not find the 'default' model".
+                                     format(self))
+
+            return model
+
+        except Exception as e:
+            msg = "{}: Failed logging to model: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            env.close()
+            raise JujuModelError(msg)
+
+    def _get_env(self):
+        """Connect to the controller, auto-detecting the API version.
+
+        When self.version is unknown, 2.x is probed first and 1.x is used
+        as the fallback; the detected version is cached on self.
+        """
+        self.log.debug("{}: Connect to endpoint {}".
+                      format(self, self.endpoint))
+
+        if self.version is None:
+            # Try version 2 first
+            try:
+                env = self._get_env2()
+                self.version = 2
+
+            except JujuVersionError as e:
+                self.log.info("Unable to login as Juju 2.x, trying 1.x")
+                env = self._get_env1()
+                self.version = 1
+
+            return env
+
+        elif self.version == 2:
+            return self._get_env2()
+
+        elif self.version == 1:
+            return self._get_env1()
+
+        else:
+            msg = "{}: Unknown version set: {}".format(self, self.version)
+            self.log.error(msg)
+            raise JujuVersionError(msg)
+
+    @asyncio.coroutine
+    def get_env(self):
+        ''' Connect to the Juju controller'''
+        # Blocking connect is run on the loop's default executor
+        env = yield from self.loop.run_in_executor(
+            None,
+            self._get_env,
+        )
+        return env
+
+    def _get_status(self, env=None):
+        # Blocking helper: fetch the controller status dict
+        if env is None:
+            env = self._get_env()
+
+        try:
+            status = env.status()
+            return status
+
+        except Exception as e:
+            msg = "{}: exception in getting status: {}". \
+                  format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuStatusError(msg)
+
+    @asyncio.coroutine
+    def get_status(self, env=None):
+        '''Get Juju controller status'''
+        pf = partial(self._get_status, env=env)
+        status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return status
+
+    def get_all_units(self, status, service=None):
+        '''Parse the status and get the units'''
+        # Returns {service_name: {unit_name: workload_status_or_None}},
+        # optionally filtered to a single service.
+        results = {}
+        services = status.get(self._get_version_tag('applications'), {})
+
+        for svc_name, svc_data in services.items():
+            if service and service != svc_name:
+                continue
+            units = svc_data[self._get_version_tag('units')] or {}
+
+            results[svc_name] = {}
+            for unit in units:
+                results[svc_name][unit] = \
+                        units[unit][self._get_version_tag('workload-status')] \
+                        [self._get_version_tag('status')] or None
+        return results
+
+
+    def _get_service_units(self, service=None, status=None, env=None):
+        # Blocking helper: return the unit names for a service, using the
+        # cached value for the instance's own service when available.
+        if service is None:
+            service = self.service
+
+        # Optimizing calls to Juju, as currently we deploy only 1 unit per
+        # service.
+        if self.service == service and len(self.units):
+            return self.units
+
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        try:
+            resp = self.get_all_units(status, service=service)
+            self.log.debug("Get all units: {}".format(resp))
+            units = set(resp[service].keys())
+
+            if self.service == service:
+                # NOTE(review): self.units was initialized as a list but is
+                # cached here as a set -- confirm callers accept either.
+                self.units = units
+
+            return units
+
+        except Exception as e:
+            msg = "{}: exception in get units {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuUnitsError(msg)
+
+    @asyncio.coroutine
+    def get_service_units(self, service=None, status=None, env=None):
+        '''Get the unit names for a service'''
+        pf = partial(self._get_service_units,
+                     service=service,
+                     status=status,
+                     env=env)
+        units = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return units
+
+    def _get_service_status(self, service=None, status=None, env=None):
+        """Return the workload status string of a service (blocking).
+
+        Returns 'NA' when the service is not present in the status.
+        """
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        if service is None:
+            service = self.service
+
+        try:
+            srv_status = status[self._get_version_tag('applications')] \
+                         [service][self._get_version_tag('status')] \
+                         [self._get_version_tag('status')]
+            self.log.debug("{}: Service {} status is {}".
+                           format(self, service, srv_status))
+            return srv_status
+
+        except KeyError as e:
+            # Fixed log format: the first placeholder previously consumed
+            # self while the literal "self:" prefix and 'e' were dropped
+            self.log.info("{}: Did not find service {}, e={}".
+                          format(self, service, e))
+            return 'NA'
+
+        except Exception as e:
+            msg = "{}: exception checking service status for {}, e {}". \
+                  format(self, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuStatusError(msg)
+
+
+    @asyncio.coroutine
+    def get_service_status(self, service=None, status=None, env=None):
+        ''' Get service status
+
+            maintenance : The unit is not yet providing services, but is actively doing stuff.
+            unknown : Service has finished an event but the charm has not called status-set yet.
+            waiting : Service is unable to progress to an active state because of dependency.
+            blocked : Service needs manual intervention to get back to the Running state.
+            active  : Service correctly offering all the services.
+            NA      : Service is not deployed
+        '''
+        # Runs the blocking status lookup on the loop's default executor
+        pf = partial(self._get_service_status,
+                     service=service,
+                     status=status,
+                     env=env)
+        srv_status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return srv_status
+
+    # --- Service state predicates -------------------------------------
+    # Each blocking _is_service_* helper classifies the workload status
+    # string; the matching coroutine runs it on the loop's executor.
+    def _is_service_deployed(self, service=None, status=None, env=None):
+        """True unless the service status is 'terminated' or 'NA'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp not in ['terminated', 'NA']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_deployed(self, service=None, status=None, env=None):
+        '''Check if the service is deployed'''
+        pf = partial(self._is_service_deployed,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_error(self, service=None, status=None, env=None):
+        """True when the service status is 'error'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['error']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_error(self, service=None, status=None, env=None):
+        '''Check if the service is in error state'''
+        pf = partial(self._is_service_error,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_maint(self, service=None, status=None, env=None):
+        """True when the service status is 'maintenance'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['maintenance']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_maint(self, service=None, status=None, env=None):
+        '''Check if the service is in maintenance state'''
+        pf = partial(self._is_service_maint,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_active(self, service=None, status=None, env=None):
+        """True when the service status is 'active'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['active']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_active(self, service=None, status=None, env=None):
+        '''Check if the service is active'''
+        pf = partial(self._is_service_active,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_blocked(self, service=None, status=None, env=None):
+        """True when the service status is 'blocked'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['blocked']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_blocked(self, service=None, status=None, env=None):
+        '''Check if the service is blocked'''
+        pf = partial(self._is_service_blocked,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_up(self, service=None, status=None, env=None):
+        """True when the service status is 'active' or 'blocked'."""
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['active', 'blocked']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_up(self, service=None, status=None, env=None):
+        '''Check if the service is installed and up'''
+        pf = partial(self._is_service_up,
+                     service=service,
+                     status=status,
+                     env=env)
+
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _apply_config(self, config, service=None, env=None):
+        '''Blocking helper: push a config dict onto a deployed service.
+
+        Raises JujuSrvNotDeployedError if the service is not deployed;
+        re-raises any Juju API error after logging it. A None/empty
+        config is a no-op (logged as a warning).
+        '''
+        if service is None:
+            service = self.service
+
+        if config is None or len(config) == 0:
+            self.log.warn("{}: Empty config passed for service {}".
+                          format(self, service))
+            return
+
+        if env is None:
+            env = self._get_env()
+
+        status = self._get_status(env=env)
+
+        if not self._is_service_deployed(service=service,
+                                         status=status,
+                                         env=env):
+            raise JujuSrvNotDeployedError("{}: service {} is not deployed".
+                                          format(self, service))
+
+        self.log.debug("{}: Config for service {} update to: {}".
+                       format(self, service, config))
+        try:
+            # Try to fix error on service, most probably due to config issue
+            if self._is_service_error(service=service, status=status, env=env):
+                self._resolve_error(service=service, env=env)
+
+            # The set-config entry point differs between Juju 1.x and 2.x.
+            if self.version == 2:
+                env.service.set(service, config)
+            else:
+                env.set_config(service, config)
+
+        except Exception as e:
+            self.log.error("{}: exception setting config for {} with {}, e {}".
+                           format(self, service, config, e))
+            self.log.exception(e)
+            raise e
+
+    @asyncio.coroutine
+    def apply_config(self, config, service=None, env=None, wait=True):
+        '''Apply a config on the service'''
+        # Run the blocking _apply_config in the default executor so the
+        # event loop is not blocked by Juju API calls.
+        pf = partial(self._apply_config,
+                     config,
+                     service=service,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+        if wait:
+            # Wait till config finished applying
+            self.log.debug("{}: Wait for config apply to finish".
+                           format(self))
+            delay = 3  # secs
+            maint = True
+            while maint:
+                # Sleep first to give time for config_changed hook to be invoked
+                yield from asyncio.sleep(delay, loop=self.loop)
+                maint = yield from self.is_service_maint(service=service,
+                                                         env=env)
+
+        # Even with wait=False, report False if the service has already
+        # entered an error state.
+        err = yield from self.is_service_error(service=service, env=env)
+        if err:
+            self.log.error("{}: Service is in error state".
+                           format(self))
+            return False
+
+        self.log.debug("{}: Finished applying config".format(self))
+        return True
+
+    def _set_parameter(self, parameter, value, service=None):
+        return self._apply_config({parameter : value}, service=service)
+
+    @asyncio.coroutine
+    def set_parameter(self, parameter, value, service=None):
+        '''Set a config parameter for a service'''
+        return self.apply_config({parameter : value}, service=service)
+
+    def _resolve_error(self, service=None, status=None, env=None):
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        if service is None:
+            service = self.service
+
+        if env is None:
+            env = self._get_env()
+        if self._is_service_deployed(service=service, status=status):
+            units = self.get_all_units(status, service=service)
+
+            for unit, ustatus in units[service].items():
+                if ustatus == 'error':
+                    self.log.info("{}: Found unit {} with status {}".
+                                  format(self, unit, ustatus))
+                    try:
+                        # Takes the unit name as service_name/idx unlike action
+                        env.resolved(unit)
+
+                    except Exception as e:
+                        msg = "{}: Resolve on unit {}: {}". \
+                              format(self, unit, e)
+                        self.log.error(msg)
+                        self.log.exception(e)
+                        raise JujuResolveError(msg)
+
+    @asyncio.coroutine
+    def resolve_error(self, service=None, status=None, env=None):
+        '''Resolve units in error state'''
+        pf = partial(self._resolve_error,
+                     service=service,
+                     status=status,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+    def _deploy_service(self, charm, service,
+                        path=None, config=None, env=None):
+        '''Blocking helper: deploy a local charm as the given service.
+
+        Side effect: records charm/service on self for later defaulting.
+        If the service is already deployed, only (re)applies config.
+        Raises JujuAddCharmError / JujuDeployError on failure.
+        '''
+        self.log.debug("{}: Deploy service for charm {}({}) with service {}".
+                       format(self, charm, path, service))
+
+        if env is None:
+            env = self._get_env()
+
+        self.service = service
+        self.charm = charm
+
+        if self._is_service_deployed(service=service, env=env):
+            self.log.info("{}: Charm service {} already deployed".
+                          format (self, service))
+            if config:
+                self._apply_config(config, service=service, env=env)
+            return
+
+        # NOTE(review): charm series is hard-coded to trusty — confirm
+        # whether other series need to be supported.
+        series = "trusty"
+
+        deploy_to = None
+        if self.version == 1:
+            # Juju 1.x: place the unit in an LXC container on machine 0.
+            deploy_to = "lxc:0"
+
+        if path is None:
+            # Default charm location under the RIFT install tree.
+            prefix=os.getenv('RIFT_INSTALL', '/')
+            path = os.path.join(prefix, 'usr/rift/charms', series, charm)
+
+        try:
+            self.log.debug("{}: Local charm settings: dir={}, series={}".
+                           format(self, path, series))
+            result = env.add_local_charm_dir(path, series)
+            url = result[self._get_version_tag('charm-url')]
+
+        except Exception as e:
+            msg = '{}: Error setting local charm directory {} for {}: {}'. \
+                  format(self, path, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuAddCharmError(msg)
+
+        try:
+            self.log.debug("{}: Deploying using: service={}, url={}, to={}, config={}".
+                           format(self, service, url, deploy_to, config))
+            env.deploy(service, url, config=config, machine_spec=deploy_to)
+
+        except Exception as e:
+            msg = '{}: Error deploying {}: {}'.format(self, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuDeployError(msg)
+
+    @asyncio.coroutine
+    def deploy_service(self, charm, service,
+                       wait=False, timeout=300,
+                       path=None, config=None):
+        '''Deploy a service using the charm name provided'''
+        env = yield from self.get_env()
+
+        # Run the blocking deploy in the executor.
+        pf = partial(self._deploy_service,
+                     charm,
+                     service,
+                     path=path,
+                     config=config,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+        rc = True
+        if wait is True:
+            # Wait for the deployed units to start
+            try:
+                self.log.debug("{}: Waiting for service {} to come up".
+                               format(self, service))
+                # wait_for_service defaults to self.service, which
+                # _deploy_service just set to this service.
+                rc = yield from self.wait_for_service(timeout=timeout, env=env)
+
+            except Exception as e:
+                msg = '{}: Error starting all units for {}: {}'. \
+                      format(self, service, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                raise JujuWaitUnitsError(msg)
+
+        return rc
+
+    @asyncio.coroutine
+    def wait_for_service(self, service=None, timeout=0, env=None):
+        '''Wait for the service to come up
+
+        Polls every self.retry_delay seconds until the service is up or
+        the timeout (seconds) expires. Returns True when up, False on
+        timeout. With timeout == 0 no polling occurs; only the initial
+        check can return True. Raises JujuSrvNotDeployedError when the
+        service is not deployed at all.
+        '''
+        if service is None:
+            service = self.service
+
+        if env is None:
+            env = yield from self.get_env()
+
+        status = yield from self.get_status(env=env)
+
+        # NOTE(review): this calls the blocking private helper directly
+        # from the coroutine (not via the executor) — confirm the cached
+        # status makes it cheap enough.
+        if self._is_service_up(service=service, status=status, env=env):
+            self.log.debug("{}: Service {} is already up".
+                               format(self, service))
+            return True
+
+        # Check if service is deployed
+        if not self._is_service_deployed(service=service, status=status, env=env):
+            raise JujuSrvNotDeployedError("{}: service {} is not deployed".
+                                          format(self, service))
+
+        if timeout < 0:
+            timeout = 0
+
+        count = 0
+        delay = self.retry_delay # seconds
+        self.log.debug("{}: In wait for service {}".format(self, service))
+
+        start_time = time.time()
+        max_time = time.time() + timeout
+        while timeout != 0 and (time.time() <= max_time):
+            count += 1
+            rc = yield from self.is_service_up(service=service, env=env)
+            if rc:
+                self.log.debug("{}: Service {} is up after {} seconds".
+                               format(self, service, time.time()-start_time))
+                return True
+            yield from asyncio.sleep(delay, loop=self.loop)
+        return False
+
+    def _destroy_service(self, service=None):
+        '''Destroy a service on Juju controller
+
+        Blocking helper: repeatedly issues destroy (resolving any error
+        units first) until the service disappears from status, up to
+        self.destroy_retries attempts with self.retry_delay between
+        them. Raises JujuDestroyError on API failure or retry
+        exhaustion.
+        '''
+        self.log.debug("{}: Destroy charm service: {}".format(self,service))
+
+        if service is None:
+            service = self.service
+
+        env = self._get_env()
+
+        status = self._get_status(env=env)
+
+        count = 0
+        while self._is_service_deployed(service=service, status=status, env=env):
+            count += 1
+            self.log.debug("{}: Destroy service {}, count {}".
+                           format(self, service, count))
+
+            if count > self.destroy_retries:
+                msg = "{}: Not able to destroy service {} after {} tries". \
+                      format(self, service, count)
+                self.log.error(msg)
+                raise JujuDestroyError(msg)
+
+
+            # Units stuck in error block destroy; resolve them first.
+            if self._is_service_error(service=service, status=status):
+                self._resolve_error(service, status)
+
+            try:
+                env.destroy_service(service)
+
+            except Exception as e:
+                msg = "{}: Exception when running destroy on service {}: {}". \
+                      format(self, service, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                raise JujuDestroyError(msg)
+
+            # Refresh status before re-checking the loop condition.
+            time.sleep(self.retry_delay)
+            status = self._get_status(env=env)
+
+        self.log.debug("{}: Destroyed service {} ({})".
+                       format(self, service, count))
+
+    @asyncio.coroutine
+    def destroy_service(self, service=None):
+        '''Destroy a service on Juju controller'''
+        pf = partial(self._destroy_service,
+                     service=service)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+
+    def _get_action_status(self, action_tag, env=None):
+        if env is None:
+            env = self._get_env()
+
+        if not action_tag.startswith('action-'):
+            action_tag = 'action-{}'.format(action_tag)
+
+        try:
+            action = env.actions
+        except Exception as e:
+            msg = "{}: exception in Action API: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionApiError(msg)
+
+        try:
+            status = action.info([{'Tag': action_tag}])
+
+            self.log.debug("{}: Action {} status {}".
+                           format(self, action_tag, status))
+            return status['results'][0]
+
+        except Exception as e:
+            msg = "{}: exception in get action status {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionInfoError(msg)
+
+    @asyncio.coroutine
+    def get_action_status(self, action_tag, env=None):
+        '''
+        Get the status of an action queued on the controller
+
+        responds with the action status, which is one of three values:
+
+         - completed
+         - pending
+         - failed
+
+         @param action_tag - the action UUID return from the enqueue method
+         eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9
+        '''
+        pf = partial(self._get_action_status,
+                     action_tag,
+                     env=env,)
+        status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return status
+
+    def _execute_action(self, action_name, params, service=None, env=None):
+        '''Execute the action on all units of a service'''
+        if service is None:
+            service = self.service
+
+        if env is None:
+            env = self._get_env()
+
+        try:
+            action = env.actions
+        except Exception as e:
+            msg = "{}: exception in Action API: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionApiError(msg)
+
+        units = self._get_service_units(service)
+        self.log.debug("{}: Apply action {} on units {}".
+                       format(self, action_name, units))
+
+        # Rename units from <service>/<n> to unit-<service>-<n>
+        unit_tags = []
+        for unit in units:
+            idx = int(unit[unit.index('/')+1:])
+            unit_name = "unit-%s-%d" % (service, idx)
+            unit_tags.append(unit_name)
+        self.log.debug("{}: Unit tags for action: {}".
+                       format(self, unit_tags))
+
+        try:
+            result = action.enqueue_units(unit_tags, action_name, params)
+            self.log.debug("{}: Response for action: {}".
+                           format(self, result))
+            return result['results'][0]
+
+        except Exception as e:
+            msg = "{}: Exception enqueing action {} on units {} with " \
+                  "params {}: {}".format(self, action, unit_tags, params, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionExecError(msg)
+
+    @asyncio.coroutine
+    def execute_action(self, action_name, params, service=None, env=None):
+        '''Execute an action for a service on the controller
+
+        Currently, we execute the action on all units of the service
+        '''
+        pf = partial(self._execute_action,
+                     action_name,
+                     params,
+                     service=service,
+                     env=env)
+        result = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return result
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Test Juju')
+    parser.add_argument("-s", "--server", default='10.0.202.49', help="Juju controller")
+    parser.add_argument("-u", "--user", default='admin', help="User, default user-admin")
+    parser.add_argument("-p", "--password", default='nfvjuju', help="Password for the user")
+    parser.add_argument("-P", "--port", default=17070, help="Port number, default 17070")
+    parser.add_argument("-d", "--directory", help="Local directory for the charm")
+    parser.add_argument("--service", help="Charm service name")
+    parser.add_argument("--vnf-ip", help="IP of the VNF to configure")
+    args = parser.parse_args()
+
+    api = JujuApi(server=args.server,
+                  port=args.port,
+                  user=args.user,
+                  secret=args.password)
+
+    env = api._get_env()
+    if env is None:
+        raise "Not able to login to the Juju controller"
+
+    print("Status: {}".format(api._get_status(env=env)))
+
+    if args.directory and args.service:
+        # Deploy the charm
+        charm = os.path.basename(args.directory)
+        api._deploy_service(charm, args.service,
+                            path=args.directory,
+                            env=env)
+
+        while not api._is_service_up():
+            time.sleep(5)
+
+        print ("Service {} is deployed with status {}".
+               format(args.service, api._get_service_status()))
+
+        if args.vnf_ip and \
+           ('clearwater-aio' in args.directory):
+            # Execute config on charm
+            api._apply_config({'proxied_ip': args.vnf_ip})
+
+            while not api._is_service_active():
+                time.sleep(10)
+
+            print ("Service {} is in status {}".
+                   format(args.service, api._get_service_status()))
+
+            res = api._execute_action('create-update-user', {'number': '125252352525',
+                                                             'password': 'asfsaf'})
+
+            print ("Action 'creat-update-user response: {}".format(res))
+
+            status = res['status']
+            while status not in [ 'completed', 'failed' ]:
+                time.sleep(2)
+                status = api._get_action_status(res['action']['tag'])['status']
+
+                print("Action status: {}".format(status))
+
+            # This action will fail as the number is non-numeric
+            res = api._execute_action('delete-user', {'number': '125252352525asf'})
+
+            print ("Action 'delete-user response: {}".format(res))
+
+            status = res['status']
+            while status not in [ 'completed', 'failed' ]:
+                time.sleep(2)
+                status = api._get_action_status(res['action']['tag'])['status']
+
+                print("Action status: {}".format(status))
diff --git a/common/python/rift/mano/yang_translator/__init__.py b/common/python/rift/mano/yang_translator/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/common/__init__.py b/common/python/rift/mano/yang_translator/common/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/common/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/common/exception.py b/common/python/rift/mano/yang_translator/common/exception.py
new file mode 100644
index 0000000..4d51ebb
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/common/exception.py
@@ -0,0 +1,243 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+'''
+Exceptions for the YANG Translator package.
+'''
+
+import logging
+import sys
+import traceback
+
+from rift.mano.yang_translator.common.utils import _
+
+log = logging.getLogger(__name__)
+
+
+class YANGException(Exception):
+    '''Base exception class for YANG
+
+    To correctly use this class, inherit from it and define
+    a 'msg_fmt' property.
+
+    '''
+
+    _FATAL_EXCEPTION_FORMAT_ERRORS = False
+
+    message = _('An unknown exception occurred.')
+
+    def __init__(self, **kwargs):
+        try:
+            self.message = self.msg_fmt % kwargs
+        except KeyError:
+            exc_info = sys.exc_info()
+            log.exception(_('Exception in string format operation: %s')
+                          % exc_info[1])
+
+            if YANGException._FATAL_EXCEPTION_FORMAT_ERRORS:
+                raise exc_info[0]
+
+    def __str__(self):
+        return self.message
+
+    @staticmethod
+    def generate_inv_schema_property_error(self, attr, value, valid_values):
+        msg = (_('Schema definition of "%(propname)s" has '
+                 '"%(attr)s" attribute with invalid value '
+                 '"%(value1)s". The value must be one of '
+                 '"%(value2)s".') % {"propname": self.name,
+                                     "attr": attr,
+                                     "value1": value,
+                                     "value2": valid_values})
+        ExceptionCollector.appendException(
+            InvalidSchemaError(message=msg))
+
+    @staticmethod
+    def set_fatal_format_exception(flag):
+        if isinstance(flag, bool):
+            YANGException._FATAL_EXCEPTION_FORMAT_ERRORS = flag
+
+
+# ---------------------------------------------------------------------------
+# Concrete translator exceptions. Each subclass only supplies msg_fmt;
+# YANGException.__init__ interpolates keyword arguments into it.
+# ---------------------------------------------------------------------------
+class MissingRequiredFieldError(YANGException):
+    msg_fmt = _('%(what)s is missing required field "%(required)s".')
+
+
+class UnknownFieldError(YANGException):
+    msg_fmt = _('%(what)s contains unknown field "%(field)s". Refer to the '
+                'definition to verify valid values.')
+
+
+class TypeMismatchError(YANGException):
+    msg_fmt = _('%(what)s must be of type "%(type)s".')
+
+
+class InvalidNodeTypeError(YANGException):
+    msg_fmt = _('Node type "%(what)s" is not a valid type.')
+
+
+class InvalidTypeError(YANGException):
+    msg_fmt = _('Type "%(what)s" is not a valid type.')
+
+
+class InvalidSchemaError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class ValidationError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class UnknownInputError(YANGException):
+    msg_fmt = _('Unknown input "%(input_name)s".')
+
+
+class InvalidPropertyValueError(YANGException):
+    msg_fmt = _('Value of property "%(what)s" is invalid.')
+
+
+class InvalidTemplateVersion(YANGException):
+    msg_fmt = _('The template version "%(what)s" is invalid. '
+                'Valid versions are "%(valid_versions)s".')
+
+
+class InvalidYANGVersionPropertyException(YANGException):
+    msg_fmt = _('Value of YANG version property "%(what)s" is invalid.')
+
+
+class URLException(YANGException):
+    msg_fmt = _('%(what)s')
+
+
+class YangExtImportError(YANGException):
+    msg_fmt = _('Unable to import extension "%(ext_name)s". '
+                'Check to see that it exists and has no '
+                'language definition errors.')
+
+
+class YangExtAttributeError(YANGException):
+    msg_fmt = _('Missing attribute in extension "%(ext_name)s". '
+                'Check to see that it has required attributes '
+                '"%(attrs)s" defined.')
+
+
+class InvalidGroupTargetException(YANGException):
+    msg_fmt = _('"%(message)s"')
+
+
+class ConfFileParseError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class ConfOptionNotDefined(YANGException):
+    msg_fmt = _('Option %(key)s in section %(section)s '
+                'is not defined in conf file')
+
+
+class ConfSectionNotDefined(YANGException):
+    msg_fmt = _('Section %(section)s is not defined in conf file')
+
+
+class YangModImportError(YANGException):
+    msg_fmt = _('Unable to import module %(mod_name)s. '
+                'Check to see that it exists and has no '
+                'language definition errors.')
+
+
+class YangClassImportError(YANGException):
+    msg_fmt = _('Unable to import class %(name)s in '
+                'module %(mod_name)s. Check to see that it '
+                'exists and has no language definition errors.')
+
+
+class YangClassAttributeError(YANGException):
+    msg_fmt = _('Class attribute referenced not found. '
+                '%(message)s. Check to see that it is defined.')
+
+
+class ExceptionCollector(object):
+    '''Process-wide accumulator for translator exceptions.
+
+    State is held in class attributes, so collection is global and not
+    thread-safe. While collecting is True, appendException() records
+    exceptions (with a captured stack trace) instead of raising them.
+    '''
+
+    exceptions = []
+    collecting = False
+
+    @staticmethod
+    def clear():
+        # Mutate in place so existing references see the cleared list.
+        del ExceptionCollector.exceptions[:]
+
+    @staticmethod
+    def start():
+        ExceptionCollector.clear()
+        ExceptionCollector.collecting = True
+
+    @staticmethod
+    def stop():
+        ExceptionCollector.collecting = False
+
+    @staticmethod
+    def contains(exception):
+        # Deduplicate by string representation, not identity.
+        for ex in ExceptionCollector.exceptions:
+            if str(ex) == str(exception):
+                return True
+        return False
+
+    @staticmethod
+    def appendException(exception):
+        if ExceptionCollector.collecting:
+            if not ExceptionCollector.contains(exception):
+                # [:-1] drops this appendException frame from the trace.
+                exception.trace = traceback.extract_stack()[:-1]
+                ExceptionCollector.exceptions.append(exception)
+        else:
+            # Not collecting: behave like a normal raise.
+            raise exception
+
+    @staticmethod
+    def exceptionsCaught():
+        return len(ExceptionCollector.exceptions) > 0
+
+    @staticmethod
+    def getTraceString(traceList):
+        # Render a captured stack (as from traceback.extract_stack)
+        # into an indented, human-readable string.
+        traceString = ''
+        for entry in traceList:
+            f, l, m, c = entry[0], entry[1], entry[2], entry[3]
+            traceString += (_('\t\tFile %(file)s, line %(line)s, in '
+                              '%(method)s\n\t\t\t%(call)s\n')
+                            % {'file': f, 'line': l, 'method': m, 'call': c})
+        return traceString
+
+    @staticmethod
+    def getExceptionReportEntry(exception, full=True):
+        entry = exception.__class__.__name__ + ': ' + str(exception)
+        if full:
+            entry += '\n' + ExceptionCollector.getTraceString(exception.trace)
+        return entry
+
+    @staticmethod
+    def getExceptions():
+        return ExceptionCollector.exceptions
+
+    @staticmethod
+    def getExceptionsReport(full=True):
+        report = []
+        for exception in ExceptionCollector.exceptions:
+            report.append(
+                ExceptionCollector.getExceptionReportEntry(exception, full))
+        return report
+
+    @staticmethod
+    def assertExceptionMessage(exception, message):
+        # Test helper: assert that the collected report contains the
+        # given exception class + message pair.
+        err_msg = exception.__name__ + ': ' + message
+        report = ExceptionCollector.getExceptionsReport(False)
+        assert err_msg in report, (_('Could not find "%(msg)s" in "%(rep)s".')
+                                   % {'rep': report.__str__(), 'msg': err_msg})
diff --git a/common/python/rift/mano/yang_translator/common/utils.py b/common/python/rift/mano/yang_translator/common/utils.py
new file mode 100644
index 0000000..c44e8f5
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/common/utils.py
@@ -0,0 +1,231 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gettext
+import numbers
+import os
+
+from six.moves.urllib.parse import urlparse
+
+import yaml
+
+# Locale directory comes from the YANG-TRANSLATOR_LOCALEDIR environment
+# variable, read once at import time; falls back to built-in messages.
+_localedir = os.environ.get('yang-translator'.upper() + '_LOCALEDIR')
+_t = gettext.translation('yang-translator', localedir=_localedir,
+                         fallback=True)
+
+
+# Translation helper used for all user-visible messages in this package.
+def _(msg):
+    return _t.gettext(msg)
+
+
+class CompareUtils(object):
+
+    MISMATCH_VALUE1_LABEL = "<Expected>"
+    MISMATCH_VALUE2_LABEL = "<Provided>"
+    ORDERLESS_LIST_KEYS = ['allowed_values', 'depends_on']
+
+    @staticmethod
+    def compare_dicts(dict1, dict2, log):
+        """Return False if not equal, True if both are equal."""
+
+        if dict1 is None and dict2 is None:
+            return True
+        if dict1 is None or dict2 is None:
+            return False
+
+        both_equal = True
+        for dict1_item, dict2_item in zip(dict1.items(), dict2.items()):
+            if dict1_item != dict2_item:
+                msg = (_("%(label1)s: %(item1)s \n is not equal to \n:"
+                         "%(label2)s: %(item2)s")
+                       % {'label1': CompareUtils.MISMATCH_VALUE2_LABEL,
+                          'item1': dict1_item,
+                          'label2': CompareUtils.MISMATCH_VALUE1_LABEL,
+                          'item2': dict2_item})
+                log.warning(msg)
+                both_equal = False
+                break
+        return both_equal
+
+    @staticmethod
+    def reorder(dic):
+        '''Canonicalize list items in the dictionary for ease of comparison.
+
+        For properties whose value is a list in which the order does not
+        matter, some pre-processing is required to bring those lists into a
+        canonical format. We use sorting just to make sure such differences
+        in ordering would not cause to a mismatch.
+        '''
+
+        if type(dic) is not dict:
+            return None
+
+        reordered = {}
+        for key in dic.keys():
+            value = dic[key]
+            if type(value) is dict:
+                reordered[key] = CompareUtils.reorder(value)
+            elif type(value) is list \
+                and key in CompareUtils.ORDERLESS_LIST_KEYS:
+                reordered[key] = sorted(value)
+            else:
+                reordered[key] = value
+        return reordered
+
+    @staticmethod
+    def diff_dicts(dict1, dict2, reorder=True):
+        '''Compares two dictionaries and returns their differences.
+
+        Returns a dictionary of mismatches between the two dictionaries.
+        An empty dictionary is returned if two dictionaries are equivalent.
+        The reorder parameter indicates whether reordering is required
+        before comparison or not.
+        '''
+
+        if reorder:
+            dict1 = CompareUtils.reorder(dict1)
+            dict2 = CompareUtils.reorder(dict2)
+
+        if dict1 is None and dict2 is None:
+            return {}
+        if dict1 is None or dict2 is None:
+            return {CompareUtils.MISMATCH_VALUE1_LABEL: dict1,
+                    CompareUtils.MISMATCH_VALUE2_LABEL: dict2}
+
+        diff = {}
+        keys1 = set(dict1.keys())
+        keys2 = set(dict2.keys())
+        for key in keys1.union(keys2):
+            if key in keys1 and key not in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: dict1[key],
+                             CompareUtils.MISMATCH_VALUE2_LABEL: None}
+            elif key not in keys1 and key in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: None,
+                             CompareUtils.MISMATCH_VALUE2_LABEL: dict2[key]}
+            else:
+                val1 = dict1[key]
+                val2 = dict2[key]
+                if val1 != val2:
+                    if type(val1) is dict and type(val2) is dict:
+                        diff[key] = CompareUtils.diff_dicts(val1, val2, False)
+                    else:
+                        diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: val1,
+                                     CompareUtils.MISMATCH_VALUE2_LABEL: val2}
+        return diff
+
+
+class YamlUtils(object):
+
+    @staticmethod
+    def get_dict(yaml_file):
+        '''Returns the dictionary representation of the given YAML spec.'''
+        try:
+            return yaml.load(open(yaml_file))
+        except IOError:
+            return None
+
+    @staticmethod
+    def compare_yamls(yaml1_file, yaml2_file):
+        '''Returns true if two dictionaries are equivalent, false otherwise.'''
+        dict1 = YamlUtils.get_dict(yaml1_file)
+        dict2 = YamlUtils.get_dict(yaml2_file)
+        return CompareUtils.compare_dicts(dict1, dict2)
+
+    @staticmethod
+    def compare_yaml_dict(yaml_file, dic):
+        '''Returns true if yaml matches the dictionary, false otherwise.'''
+        return CompareUtils.compare_dicts(YamlUtils.get_dict(yaml_file), dic)
+
+
+class UrlUtils(object):
+
+    @staticmethod
+    def validate_url(path):
+        """Validates whether the given path is a URL or not.
+
+        If the given path includes a scheme (http, https, ftp, ...) and a net
+        location (a domain name such as www.github.com) it is validated as a
+        URL.
+        """
+        parsed = urlparse(path)
+        return bool(parsed.scheme) and bool(parsed.netloc)
+
+
+def str_to_num(value):
+    """Convert a string representation of a number into a numeric type."""
+    if isinstance(value, numbers.Number):
+        return value
+    try:
+        return int(value)
+    except ValueError:
+        return float(value)
+
+
+def map_name_to_python(name):
+    if name == 'type':
+        return 'type_yang'
+    return name.replace('-', '_')
+
+
+def convert_keys_to_python(d):
+    '''Change all keys from - to _'''
+    if isinstance(d, dict):
+        for key in d.keys():
+            d[map_name_to_python(key)] = convert_keys_to_python(d.pop(key))
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_python(memb))
+        return arr
+    else:
+        return d
+
+# Map a python attribute name back to YANG style. NOTE(review): this is
+# not a full inverse of map_name_to_python — 'type_yang' becomes
+# 'type-yang', not 'type'; confirm whether that is intended.
+def map_name_to_yang(name):
+    return name.replace('_', '-')
+
+
+def convert_keys_to_yang(d):
+    '''Change all keys from _ to -'''
+    if isinstance(d, dict):
+        for key in d.keys():
+            d[map_name_to_python(key)] = convert_keys_to_yang(d.pop(key))
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_yang(memb))
+        return arr
+    else:
+        return d
+
+
+def stringify_dict(d):
+    '''Convert all integer, float, etc to str'''
+    if isinstance(d, dict):
+        for key in d.keys():
+                d[key] = stringify_dict(d[key])
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(stringify_dict(memb))
+        return arr
+    else:
+        if not isinstance(d, str):
+            return str(d)
+        return d
diff --git a/common/python/rift/mano/yang_translator/compare_desc.py b/common/python/rift/mano/yang_translator/compare_desc.py
new file mode 100644
index 0000000..39fdd85
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/compare_desc.py
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+
+from deepdiff import DeepDiff
+
+from toscaparser.utils.gettextutils import _
+
+
class CompareDescShell(object):
    """Compare a generated descriptor file against an expected one.

    The two JSON documents are diffed with DeepDiff; differences that are
    considered fatal (removed items or type changes) raise ValueError.
    """

    SUPPORTED_TYPES = ['json']
    INDENT = 2
    DIFF_KEYS = (REMOVED_ITEMS, ADDED_ITEMS, TYPE_CHANGES, VALUES_CHANGED) = \
                ('dic_item_removed', 'dic_item_added', 'type_changes',
                 'values_changed')
    DIFF_MAP = {REMOVED_ITEMS: 'Items removed',
                ADDED_ITEMS: 'Items added',
                TYPE_CHANGES: 'Changes in types',
                VALUES_CHANGED: 'Changes in values'}
    # Currently considering changes in removed keys or type changes
    # as error.
    ERROR_ITEMS = [REMOVED_ITEMS, TYPE_CHANGES]

    def main(self, log, args):
        """Load both files, log the diff, and fail on error-level changes.

        Args:
            log: logger instance used for all output.
            args: parsed argparse namespace (generated_file, expected_file,
                  type, debug).

        Raises:
            SystemExit: when args.type is not a supported file type.
            ValueError: when the diff contains any ERROR_ITEMS key.
        """
        self.log = log
        # Original also had a stray debug print() here; dropped in favor
        # of the log call alone.
        self.log.debug(_("Args: {0}").format(args))
        if args.type not in self.SUPPORTED_TYPES:
            self.log.error(_("Unsupported file type {0}").
                           format(args.type))
            exit(1)

        with open(args.generated_file) as g:
            json_gen = json.loads(g.read())
            self.log.debug(_("Generated: {0}").format(json_gen))

        with open(args.expected_file) as e:
            json_exp = json.loads(e.read())
            self.log.debug(_("Expected: {0}").format(json_exp))

        diff = DeepDiff(json_exp, json_gen)
        self.log.debug(_("Keys in diff: {0}").format(diff.keys()))

        # Log the full diff once (the original emitted a bare
        # "Differences:" info line immediately followed by this one).
        d = pprint.pformat(diff, indent=self.INDENT)
        self.log.info("Differences:\n{0}".format(d))

        if len(set(self.ERROR_ITEMS).intersection(diff.keys())):
            diff_str = pprint.pformat(diff)
            msg = _("Found item changes: {0}").format(diff_str)
            self.log.error(msg)
            raise ValueError(msg)
+
+
def main(args=None):
    """CLI entry point: parse arguments, set up logging, run the compare.

    Args:
        args: optional argv-style list; when falsy, sys.argv is used.
    """
    parser = argparse.ArgumentParser(
        description='Validate descriptors by comparing')
    parser.add_argument("-g", "--generated-file", required=True,
                        help="Generated descriptor file")
    parser.add_argument("-e", "--expected-file", required=True,
                        help="Descriptor to compare")
    parser.add_argument("-t", "--type", default='json',
                        help="File type. Default json")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug logging")
    parsed = parser.parse_args(args) if args else parser.parse_args()

    # Debug flag switches the root logger between DEBUG and ERROR.
    logging.basicConfig(level=logging.DEBUG if parsed.debug
                        else logging.ERROR)
    log = logging.getLogger("rwmano-translator")

    CompareDescShell().main(log, parsed)


if __name__ == '__main__':
    main()
diff --git a/common/python/rift/mano/yang_translator/conf/__init__.py b/common/python/rift/mano/yang_translator/conf/__init__.py
new file mode 100644
index 0000000..b718eee
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/conf/__init__.py
@@ -0,0 +1,39 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Initialize the global configuration for the translator '''
+
+import os
+
+from rift.mano.yang_translator.conf.config import ConfigProvider
+
CONF_FILENAME = 'translator.conf'


def init_global_conf():
    '''Initialize the configuration provider.

    Allows the configuration to be shared throughout the translator code.
    The file used is translator.conf, located within the conf/ directory.
    It is a standard ini format and is processed using the ConfigParser
    module.
    '''
    conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             CONF_FILENAME)
    ConfigProvider._load_config(conf_file)


# Load the configuration once at package import time.
init_global_conf()
diff --git a/common/python/rift/mano/yang_translator/conf/config.py b/common/python/rift/mano/yang_translator/conf/config.py
new file mode 100644
index 0000000..631db40
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/conf/config.py
@@ -0,0 +1,70 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Provide a global configuration for the TOSCA translator'''
+
+import rift.mano.yang_translator.common.exception as exception
+from rift.mano.yang_translator.common.utils import _
+
+from six.moves import configparser
+
+
class ConfigProvider(object):
    '''Global config proxy that wraps a ConfigParser object.

    Allows for class based access to config values. Should only be
    initialized once using the corresponding translator.conf file in the
    conf directory.
    '''

    # List that captures all of the conf file sections.
    # Append any new sections to this list.
    _sections = ['DEFAULT']
    # Shared ConfigParser instance; populated once by _load_config().
    _translator_config = None

    @classmethod
    def _load_config(cls, conf_file):
        '''Private method only to be called once from the __init__ module.

        Raises:
            exception.ConfFileParseError: when the file is malformed.
                (Note: ConfigParser.read() silently ignores a missing
                file, so absence alone does not raise.)
        '''
        cls._translator_config = configparser.ConfigParser()
        try:
            cls._translator_config.read(conf_file)
        except configparser.ParsingError:
            # Fixed: the original concatenated literals produced
            # "file.Check" with no separating space.
            msg = _('Unable to parse translator.conf file. '
                    'Check to see that it exists in the conf directory.')
            raise exception.ConfFileParseError(message=msg)

    @classmethod
    def get_value(cls, section, key):
        '''Return the value of key in section as a string.

        Raises:
            exception.ConfOptionNotDefined: key missing from section.
            exception.ConfSectionNotDefined: section missing entirely.
        '''
        try:
            value = cls._translator_config.get(section, key)
        except configparser.NoOptionError:
            raise exception.ConfOptionNotDefined(key=key, section=section)
        except configparser.NoSectionError:
            raise exception.ConfSectionNotDefined(section=section)

        return value

    @classmethod
    def get_all_values(cls):
        '''Return all (key, value) pairs from every registered section.'''
        values = []
        for section in cls._sections:
            try:
                values.extend(cls._translator_config.items(section=section))
            except configparser.NoSectionError:
                # Fixed: the original caught NoOptionError here, but
                # items() raises NoSectionError for a missing section, so
                # the intended translation to ConfSectionNotDefined never
                # happened.
                raise exception.ConfSectionNotDefined(section=section)

        return values
diff --git a/common/python/rift/mano/yang_translator/conf/translator.conf b/common/python/rift/mano/yang_translator/conf/translator.conf
new file mode 100644
index 0000000..23214f3
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/conf/translator.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+# Relative path location for custom types
+custom_types_location=rift/mano/yang_translator/custom/rwmano
\ No newline at end of file
diff --git a/common/python/rift/mano/yang_translator/custom/__init__.py b/common/python/rift/mano/yang_translator/custom/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/custom/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/custom/rwmano/__init__.py b/common/python/rift/mano/yang_translator/custom/rwmano/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/custom/rwmano/__init__.py
diff --git a/common/python/rift/mano/yang_translator/rwmano/__init__.py b/common/python/rift/mano/yang_translator/rwmano/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py b/common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py
new file mode 100644
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
new file mode 100644
index 0000000..f05933b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
@@ -0,0 +1,221 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from rift.mano.yang_translator.common.utils import _
+
+
class ToscaResource(object):
    '''Base class for YANG node type translation to RIFT.io TOSCA type.'''

    # Used when creating the resource, so keeping separate
    # from REQUIRED_FIELDS below
    NAME = 'name'

    REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID) = \
                      ('description', 'version', 'vendor', 'id')

    COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE) = \
                    ('path', 'port', 'host', 'xpath', 'type', 'count', 'file')

    IGNORE_FIELDS = ['short_name']

    FIELD_TYPES = (STRING, MAP, INTEGER, BOOL) = \
                  ('string', 'map', 'integer', 'boolean',)

    YANG_KEYS = (VLD, NSD, VNFD, VDU, DASHBOARD_PARAMS,
                 CONFIG_ATTR, CONFIG_TMPL,
                 CONFIG_TYPE, CONFIG_DETAILS, EXT_INTF,
                 VIRT_INTF, POLL_INTVL_SECS,
                 MEM_VNF_INDEX_REF, VNFD_ID_REF,
                 MEM_VNF_INDEX, VNF_CONFIG, TYPE_Y,
                 USER_DEF_SCRIPT, SEQ, PARAM,
                 VALUE, START_BY_DFLT,) = \
                ('vld', 'nsd', 'vnfd', 'vdu', 'dashboard_params',
                 'config_attributes', 'config_template',
                 'config_type', 'config_details', 'external_interface',
                 'virtual_interface', 'polling_interval_secs',
                 'member_vnf_index_ref', 'vnfd_id_ref',
                 'member_vnf_index', 'vnf_configuration', 'type_yang',
                 'user_defined_script', 'seq', 'parameter',
                 'value', 'start_by_default',)

    # NOTE(review): REALTIONSHIPS is a typo for RELATIONSHIPS and DEFAULT
    # is unpacked twice (both to 'default'); both are kept as-is since
    # other modules may already reference these class attributes.
    TOSCA_FIELDS = (DERIVED_FROM, PROPERTIES, DEFAULT, REQUIRED,
                    NO, CONSTRAINTS, REALTIONSHIPS,
                    REQUIREMENTS, UNBOUND, NODE,
                    OCCURENCES, PRIMITIVES, MEMBERS,
                    POLL_INTVL, DEFAULT, TRUE, FALSE,) = \
                   ('derived_from', 'properties', 'default', 'required',
                    'no', 'constraints', 'relationships',
                    'requirements', 'UNBOUND', 'node',
                    'occurences', 'primitives', 'members',
                    'polling_interval', 'default', 'true', 'false')

    TOSCA_SEC = (DATA_TYPES, CAPABILITY_TYPES, NODE_TYPES,
                 GROUP_TYPES, POLICY_TYPES, REQUIREMENTS,
                 ARTIFACTS, PROPERTIES, INTERFACES,
                 CAPABILITIES, RELATIONSHIP,
                 ARTIFACT_TYPES) = \
                ('data_types', 'capability_types', 'node_types',
                 'group_types', 'policy_types', 'requirements',
                 'artifacts', 'properties', 'interfaces',
                 'capabilities', 'relationship',
                 'artifact_types')

    TOSCA_TMPL = (INPUTS, NODE_TMPL, GROUPS, POLICIES,
                  METADATA, TOPOLOGY_TMPL, OUTPUTS) = \
                 ('inputs', 'node_templates', 'groups', 'policies',
                  'metadata', 'topology_template', 'outputs')

    TOSCA_DERIVED = (
        T_VNF_CONFIG,
        T_HTTP_EP,
        T_MGMT_INTF,
        T_MON_PARAM,
        T_VNF1,
        T_VDU1,
        T_CP1,
        T_VL1,
        T_CONF_PRIM,
        T_SCALE_GRP,
        T_ARTF_QCOW2,
        T_INITIAL_CFG,
    ) = \
        ('tosca.datatypes.network.riftio.vnf_configuration',
         'tosca.capabilities.riftio.http_endpoint_type',
         'tosca.capabilities.riftio.mgmt_interface_type',
         'tosca.capabilities.riftio.monitoring_param',
         'tosca.nodes.riftio.VNF1',
         'tosca.nodes.riftio.VDU1',
         'tosca.nodes.riftio.CP1',
         'tosca.nodes.riftio.VL1',
         'tosca.groups.riftio.ConfigPrimitives',
         'tosca.policies.riftio.ScalingGroup',
         'tosca.artifacts.Deployment.Image.riftio.QCOW2',
         'tosca.policies.riftio.InitialConfigPrimitive'
        )

    SUPPORT_FILES = (SRC, DEST, EXISTING) = \
                    ('source', 'destination', 'existing')

    SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR,) = \
                   ('images', 'scripts',)

    def __init__(self,
                 log,
                 name,
                 type_,
                 yang):
        '''Store the yang fragment to translate.

        Args:
            log: logger for translation messages.
            name: resource name (yang 'name' value).
            type_: yang descriptor type, e.g. 'nsd' or 'vnfd'.
            yang: dict with the remaining yang fields for this node.
        '''
        self.log = log
        self.name = name
        self.type_ = type_
        self.yang = yang
        self.id_ = None
        # Fixed: backing fields for the vendor/version properties were
        # never initialized, so reading those properties raised
        # AttributeError; self.metadata is defaulted too (subclasses may
        # assign it before calling super().__init__, hence the guard).
        self._vendor = None
        self._version = None
        if not hasattr(self, 'metadata'):
            self.metadata = None
        log.debug(_('Translating YANG node %(name)s of type %(type)s') %
                  {'name': self.name,
                   'type': self.type_})

    # Added the below property methods to support methods that
    # work on both toscaparser.NodeType and translator.ToscaResource
    @property
    def type(self):
        return self.type_

    @type.setter
    def type(self, value):
        self.type_ = value

    def get_type(self):
        return self.type_

    @property
    def id(self):
        return self.id_

    @id.setter
    def id(self, value):
        self.id_ = value

    @property
    def description(self):
        return _("Translated from YANG")

    @property
    def vendor(self):
        '''Vendor from metadata, defaulting to "RIFT.io" (cached).'''
        if self._vendor is None:
            if self.metadata and 'vendor' in self.metadata:
                self._vendor = self.metadata['vendor']
            else:
                self._vendor = "RIFT.io"
        return self._vendor

    @property
    def version(self):
        '''Version string from metadata, defaulting to "1.0" (cached).'''
        if self._version is None:
            if self.metadata and 'version' in self.metadata:
                self._version = str(self.metadata['version'])
            else:
                self._version = '1.0'
        return self._version

    def __str__(self):
        return "%s(%s)" % (self.name, self.type)

    def map_yang_name_to_tosca(self, name):
        '''Map a yang identifier to TOSCA style (underscores to hyphens).'''
        return name.replace("_", "-")

    def map_keys_to_tosca(self, d):
        '''Recursively rename all dict keys to TOSCA style.'''
        if isinstance(d, dict):
            # Fixed: iterate over a snapshot of the keys; popping and
            # inserting while iterating d.keys() directly raises
            # RuntimeError on Python 3.
            for key in list(d.keys()):
                d[self.map_yang_name_to_tosca(key)] = \
                                    self.map_keys_to_tosca(d.pop(key))
            return d
        elif isinstance(d, list):
            return [self.map_keys_to_tosca(memb) for memb in d]
        else:
            return d

    def handle_yang(self):
        '''Translate self.yang; subclasses are expected to override.'''
        self.log.debug(_("Need to implement handle_yang for {0}").
                       format(self))

    def remove_ignored_fields(self, d):
        '''Remove keys in dict not used'''
        for key in self.IGNORE_FIELDS:
            if key in d:
                d.pop(key)

    def generate_tosca_type(self, tosca):
        '''Emit TOSCA type definitions; subclasses override.'''
        self.log.debug(_("Need to implement generate_tosca_type for {0}").
                       format(self))

    def generate_tosca_model(self, tosca):
        '''Emit TOSCA template nodes; subclasses override.'''
        self.log.debug(_("Need to implement generate_tosca_model for {0}").
                       format(self))

    def get_supporting_files(self):
        """Get list of other required files for each resource"""
        pass

    def get_matching_item(self, name, items, key=None):
        '''Return the first dict in items whose [key] equals name, or None.

        key defaults to 'name'.
        '''
        if key is None:
            key = 'name'
        for entry in items:
            if entry[key] == name:
                return entry
        return None
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
new file mode 100644
index 0000000..7c31df5
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
@@ -0,0 +1,135 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+
+import textwrap
+
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+import yaml
+
+
class ToscaTemplate(object):
    '''Container for full RIFT.io TOSCA template.'''

    KEYS = (TOSCA, FILES) = ('tosca', 'files')

    def __init__(self, log):
        self.log = log
        # Translated resources collected before rendering.
        self.resources = []

    def output_to_tosca(self):
        '''Render one YAML TOSCA template (plus support files) per NSD.

        Returns:
            dict keyed by NSD name: {name: {'tosca': yaml_str[, 'files': [...]]}}
        '''
        self.log.debug(_('Converting translated output to tosca template.'))

        templates = {}

        for resource in self.resources:
            # Each NSD should generate separate templates
            if resource.type == 'nsd':
                tmpl = resource.generate_tosca_type()
                tmpl = resource.generate_tosca_template(tmpl)
                self.log.debug(_("TOSCA template generated for {0}:\n{1}").
                               format(resource.name, tmpl))
                templates[resource.name] = {self.TOSCA:
                                            self.output_to_yaml(tmpl)}
                files = resource.get_supporting_files()
                # 'if files' also copes with a None return from the base
                # implementation; the original len(files) raised
                # TypeError on None.
                if files:
                    templates[resource.name][self.FILES] = files

        return templates

    def represent_ordereddict(self, dumper, data):
        '''yaml representer preserving OrderedDict insertion order.'''
        nodes = []
        for key, value in data.items():
            nodes.append((dumper.represent_data(key),
                          dumper.represent_data(value)))
        return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', nodes)

    def ordered_node(self, node):
        '''Return node as an OrderedDict in canonical TOSCA key order.'''
        order = [ToscaResource.TYPE, ToscaResource.DERIVED_FROM,
                 ToscaResource.DESC, ToscaResource.MEMBERS,
                 ToscaResource.PROPERTIES, ToscaResource.CAPABILITIES,
                 ToscaResource.REQUIREMENTS, ToscaResource.ARTIFACTS,
                 ToscaResource.INTERFACES]
        new_node = OrderedDict()
        for ent in order:
            if ent in node:
                new_node.update({ent: node.pop(ent)})

        # Check if we missed any entry
        if len(node):
            # logger.warn() is a deprecated alias for warning()
            self.log.warning(_("Did not sort these entries: {0}").
                             format(node))
            new_node.update(node)

        return new_node

    def ordered_nodes(self, nodes):
        '''Apply ordered_node() to every value of a dict of nodes.'''
        new_nodes = OrderedDict()
        if isinstance(nodes, dict):
            for name, node in nodes.items():
                new_nodes.update({name: self.ordered_node(node)})
            return new_nodes
        else:
            return nodes

    def output_to_yaml(self, tosca):
        '''Serialize a tosca dict to a YAML string with stable ordering.'''
        self.log.debug(_('Converting translated output to yaml format.'))
        dict_output = OrderedDict()

        dict_output.update({'tosca_definitions_version':
                            tosca['tosca_definitions_version']})

        # Description. Fixed: the original indexed tosca[DESC]
        # unconditionally (KeyError when absent) and also built a
        # textwrap'd desc_str that was never written to the output.
        if ToscaResource.DESC in tosca:
            dict_output.update({ToscaResource.DESC:
                                tosca[ToscaResource.DESC]})

        if ToscaResource.METADATA in tosca:
            dict_output.update({ToscaResource.METADATA:
                               tosca[ToscaResource.METADATA]})

        # Add all types
        types_list = [ToscaResource.DATA_TYPES, ToscaResource.CAPABILITY_TYPES,
                      ToscaResource.NODE_TYPES,
                      ToscaResource.GROUP_TYPES, ToscaResource.POLICY_TYPES]
        for typ in types_list:
            if typ in tosca:
                dict_output.update({typ: self.ordered_nodes(tosca[typ])})

        # Add topology template
        if ToscaResource.TOPOLOGY_TMPL in tosca:
            tmpl = OrderedDict()
            for typ in tosca[ToscaResource.TOPOLOGY_TMPL]:
                tmpl.update({typ:
                             self.ordered_nodes(
                                 tosca[ToscaResource.TOPOLOGY_TMPL][typ])})
            dict_output.update({ToscaResource.TOPOLOGY_TMPL: tmpl})

        yaml.add_representer(OrderedDict, self.represent_ordereddict)
        yaml_string = yaml.dump(dict_output, default_flow_style=False)
        # get rid of the '' from yaml.dump around numbers
        yaml_string = yaml_string.replace('\'', '')
        self.log.debug(_("YAML output:\n{0}").format(yaml_string))
        return yaml_string
diff --git a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
new file mode 100644
index 0000000..f0a6866
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
@@ -0,0 +1,192 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import importlib
+import os
+
+from rift.mano.yang_translator.common.exception import YangClassAttributeError
+from rift.mano.yang_translator.common.exception import YangClassImportError
+from rift.mano.yang_translator.common.exception import YangModImportError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.conf.config import ConfigProvider \
+    as translatorConfig
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+
class TranslateDescriptors(object):
    '''Translate YANG NodeTemplates to RIFT.io MANO Resources.'''

    YANG_DESC = (NSD, VNFD) = ('nsd', 'vnfd')

    ###########################
    # Module utility Functions
    # for dynamic class loading
    ###########################

    # yangtype -> translator class map, built lazily on first translate().
    YANG_TO_TOSCA_TYPE = None

    @staticmethod
    def _load_classes(log, locations, classes):
        '''Dynamically load all the classes from the given locations.

        Scans each location for yang_*.py modules, imports them and
        appends each module's TARGET_CLASS_NAME class to classes.
        '''
        for cls_path in locations:
            # Use the absolute path of the class path
            abs_path = os.path.dirname(os.path.abspath(__file__))
            abs_path = abs_path.replace('rift/mano/yang_translator/rwmano',
                                        cls_path)
            log.debug(_("Loading classes from %s") % abs_path)

            # Grab all the yang type module files in the given path
            mod_files = [f for f in os.listdir(abs_path) if (
                f.endswith('.py') and
                not f.startswith('__init__') and
                f.startswith('yang_'))]

            # For each module, pick out the target translation class
            for f in mod_files:
                f_name, ext = f.rsplit('.', 1)
                mod_name = (cls_path + '/' + f_name).replace('/', '.')
                # Fixed: predefine target_name so the AttributeError
                # handler below cannot hit an unbound local (the original
                # raised NameError when getattr on TARGET_CLASS_NAME
                # itself failed).
                target_name = None
                try:
                    mod = importlib.import_module(mod_name)
                    target_name = getattr(mod, 'TARGET_CLASS_NAME')
                    classes.append(getattr(mod, target_name))
                except ImportError:
                    raise YangModImportError(mod_name=mod_name)
                except AttributeError:
                    if target_name:
                        raise YangClassImportError(name=target_name,
                                                   mod_name=mod_name)
                    else:
                        # TARGET_CLASS_NAME is not defined in module.
                        # Re-raise the exception
                        raise

    @staticmethod
    def _generate_type_map(log):
        '''Generate YANG translation types map.

        Load user defined classes from location path specified in conf file.
        Base classes are located within the yang directory.
        '''

        # Base types directory
        BASE_PATH = 'rift/mano/yang_translator/rwmano/yang'

        # Custom types directory defined in conf file
        custom_path = translatorConfig.get_value('DEFAULT',
                                                 'custom_types_location')

        classes = []
        TranslateDescriptors._load_classes(log,
                                           (BASE_PATH, custom_path),
                                           classes)
        try:
            types_map = {clazz.yangtype: clazz for clazz in classes}
            log.debug(_("Type maps loaded: {}").format(types_map.keys()))
        except AttributeError as e:
            raise YangClassAttributeError(message=str(e))

        return types_map

    def __init__(self, log, yangs, tosca_template):
        '''Args:
            log: logger instance.
            yangs: dict of descriptor lists keyed by 'nsd'/'vnfd'.
            tosca_template: ToscaTemplate collecting the output.
        '''
        self.log = log
        self.yangs = yangs
        self.tosca_template = tosca_template
        # list of all TOSCA resources generated
        self.tosca_resources = []
        self.metadata = {}
        log.debug(_('Mapping between YANG nodetemplate and TOSCA resource.'))

    def translate(self):
        '''Build the type map on first use, then translate all descriptors.'''
        if TranslateDescriptors.YANG_TO_TOSCA_TYPE is None:
            TranslateDescriptors.YANG_TO_TOSCA_TYPE = \
                TranslateDescriptors._generate_type_map(self.log)
        return self._translate_yang()

    def translate_metadata(self):
        """Translate and store the metadata in instance"""
        FIELDS_MAP = {
            'ID': 'name',
            'vendor': 'vendor',
            'version': 'version',
        }
        # Initialize to default values
        metadata = {
            'name': 'yang_to_tosca',
            'vendor': 'RIFT.io',
            'version': '1.0',
        }
        # Fixed: the original read self.yang (the attribute is
        # self.yangs) and only ran the field-mapping loop inside the
        # vnfd branch.
        yang_meta = None
        if 'nsd' in self.yangs:
            yang_meta = self.yangs['nsd'][0]
        elif 'vnfd' in self.yangs:
            yang_meta = self.yangs['vnfd'][0]
        if yang_meta is not None:
            for key in FIELDS_MAP:
                # NOTE(review): the lookup direction (key 'ID' in the
                # yang dict, value taken from FIELDS_MAP[key]) looks
                # suspect but is preserved as-is — confirm against
                # descriptor schema.
                if key in yang_meta:
                    metadata[key] = str(yang_meta[FIELDS_MAP[key]])
        self.log.debug(_("Metadata {0}").format(metadata))
        self.metadata = metadata

    def _translate_yang(self):
        '''Create tosca resources for every nsd/vnfd and run handle_yang.'''
        self.log.debug(_('Translating the descriptors.'))
        # .get(..., []) guards against input with only one of the two
        # descriptor kinds (the original raised KeyError).
        for nsd in self.yangs.get(self.NSD, []):
            self.log.debug(_("Translate descriptor of type nsd: {}").
                           format(nsd))
            tosca_node = TranslateDescriptors. \
                         YANG_TO_TOSCA_TYPE[self.NSD](
                             self.log,
                             nsd.pop(ToscaResource.NAME),
                             self.NSD,
                             nsd)
            self.tosca_resources.append(tosca_node)

        for vnfd in self.yangs.get(self.VNFD, []):
            self.log.debug(_("Translate descriptor of type vnfd: {}").
                           format(vnfd))
            tosca_node = TranslateDescriptors. \
                         YANG_TO_TOSCA_TYPE[self.VNFD](
                             self.log,
                             vnfd.pop(ToscaResource.NAME),
                             self.VNFD,
                             vnfd)
            self.tosca_resources.append(tosca_node)

        # First translate VNFDs so NSDs can refer to them below
        for node in self.tosca_resources:
            if node.type == self.VNFD:
                self.log.debug(_("Handle yang for {0} of type {1}").
                               format(node.name, node.type_))
                node.handle_yang()

        # Now translate NSDs
        for node in self.tosca_resources:
            if node.type == self.NSD:
                self.log.debug(_("Handle yang for {0} of type {1}").
                               format(node.name, node.type_))
                node.handle_yang(self.tosca_resources)

        return self.tosca_resources

    def find_tosca_resource(self, name):
        '''Return the generated resource with the given name, or None.'''
        for resource in self.tosca_resources:
            if resource.name == name:
                return resource

    def _find_yang_node(self, yang_name):
        '''Return the node template with the given name, or None.

        NOTE(review): self.nodetemplates is never assigned in this class;
        callers must set it, or this raises AttributeError — confirm.
        '''
        for node in self.nodetemplates:
            if node.name == yang_name:
                return node
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/__init__.py b/common/python/rift/mano/yang_translator/rwmano/yang/__init__.py
new file mode 100755
index 0000000..bc4710b
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/__init__.py
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
new file mode 100644
index 0000000..491bd86
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
@@ -0,0 +1,396 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+import os
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.yang.yang_vld import YangVld
+
+TARGET_CLASS_NAME = 'YangNsd'
+
+
class YangNsd(ToscaResource):
    '''Class for RIFT.io YANG NS descriptor translation to TOSCA type.'''

    yangtype = 'nsd'

    # Paired tuple-unpacking: each symbolic constant on the left is bound
    # to the matching YANG field name on the right.
    OTHER_FIELDS = (SCALE_GRP, CONF_PRIM,
                    USER_DEF_SCRIPT, SCALE_ACT,
                    TRIGGER, NS_CONF_PRIM_REF,
                    CONST_VNFD, VNFD_MEMBERS,
                    MIN_INST_COUNT, MAX_INST_COUNT,
                    INPUT_PARAM_XPATH, CONFIG_ACTIONS,
                    INITIAL_CFG,) = \
                   ('scaling_group_descriptor', 'service_primitive',
                    'user_defined_script', 'scaling_config_action',
                    'trigger', 'ns_config_primitive_name_ref',
                    'constituent_vnfd', 'vnfd_member',
                    'min_instance_count', 'max_instance_count',
                    'input_parameter_xpath', 'config_actions',
                    'initial_config_primitive',)

    def __init__(self,
                 log,
                 name,
                 type_,
                 yang):
        """Initialize translation state for one NSD YANG descriptor."""
        super(YangNsd, self).__init__(log,
                                      name,
                                      type_,
                                      yang)
        self.props = {}        # required top-level NSD fields
        self.inputs = []       # TOSCA input parameter entries
        self.vnfds = {}        # member-vnf-index -> translated VNFD resource
        self.vlds = []         # YangVld instances built from the NSD's VLDs
        self.conf_prims = []   # service primitives (user-defined scripts)
        self.scale_grps = []   # processed scaling group descriptors
        self.initial_cfg = []  # initial config primitives

    def handle_yang(self, vnfds):
        """Parse the NSD YANG dict into TOSCA-ready structures.

        :param vnfds: list of already-translated resources; constituent
            VNFD references in the NSD are resolved against it.
        :raises ValidationError: on any unrecoverable inconsistency in
            the descriptor.
        """
        self.log.debug(_("Process NSD desc {0}: {1}").
                       format(self.name, self.yang))

        def process_input_param(param):
            # Map an input-parameter-xpath entry to a TOSCA input name.
            if self.XPATH in param:
                val = param.pop(self.XPATH)
                # Strip namespace, catalog and nsd part of the xpath
                self.inputs.append({
                    self.NAME:
                    self.map_yang_name_to_tosca(
                        val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
                # Log only when an input was actually appended; previously
                # self.inputs[-1] could raise IndexError on an entry
                # without an xpath.
                self.log.debug(_("{0}, inputs: {1}").
                               format(self, self.inputs[-1]))
            if len(param):
                self.log.warn(_("{0}, Did not process the following for "
                                "input param {1}: {2}").
                              format(self, self.inputs, param))

        def process_const_vnfd(cvnfd):
            # Resolve a constituent-vnfd entry against the translated VNFDs
            # and record it under its member-vnf-index.
            vnfd_id = cvnfd.pop(self.VNFD_ID_REF)
            for vnfd in vnfds:
                if vnfd.type == self.VNFD and vnfd.id == vnfd_id:
                    self.vnfds[cvnfd.pop(self.MEM_VNF_INDEX)] = vnfd
                    if self.START_BY_DFLT in cvnfd:
                        vnfd.props[self.START_BY_DFLT] = \
                                            cvnfd.pop(self.START_BY_DFLT)
                    break

            if len(cvnfd):
                self.log.warn(_("{0}, Did not process the following for "
                                "constituent vnfd {1}: {2}").
                              format(self, vnfd_id, cvnfd))
            self.log.debug(_("{0}, VNFD: {1}").format(self, self.vnfds))

        def process_scale_grp(dic):
            # Build one scaling-group record: basic fields, member VNFD
            # counts and trigger->primitive mapping.
            sg = {}
            self.log.debug(_("{0}, scale group: {1}").format(self, dic))
            fields = [self.NAME, self.MIN_INST_COUNT, self.MAX_INST_COUNT]
            for key in fields:
                if key in dic:
                    sg[key] = dic.pop(key)

            membs = {}
            for vnfd_memb in dic.pop(self.VNFD_MEMBERS):
                vnfd_idx = vnfd_memb[self.MEM_VNF_INDEX_REF]
                if vnfd_idx in self.vnfds:
                    membs[self.vnfds[vnfd_idx].name] = \
                                                vnfd_memb[self.COUNT]
            sg['vnfd_members'] = membs

            trigs = {}
            if self.SCALE_ACT in dic:
                for sg_act in dic.pop(self.SCALE_ACT):
                    # Validate the primitive: TRIGGER is popped only on a
                    # match, so a leftover key means the referenced
                    # config-primitive was not found.
                    prim = sg_act.pop(self.NS_CONF_PRIM_REF)
                    for cprim in self.conf_prims:
                        if cprim[self.NAME] == prim:
                            trigs[sg_act.pop(self.TRIGGER)] = prim
                            break
                    if len(sg_act):
                        err_msg = (_("{0}, Did not find config-primitive {1}").
                                   format(self, prim))
                        self.log.error(err_msg)
                        raise ValidationError(message=err_msg)
            sg[self.CONFIG_ACTIONS] = trigs

            if len(dic):
                self.log.warn(_("{0}, Did not process all fields for {1}").
                              format(self, dic))
            self.log.debug(_("{0}, Scale group {1}").format(self, sg))
            self.scale_grps.append(sg)

        def process_initial_config(dic):
            # Build one initial-config-primitive record with its parameters.
            icp = {}
            self.log.debug(_("{0}, initial config: {1}").format(self, dic))
            for key in [self.NAME, self.SEQ, self.USER_DEF_SCRIPT]:
                if key in dic:
                    icp[key] = dic.pop(key)

            params = {}
            if self.PARAM in dic:
                for p in dic.pop(self.PARAM):
                    if (self.NAME in p and
                        self.VALUE in p):
                        params[p[self.NAME]] = p[self.VALUE]
                    else:
                        # TODO (pjoseph): Need to add support to read the
                        # config file and get the value from that
                        self.log.warn(_("{0}, Got parameter without value: {1}").
                                      format(self, p))
                if len(params):
                    icp[self.PARAM] = params

            if len(dic):
                self.log.warn(_("{0}, Did not process all fields for {1}").
                              format(self, dic))
            self.log.debug(_("{0}, Initial config {1}").format(self, icp))
            self.initial_cfg.append(icp)

        dic = deepcopy(self.yang)
        try:
            # All REQUIRED_FIELDS must be present; a KeyError here is
            # surfaced as a ValidationError below.
            for key in self.REQUIRED_FIELDS:
                self.props[key] = dic.pop(key)

            self.id = self.props[self.ID]

            # Process constituent VNFDs
            if self.CONST_VNFD in dic:
                for cvnfd in dic.pop(self.CONST_VNFD):
                    process_const_vnfd(cvnfd)

            # Process VLDs
            if self.VLD in dic:
                for vld_dic in dic.pop(self.VLD):
                    vld = YangVld(self.log, vld_dic.pop(self.NAME),
                                  self.VLD, vld_dic)
                    vld.process_vld(self.vnfds)
                    self.vlds.append(vld)

            # Process config primitives
            if self.CONF_PRIM in dic:
                for cprim in dic.pop(self.CONF_PRIM):
                    conf_prim = {self.NAME: cprim.pop(self.NAME)}
                    if self.USER_DEF_SCRIPT in cprim:
                        conf_prim[self.USER_DEF_SCRIPT] = \
                                        cprim.pop(self.USER_DEF_SCRIPT)
                        self.conf_prims.append(conf_prim)
                    else:
                        # Fixed format string: mixing "{0}" with
                        # auto-numbered "{}" previously raised ValueError
                        # instead of producing the intended message.
                        err_msg = (_("{0}, Only user defined script supported "
                                     "in config-primitive for now {1}: {2}").
                                   format(self, conf_prim, cprim))
                        self.log.error(err_msg)
                        raise ValidationError(message=err_msg)

            # Process scaling group
            if self.SCALE_GRP in dic:
                for sg_dic in dic.pop(self.SCALE_GRP):
                    process_scale_grp(sg_dic)

            # Process initial config primitives
            if self.INITIAL_CFG in dic:
                for icp_dic in dic.pop(self.INITIAL_CFG):
                    process_initial_config(icp_dic)

            # Process the input params
            if self.INPUT_PARAM_XPATH in dic:
                for param in dic.pop(self.INPUT_PARAM_XPATH):
                    process_input_param(param)

            self.remove_ignored_fields(dic)
            if len(dic):
                self.log.warn(_("{0}, Did not process the following for "
                                "NSD {1}: {2}").
                              format(self, self.props, dic))
            self.log.debug(_("{0}, NSD: {1}").format(self, self.props))
        except Exception as e:
            err_msg = _("Exception processing NSD {0} : {1}"). \
                      format(self.name, e)
            self.log.error(err_msg)
            self.log.exception(e)
            raise ValidationError(message=err_msg)

    def generate_tosca_type(self):
        """Generate the custom TOSCA data/node/group/policy types used by
        this NSD and its member VNFDs/VLDs.

        :returns: dict of TOSCA type definitions.
        """
        self.log.debug(_("{0} Generate tosca types").
                       format(self))

        tosca = {}
        tosca[self.DATA_TYPES] = {}
        tosca[self.NODE_TYPES] = {}

        for idx, vnfd in self.vnfds.items():
            tosca = vnfd.generate_tosca_type(tosca)

        for vld in self.vlds:
            tosca = vld.generate_tosca_type(tosca)

        # Generate type for config primitives
        if self.GROUP_TYPES not in tosca:
            tosca[self.GROUP_TYPES] = {}
        if self.T_CONF_PRIM not in tosca[self.GROUP_TYPES]:
            tosca[self.GROUP_TYPES][self.T_CONF_PRIM] = {
                self.DERIVED_FROM: 'tosca.policies.Root',
                self.PROPERTIES: {
                    'primitive': self.MAP
                }}

        # Generate type for scaling group
        if self.POLICY_TYPES not in tosca:
            tosca[self.POLICY_TYPES] = {}
        if self.T_SCALE_GRP not in tosca[self.POLICY_TYPES]:
            tosca[self.POLICY_TYPES][self.T_SCALE_GRP] = {
                self.DERIVED_FROM: 'tosca.policies.Root',
                self.PROPERTIES: {
                    self.NAME:
                    {self.TYPE: self.STRING},
                    self.MAX_INST_COUNT:
                    {self.TYPE: self.INTEGER},
                    self.MIN_INST_COUNT:
                    {self.TYPE: self.INTEGER},
                    'vnfd_members':
                    {self.TYPE: self.MAP},
                    self.CONFIG_ACTIONS:
                    {self.TYPE: self.MAP}
                }}

        if self.T_INITIAL_CFG not in tosca[self.POLICY_TYPES]:
            tosca[self.POLICY_TYPES][self.T_INITIAL_CFG] = {
                self.DERIVED_FROM: 'tosca.policies.Root',
                self.PROPERTIES: {
                    self.NAME:
                    {self.TYPE: self.STRING},
                    self.SEQ:
                    {self.TYPE: self.INTEGER},
                    self.USER_DEF_SCRIPT:
                    {self.TYPE: self.STRING},
                    self.PARAM:
                    {self.TYPE: self.MAP},
                }}

        return tosca

    def generate_tosca_template(self, tosca):
        """Populate the TOSCA topology template for this NSD.

        :param tosca: dict to fill in (metadata, inputs, node templates,
            groups and policies).
        :returns: the same dict, populated.
        """
        self.log.debug(_("{0}, Generate tosca template").
                       format(self, tosca))

        # Add the standard entries
        tosca['tosca_definitions_version'] = \
                                    'tosca_simple_profile_for_nfv_1_0_0'
        tosca[self.DESC] = self.props[self.DESC]
        tosca[self.METADATA] = {
            'ID': self.name,
            self.VENDOR: self.props[self.VENDOR],
            self.VERSION: self.props[self.VERSION],
        }

        tosca[self.TOPOLOGY_TMPL] = {}

        # Add input params
        if len(self.inputs):
            if self.INPUTS not in tosca[self.TOPOLOGY_TMPL]:
                tosca[self.TOPOLOGY_TMPL][self.INPUTS] = {}
            for inp in self.inputs:
                entry = {inp[self.NAME]: {self.TYPE: self.STRING,
                                          self.DESC:
                                          'Translated from YANG'}}
                # Accumulate entries: the previous assignment overwrote
                # the whole inputs map on each iteration, keeping only
                # the last input parameter.
                tosca[self.TOPOLOGY_TMPL][self.INPUTS].update(entry)

        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}

        # Add the VNFDs and VLDs
        for idx, vnfd in self.vnfds.items():
            vnfd.generate_vnf_template(tosca, idx)

        for vld in self.vlds:
            vld.generate_tosca_template(tosca)

        # Add the config primitives as one group covering all member VNFs
        if len(self.conf_prims):
            if self.GROUPS not in tosca[self.TOPOLOGY_TMPL]:
                tosca[self.TOPOLOGY_TMPL][self.GROUPS] = {}

            conf_prims = {
                self.TYPE: self.T_CONF_PRIM
            }
            conf_prims[self.MEMBERS] = [vnfd.name for vnfd in
                                        self.vnfds.values()]
            prims = {}
            for confp in self.conf_prims:
                prims[confp[self.NAME]] = {
                    self.USER_DEF_SCRIPT: confp[self.USER_DEF_SCRIPT]
                }
            conf_prims[self.PROPERTIES] = {
                self.PRIMITIVES: prims
            }

            tosca[self.TOPOLOGY_TMPL][self.GROUPS][self.CONF_PRIM] = conf_prims

        # Add the scale group policies
        if len(self.scale_grps):
            if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []

            for sg in self.scale_grps:
                sgt = {
                    self.TYPE: self.T_SCALE_GRP,
                }
                sgt.update(sg)
                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
                    self.SCALE_GRP: sgt
                })

        # Add initial config policies
        if len(self.initial_cfg):
            if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []

            for icp in self.initial_cfg:
                icpt = {
                    self.TYPE: self.T_INITIAL_CFG,
                }
                icpt.update(icp)
                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
                    self.INITIAL_CFG: icpt
                })

        return tosca

    def get_supporting_files(self):
        """Collect supporting file records (images, scripts) for packaging.

        :returns: list of dicts with type/name/destination for each file.
        """
        files = []

        for vnfd in self.vnfds.values():
            f = vnfd.get_supporting_files()
            if f and len(f):
                files.extend(f)

        # Get the config files for initial config
        for icp in self.initial_cfg:
            if self.USER_DEF_SCRIPT in icp:
                script = os.path.basename(icp[self.USER_DEF_SCRIPT])
                files.append({
                    self.TYPE: 'script',
                    self.NAME: script,
                    self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
                })

        # TODO (pjoseph): Add support for config scripts,
        # charms, etc

        self.log.debug(_("{0}, supporting files: {1}").format(self, files))
        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
new file mode 100644
index 0000000..7d095c1
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
@@ -0,0 +1,302 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import shutil
+import tempfile
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+import rift.package.image
+
+TARGET_CLASS_NAME = 'YangVdu'
+
+
class YangVdu(ToscaResource):
    '''Class for RIFT.io YANG VDU descriptor translation to TOSCA type.'''

    yangtype = 'vdu'

    # Paired tuple-unpacking: symbolic constants bound to YANG field names.
    OTHER_KEYS = (VM_FLAVOR, CLOUD_INIT, IMAGE, IMAGE_CHKSUM,
                  VNFD_CP_REF, CP_TYPE, CLOUD_INIT_FILE,) = \
                 ('vm_flavor', 'cloud_init', 'image', 'image_checksum',
                  'vnfd_connection_point_ref', 'cp_type', 'cloud_init_file',)

    # Keys used only on the generated TOSCA side.
    TOSCA_MISC_KEYS = (VIRT_LINK, VIRT_BIND, VDU_INTF_NAME,
                       VDU_INTF_TYPE) = \
                      ('virtualLink', 'virtualBinding', 'vdu_intf_name',
                       'vdu_intf_type')

    # YANG vm-flavor fields -> TOSCA host capability property names.
    VM_FLAVOR_MAP = {
        'vcpu_count': 'num_cpus',
        'memory_mb': 'mem_size',
        'storage_gb': 'disk_size',
    }

    # Unit suffix appended to each vm-flavor value in TOSCA.
    VM_SIZE_UNITS_MAP = {
        'vcpu_count': '',
        'memory_mb': ' MB',
        'storage_gb': ' GB',
    }

    def __init__(self,
                 log,
                 name,
                 type_,
                 yang):
        """Initialize translation state for one VDU YANG descriptor."""
        super(YangVdu, self).__init__(log,
                                      name,
                                      type_,
                                      yang)
        self.yang = yang
        self.props = {}              # processed VDU properties
        self.ext_cp = []             # external connection points
        self.int_cp = []             # internal connection points (unused yet)
        self.image = None            # relative path to the VM image artifact
        self.cloud_init_file = None  # cloud-init file name, if any

    def process_vdu(self):
        """Parse the VDU YANG dict into self.props and self.ext_cp.

        Unprocessed leftover fields are logged as warnings.
        """
        self.log.debug(_("Process VDU desc {0}: {1}").format(self.name,
                                                             self.yang))

        vdu_dic = deepcopy(self.yang)
        vdu = {}

        fields = [self.ID, self.COUNT, self.CLOUD_INIT,
                  self.IMAGE, self.IMAGE_CHKSUM, self.CLOUD_INIT_FILE,]
        for key in fields:
            if key in vdu_dic:
                vdu[key] = vdu_dic.pop(key)

        self.id = vdu[self.ID]

        if self.VM_FLAVOR in vdu_dic:
            # Convert each flavor value to the TOSCA name + unit string,
            # e.g. memory_mb: 512 -> mem_size: "512 MB".
            vdu[self.HOST] = {}
            for key, value in vdu_dic.pop(self.VM_FLAVOR).items():
                vdu[self.HOST][self.VM_FLAVOR_MAP[key]] = "{}{}". \
                            format(value, self.VM_SIZE_UNITS_MAP[key])

        if self.EXT_INTF in vdu_dic:
            for ext_intf in vdu_dic.pop(self.EXT_INTF):
                cp = {}
                cp[self.NAME] = ext_intf.pop(self.VNFD_CP_REF)
                cp[self.VDU_INTF_NAME] = ext_intf.pop(self.NAME)
                cp[self.VDU_INTF_TYPE] = ext_intf[self.VIRT_INTF][self.TYPE_Y]
                self.log.debug(_("{0}, External interface {1}: {2}").
                               format(self, cp, ext_intf))
                self.ext_cp.append(cp)

        self.remove_ignored_fields(vdu_dic)
        if len(vdu_dic):
            self.log.warn(_("{0}, Did not process the following in "
                            "VDU: {1}").
                          format(self, vdu_dic))

        self.log.debug(_("{0} VDU: {1}").format(self, vdu))
        self.props = vdu

    def get_cp(self, name):
        """Return the external connection point with this name, or None."""
        for cp in self.ext_cp:
            if cp[self.NAME] == name:
                return cp
        return None

    def has_cp(self, name):
        """Return True if an external connection point with this name exists."""
        return self.get_cp(name) is not None

    def set_cp_type(self, name, cp_type):
        """Set the cp_type on the named connection point.

        :raises ValidationError: if no such connection point exists.
        """
        for idx, cp in enumerate(self.ext_cp):
            if cp[self.NAME] == name:
                cp[self.CP_TYPE] = cp_type
                self.ext_cp[idx] = cp
                self.log.debug(_("{0}, Updated CP: {1}").
                               format(self, self.ext_cp[idx]))
                return

        err_msg = (_("{0}, Did not find connection point {1}").
                   format(self, name))
        self.log.error(err_msg)
        raise ValidationError(message=err_msg)

    def set_vld(self, name, vld_name):
        """Link the named connection point to a VLD.

        :raises ValidationError: if no such connection point exists.
        """
        cp = self.get_cp(name)
        if cp:
            cp[self.VLD] = vld_name
        else:
            err_msg = (_("{0}, Did not find connection point {1}").
                       format(self, name))
            self.log.error(err_msg)
            raise ValidationError(message=err_msg)

    def get_name(self, vnf_name):
        """Return a VDU node name unique across VNFs.

        Multiple VNFs may use the same name for a VDU, so the VNF name
        is used as a prefix.
        """
        return "{}_{}".format(vnf_name, self.name)

    def generate_tosca_type(self, tosca):
        """Add the custom VDU/CP/artifact TOSCA types to the type dict.

        :returns: the same dict, populated.
        """
        self.log.debug(_("{0} Generate tosca types").
                       format(self, tosca))

        # Add custom artifact type
        if self.ARTIFACT_TYPES not in tosca:
            tosca[self.ARTIFACT_TYPES] = {}
        if self.T_ARTF_QCOW2 not in tosca[self.ARTIFACT_TYPES]:
            tosca[self.ARTIFACT_TYPES][self.T_ARTF_QCOW2] = {
                self.DERIVED_FROM: 'tosca.artifacts.Deployment.Image.VM.QCOW2',
                self.IMAGE_CHKSUM:
                {self.TYPE: self.STRING,
                 self.REQUIRED: self.NO},
            }

        if self.T_VDU1 not in tosca[self.NODE_TYPES]:
            tosca[self.NODE_TYPES][self.T_VDU1] = {
                self.DERIVED_FROM: 'tosca.nodes.nfv.VDU',
                self.PROPERTIES: {
                    self.COUNT:
                    {self.TYPE: self.INTEGER,
                     self.DEFAULT: 1},
                    self.CLOUD_INIT:
                    {self.TYPE: self.STRING,
                     self.REQUIRED: self.NO,},
                    self.CLOUD_INIT_FILE:
                    {self.TYPE: self.STRING,
                     self.REQUIRED: self.NO,},
                },
                self.CAPABILITIES: {
                    self.VIRT_LINK: {
                        self.TYPE: 'tosca.capabilities.nfv.VirtualLinkable'
                    },
                },
            }

        # Add CP type
        if self.T_CP1 not in tosca[self.NODE_TYPES]:
            tosca[self.NODE_TYPES][self.T_CP1] = {
                self.DERIVED_FROM: 'tosca.nodes.nfv.CP',
                self.PROPERTIES: {
                    self.NAME:
                    {self.TYPE: self.STRING,
                     self.DESC: 'Name of the connection point'},
                    self.CP_TYPE:
                    {self.TYPE: self.STRING,
                     self.DESC: 'Type of the connection point'},
                    self.VDU_INTF_NAME:
                    {self.TYPE: self.STRING,
                     self.DESC: 'Name of the interface on VDU'},
                    self.VDU_INTF_TYPE:
                    {self.TYPE: self.STRING,
                     self.DESC: 'Type of the interface on VDU'},
                },
             }

        return tosca

    def generate_vdu_template(self, tosca, vnf_name):
        """Add this VDU's node template and its CP templates to tosca.

        :param vnf_name: name of the owning VNF; used to build unique
            node names.
        :returns: the same dict, populated.
        """
        self.log.debug(_("{0} Generate tosca template for {2}").
                       format(self, tosca, vnf_name))

        name = self.get_name(vnf_name)

        node = {}
        node[self.TYPE] = self.T_VDU1

        if self.HOST in self.props:
            node[self.CAPABILITIES] = {
                self.HOST: {self.PROPERTIES: self.props.pop(self.HOST)}
            }
        else:
            self.log.warn(_("{0}, Does not have host requirements defined").
                          format(self))

        if self.IMAGE in self.props:
            img_name = "{}_{}_vm_image".format(vnf_name, self.name)
            image = "../{}/{}".format(self.IMAGE_DIR, self.props.pop(self.IMAGE))
            self.image = image
            node[self.ARTIFACTS] = {img_name: {
                self.FILE: image,
                self.TYPE: self.T_ARTF_QCOW2,
            }}
            if self.IMAGE_CHKSUM in self.props:
                node[self.ARTIFACTS][img_name][self.IMAGE_CHKSUM] = \
                                            self.props.pop(self.IMAGE_CHKSUM)
            node[self.INTERFACES] = {'Standard': {
                'create': img_name
            }}

        # Remember cloud init file name for get_supporting_files()
        if self.CLOUD_INIT_FILE in self.props:
            self.cloud_init_file = self.props[self.CLOUD_INIT_FILE]

        # ID is internal only; drop it before emitting the properties
        self.props.pop(self.ID)
        node[self.PROPERTIES] = self.props

        self.log.debug(_("{0}, VDU node: {1}").format(self, node))
        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][name] = node

        # Generate the connection point templates
        for cp in self.ext_cp:
            cpt = {self.TYPE: self.T_CP1}

            cpt[self.REQUIREMENTS] = []
            cpt[self.REQUIREMENTS].append({self.VIRT_BIND: {
                self.NODE: self.get_name(vnf_name)
            }})
            if self.VLD in cp:
                vld = cp.pop(self.VLD)
                cpt[self.REQUIREMENTS].append({self.VIRT_LINK: {
                    self.NODE: vld
                }})

            cpt[self.PROPERTIES] = cp
            # '/' is not valid in a TOSCA node name
            cp_name = cp[self.NAME].replace('/', '_')

            self.log.debug(_("{0}, CP node {1}: {2}").
                           format(self, cp_name, cpt))
            tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][cp_name] = cpt

        return tosca

    def get_supporting_files(self):
        """Return image/cloud-init file records needed to package this VDU.

        :returns: list of dicts with type/name/destination for each file;
            may be empty.
        """
        files = []

        if self.image is not None:
            image_name = os.path.basename(self.image)

            files.append({
                self.TYPE: 'image',
                self.NAME: image_name,
                self.DEST: "{}/{}".format(self.IMAGE_DIR, image_name),
            })

        if self.cloud_init_file is not None:
            files.append({
                self.TYPE: 'cloud_init',
                self.NAME: self.cloud_init_file,
                self.DEST: "{}/{}".format(self.CLOUD_INIT, self.cloud_init_file)
            })

        self.log.debug(_("Supporting files for {} : {}").format(self, files))
        # Fixed: previously an empty list triggered shutil.rmtree(out_dir)
        # with 'out_dir' undefined, raising NameError. An empty result is
        # a valid outcome; just return it.
        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py
new file mode 100644
index 0000000..eb47daf
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py
@@ -0,0 +1,127 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+TARGET_CLASS_NAME = 'YangVld'
+
+
class YangVld(ToscaResource):
    '''Class for RIFT.io YANG VLD descriptor translation to TOSCA type.'''

    yangtype = 'vld'

    # Fixed: the original '(VNFD_CP_REF) = (...)' had no commas, making
    # OTHER_KEYS a plain string instead of a tuple like the sibling
    # classes. VNFD_CP_REF itself keeps the same string value.
    OTHER_KEYS = (VNFD_CP_REF,) = \
                 ('vnfd_connection_point_ref',)

    # Supported YANG VLD types -> TOSCA VL node types.
    VLD_TYPE_MAP = {
        'ELAN': ToscaResource.T_VL1,
    }

    def __init__(self,
                 log,
                 name,
                 type_,
                 yang):
        """Initialize translation state for one VLD YANG descriptor."""
        super(YangVld, self).__init__(log,
                                      name,
                                      type_,
                                      yang)
        self.yang = yang
        self.props = {}  # processed VLD properties

    def process_vld(self, vnfds):
        """Parse the VLD YANG dict, linking referenced VNFD CPs to this VLD.

        :param vnfds: mapping of member-vnf-index -> translated VNFD.
        :raises ValidationError: on unsupported VLD type or unresolvable
            VNFD references.
        """
        self.log.debug(_("Process VLD desc {0}").format(self.name))

        dic = deepcopy(self.yang)

        for key in self.REQUIRED_FIELDS:
            self.props[key] = dic.pop(key)

        self.id = self.props[self.ID]

        if self.TYPE_Y in dic:
            self.props[self.TYPE] = dic.pop(self.TYPE_Y)
            if self.props[self.TYPE] not in self.VLD_TYPE_MAP:
                err_msg = (_("{0}: VLD type {1} not supported").
                           format(self, self.props[self.TYPE]))
                self.log.error(err_msg)
                raise ValidationError(message=err_msg)

        if self.VNFD_CP_REF in dic:
            for cp_ref in dic.pop(self.VNFD_CP_REF):
                vnfd_idx = cp_ref.pop(self.MEM_VNF_INDEX_REF)
                vnfd_id = cp_ref.pop(self.VNFD_ID_REF)
                vnfd_cp = cp_ref.pop(self.VNFD_CP_REF)
                if vnfd_idx in vnfds:
                    vnfd = vnfds[vnfd_idx]
                    if vnfd.id == vnfd_id:
                        # Update the CP as linked to this VLD
                        vnfd.update_cp_vld(vnfd_cp, self.name)
                    else:
                        err_msg = (_("{0}, The VNFD member index {1} and id "
                                     "{2} did not match the VNFD {3} with "
                                     "id {4}").format(self, vnfd_idx, vnfd_id,
                                                      vnfd.name, vnfd.id))
                        self.log.error(err_msg)
                        raise ValidationError(message=err_msg)
                else:
                    err_msg = (_("{0}, Did not find VNFD member index {1}").
                               format(self, vnfd_idx))
                    self.log.error(err_msg)
                    raise ValidationError(message=err_msg)

        self.remove_ignored_fields(dic)
        if len(dic):
            self.log.warn(_("{0}, Did not process the following for "
                            "VLD {1}: {2}").
                          format(self, self.props, dic))
        self.log.debug(_("{0}, VLD: {1}").format(self, self.props))

    def generate_tosca_type(self, tosca):
        """Add the custom VL TOSCA node type to the type dict.

        :returns: the same dict, populated.
        """
        self.log.debug(_("{0} Generate tosca types").
                       format(self, tosca))

        if self.T_VL1 not in tosca[self.NODE_TYPES]:
            tosca[self.NODE_TYPES][self.T_VL1] = {
                self.DERIVED_FROM: 'tosca.nodes.nfv.VL.ELAN',
                self.PROPERTIES: {
                    'description':
                    {self.TYPE: self.STRING},
                },
            }

        return tosca

    def generate_tosca_template(self, tosca):
        """Add this VLD's node template to the topology template.

        :returns: the same dict, populated.
        """
        self.log.debug(_("{0} Generate tosca template").
                       format(self, tosca))

        node = {}
        node[self.TYPE] = self.VLD_TYPE_MAP[self.props.pop(self.TYPE)]

        # ID and version are internal only; drop before emitting properties
        self.props.pop(self.ID)
        self.props.pop(self.VERSION)
        node[self.PROPERTIES] = self.props

        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node

        return tosca
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
new file mode 100644
index 0000000..7449c5a
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
@@ -0,0 +1,393 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.yang.yang_vdu import YangVdu
+
+TARGET_CLASS_NAME = 'YangVnfd'
+
+
+class YangVnfd(ToscaResource):
+    '''Class for RIFT.io YANG VNF descriptor translation to TOSCA type.'''
+
+    yangtype = 'vnfd'
+
+    CONFIG_TYPES = ['script', 'netconf', 'rest', 'juju']
+
+    OTHER_KEYS = (MGMT_INTF, HTTP_EP, MON_PARAM) = \
+                 ('mgmt_interface', 'http_endpoint', 'monitoring_param')
+
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        super(YangVnfd, self).__init__(log,
+                                       name,
+                                       type_,
+                                       yang)
+        self.props = {}
+        self.vdus = []
+        self.mgmt_intf = {}
+        self.mon_param = []
+        self.http_ep = []
+
+    def handle_yang(self):
+        self.log.debug(_("Process VNFD desc {0}: {1}").format(self.name,
+                                                              self.yang))
+
+        def process_vnf_config(conf):
+            vnf_conf = {}
+            if self.CONFIG_ATTR in conf:
+                for key, value in conf.pop(self.CONFIG_ATTR).items():
+                    vnf_conf[key] = value
+
+            if self.CONFIG_TMPL in conf:
+                vnf_conf[self.CONFIG_TMPL] = conf.pop(self.CONFIG_TMPL)
+
+            def copy_config_details(conf_type, conf_details):
+                vnf_conf[self.CONFIG_TYPE] = conf_type
+                vnf_conf[self.CONFIG_DETAILS] = conf_details
+
+            for key in self.CONFIG_TYPES:
+                if key in conf:
+                    copy_config_details(key, conf.pop(key))
+                    break
+
+            if len(conf):
+                self.log.warn(_("{0}, Did not process all in VNF "
+                                "configuration {1}").
+                              format(self, conf))
+            self.log.debug(_("{0}, vnf config: {1}").format(self, vnf_conf))
+            self.props[self.VNF_CONFIG] = vnf_conf
+
+        def process_mgmt_intf(intf):
+            if len(self.mgmt_intf) > 0:
+                err_msg(_("{0}, Already processed another mgmt intf {1}, "
+                          "got another {2}").
+                        format(self, self.msmg_intf, intf))
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+            self.mgmt_intf['protocol'] = 'tcp'
+
+            if self.PORT in intf:
+                self.mgmt_intf[self.PORT] = intf.pop(self.PORT)
+                self.props[self.PORT] = self.mgmt_intf[self.PORT]
+
+            if 'vdu_id' in intf:
+                for vdu in self.vdus:
+                    if intf['vdu_id'] == vdu.id:
+                        self.mgmt_intf[self.VDU] = vdu.get_name(self.name)
+                        intf.pop('vdu_id')
+                        break
+
+            if self.DASHBOARD_PARAMS in intf:
+                self.mgmt_intf[self.DASHBOARD_PARAMS] = \
+                                            intf.pop(self.DASHBOARD_PARAMS)
+
+            if len(intf):
+                self.log.warn(_("{0}, Did not process all in mgmt "
+                                "interface {1}").
+                              format(self, intf))
+            self.log.debug(_("{0}, Management interface: {1}").
+                           format(self, self.mgmt_intf))
+
+        def process_http_ep(eps):
+            self.log.debug("{}, HTTP EP: {}".format(self, eps))
+            for ep in eps:
+                http_ep = {'protocol': 'http'}  # Required for TOSCA
+                http_ep[self.PATH] = ep.pop(self.PATH)
+                http_ep[self.PORT] = ep.pop(self.PORT)
+                http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
+                if len(ep):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "http ep {1}").format(self, ep))
+                    self.log.debug(_("{0}, http endpoint: {1}").format(self, http_ep))
+                self.http_ep.append(http_ep)
+
+        def process_mon_param(params):
+            for param in params:
+                monp = {}
+                fields = [self.NAME, self.ID, 'value_type', 'units', 'group_tag',
+                          'json_query_method', 'http_endpoint_ref', 'widget_type',
+                          self.DESC]
+                for key in fields:
+                    if key in param:
+                        monp[key] = param.pop(key)
+
+                if len(param):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "monitporing-param {1}").
+                                  format(self, param))
+                    self.log.debug(_("{0}, Monitoring param: {1}").format(self, monp))
+                self.mon_param.append(monp)
+
+        def process_cp(cps):
+            for cp_dic in cps:
+                self.log.debug("{}, CP: {}".format(self, cp_dic))
+                name = cp_dic.pop(self.NAME)
+                for vdu in self.vdus:
+                    if vdu.has_cp(name):
+                        vdu.set_cp_type(name, cp_dic.pop(self.TYPE_Y))
+                        break
+                if len(cp_dic):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "connection-point {1}: {2}").
+                                  format(self, name, cp_dic))
+
+        ENDPOINTS_MAP = {
+            self.MGMT_INTF: process_mgmt_intf,
+            self.HTTP_EP:  process_http_ep,
+            self.MON_PARAM: process_mon_param,
+            'connection_point': process_cp
+        }
+
+        dic = deepcopy(self.yang)
+        try:
+            for key in self.REQUIRED_FIELDS:
+                self.props[key] = dic.pop(key)
+
+            self.id = self.props[self.ID]
+
+            # Process VDUs before CPs so as to update the CP struct in VDU
+            # when we process CP later
+            if self.VDU in dic:
+                for vdu_dic in dic.pop(self.VDU):
+                    vdu = YangVdu(self.log, vdu_dic.pop(self.NAME),
+                                  self.VDU, vdu_dic)
+                    vdu.process_vdu()
+                    self.vdus.append(vdu)
+
+            for key in ENDPOINTS_MAP.keys():
+                if key in dic:
+                    ENDPOINTS_MAP[key](dic.pop(key))
+
+            if self.VNF_CONFIG in dic:
+                process_vnf_config(dic.pop(self.VNF_CONFIG))
+
+            self.remove_ignored_fields(dic)
+            if len(dic):
+                self.log.warn(_("{0}, Did not process the following for "
+                                "VNFD: {1}").
+                              format(self, dic))
+            self.log.debug(_("{0}, VNFD: {1}").format(self, self.props))
+        except Exception as e:
+            err_msg = _("Exception processing VNFD {0} : {1}"). \
+                      format(self.name, e)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+    def update_cp_vld(self, cp_name, vld_name):
+        for vdu in self.vdus:
+            cp = vdu.get_cp(cp_name)
+            if cp:
+                vdu.set_vld(cp_name, vld_name)
+                break
+
+    def generate_tosca_type(self, tosca):
+        self.log.debug(_("{0} Generate tosa types").
+                       format(self))
+
+        for vdu in self.vdus:
+            tosca = vdu.generate_tosca_type(tosca)
+
+        # Add data_types
+        if self.T_VNF_CONFIG not in tosca[self.DATA_TYPES]:
+            tosca[self.DATA_TYPES][self.T_VNF_CONFIG] = {
+                self.PROPERTIES:
+                {self.CONFIG_TYPE:
+                 {self.TYPE: self.STRING},
+                 'config_delay':
+                 {self.TYPE: self.INTEGER,
+                  self.DEFAULT: 0,
+                  self.REQUIRED: self.NO,
+                  self.CONSTRAINTS:
+                  [{'greater_or_equal': 0}]},
+                 'config_priority':
+                 {self.TYPE: self.INTEGER,
+                  self.CONSTRAINTS:
+                  [{'greater_than': 0}]},
+                 self.CONFIG_DETAILS:
+                 {self.TYPE: self.MAP},
+                 self.CONFIG_TMPL:
+                 {self.TYPE: self.STRING,
+                  self.REQUIRED: self.NO},
+                }
+            }
+
+        # Add capability types
+        if self.CAPABILITY_TYPES not in tosca:
+            tosca[self.CAPABILITY_TYPES] = {}
+        if self.T_HTTP_EP not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_HTTP_EP] = {
+                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
+                self.PROPERTIES: {
+                    'polling_interval':
+                    {self.TYPE: self.INTEGER},
+                    'path':
+                    {self.TYPE: self.STRING},
+                },
+            }
+
+        if self.T_MGMT_INTF not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_MGMT_INTF] = {
+                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
+                self.PROPERTIES: {
+                    self.DASHBOARD_PARAMS:
+                    {self.TYPE: self.MAP},
+                    self.VDU:
+                    {self.TYPE: self.STRING},
+                },
+            }
+
+        if self.T_MON_PARAM not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_MON_PARAM] = {
+                self.DERIVED_FROM: 'tosca.capabilities.nfv.Metric',
+                self.PROPERTIES: {
+                    'id':
+                    {self.TYPE: self.INTEGER},
+                    'name':
+                    {self.TYPE: self.STRING},
+                    'value_type':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'INT'},
+                    'group_tag':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'Group1'},
+                    'units':
+                    {self.TYPE: self.STRING},
+                    'description':
+                    {self.TYPE: self.STRING},
+                    'json_query_method':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'NAMEKEY'},
+                    'http_endpoint_ref':
+                    {self.TYPE: self.STRING},
+                    'widget_type':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'COUNTER'},
+                }
+            }
+
+        # Define the VNF type
+        if self.T_VNF1 not in tosca[self.NODE_TYPES]:
+            tosca[self.NODE_TYPES][self.T_VNF1] = {
+                self.DERIVED_FROM: 'tosca.nodes.nfv.VNF',
+                self.PROPERTIES: {
+                    'vnf_configuration':
+                    {self.TYPE: self.T_VNF_CONFIG},
+                    'port':
+                    {self.TYPE: self.INTEGER,
+                     self.CONSTRAINTS:
+                     [{'in_range': '[1, 65535]'}]},
+                    self.START_BY_DFLT:
+                    {self.TYPE: self.BOOL,
+                     self.DEFAULT: self.TRUE},
+                },
+                self.CAPABILITIES: {
+                    'mgmt_interface':
+                    {self.TYPE: self.T_MGMT_INTF},
+                    'http_endpoint':
+                    {self.TYPE: self.T_HTTP_EP},
+                    'monitoring_param_0':
+                    {self.TYPE: self.T_MON_PARAM},
+                    'monitoring_param_1':
+                    {self.TYPE: self.T_MON_PARAM},
+                },
+                self.REQUIREMENTS: [
+                    {'vdus':
+                     {self.TYPE: 'tosca.capabilities.nfv.VirtualLinkable',
+                      self.RELATIONSHIP:
+                      'tosca.relationships.nfv.VirtualLinksTo',
+                      self.NODE: self.T_VDU1,
+                      self.OCCURENCES: '[1, UNBOUND]'}}
+                ],
+            }
+
+        return tosca
+
+    def generate_vnf_template(self, tosca, index):
+        self.log.debug(_("{0}, Generate tosca template for VNF {1}").
+                       format(self, index, tosca))
+
+        for vdu in self.vdus:
+            tosca = vdu.generate_vdu_template(tosca, self.name)
+
+        node = {}
+        node[self.TYPE] = self.T_VNF1
+
+        # Remove fields not required in TOSCA
+        self.props.pop(self.DESC)
+
+        # Update index to the member-vnf-index
+        self.props[self.ID] = index
+        node[self.PROPERTIES] = self.props
+
+        caps = {}
+        if len(self.mgmt_intf):
+            caps[self.MGMT_INTF] = {
+                self.PROPERTIES: self.mgmt_intf
+            }
+
+        if len(self.http_ep):
+            caps[self.HTTP_EP] = {
+                self.PROPERTIES: self.http_ep[0]
+            }
+            if len(self.http_ep) > 1:
+                self.log.warn(_("{0}: Currently only one HTTP endpoint "
+                                "supported: {1}").
+                              format(self, self.http_ep))
+
+        if len(self.mon_param):
+            count = 0
+            for monp in self.mon_param:
+                name = "{}_{}".format(self.MON_PARAM, count)
+                caps[name] = {self.PROPERTIES: monp}
+                count += 1
+
+        node[self.CAPABILITIES] = caps
+
+        if len(self.vdus):
+            reqs = []
+            for vdu in self.vdus:
+                reqs.append({'vdus': {self.NODE: vdu.get_name(self.name)}})
+
+            node[self.REQUIREMENTS] = reqs
+        else:
+            self.log.warn(_("{0}, Did not find any VDUS with this VNF").
+                          format(self))
+
+        self.log.debug(_("{0}, VNF node: {1}").format(self, node))
+
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node
+
+        return tosca
+
+    def get_supporting_files(self):
+        files = []
+
+        for vdu in self.vdus:
+            f = vdu.get_supporting_files()
+            if f and len(f):
+                files.extend(f)
+
+        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
new file mode 100644
index 0000000..907a4a0
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
@@ -0,0 +1,220 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import shutil
+import subprocess
+import tarfile
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.syntax.tosca_template \
+    import ToscaTemplate
+from rift.mano.yang_translator.rwmano.translate_descriptors \
+    import TranslateDescriptors
+
+import rift.package.image
+from rift.package.package import TarPackageArchive
+import rift.package.cloud_init
+import rift.package.script
+import rift.package.store
+
+
+class YangTranslator(object):
+    '''Invokes translation methods.'''
+
+    def __init__(self, log, yangs=None, files=None, packages=[]):
+        super(YangTranslator, self).__init__()
+        self.log = log
+        self.yangs = {}
+        if yangs is not None:
+            self.yangs = yangs
+        self.files = files
+        self.archive = None
+        self.tosca_template = ToscaTemplate(log)
+        self.node_translator = None
+        self.pkgs = packages
+        log.info(_('Initialized parameters for translation.'))
+
+    def translate(self):
+        if self.files:
+            self.get_yangs()
+
+        self.node_translator = TranslateDescriptors(self.log,
+                                                      self.yangs,
+                                                      self.tosca_template)
+
+        self.tosca_template.resources = self.node_translator.translate()
+
+        return self.tosca_template.output_to_tosca()
+
+    def get_yangs(self):
+        '''Get the descriptors and convert to yang instances'''
+        for filename in self.files:
+            self.log.debug(_("Load file {0}").format(filename))
+            # Only one descriptor per file
+            if tarfile.is_tarfile(filename):
+                tar = open(filename, "r+b")
+                archive = TarPackageArchive(self.log, tar)
+                pkg = archive.create_package()
+                self.pkgs.append(pkg)
+                desc_type = pkg.descriptor_type
+                if desc_type == TranslateDescriptors.NSD:
+                    if TranslateDescriptors.NSD not in self.yangs:
+                        self.yangs[TranslateDescriptors.NSD] = []
+                    self.yangs[TranslateDescriptors.NSD]. \
+                        append(pkg.descriptor_msg.as_dict())
+                elif desc_type == TranslateDescriptors.VNFD:
+                    if TranslateDescriptors.VNFD not in self.yangs:
+                        self.yangs[TranslateDescriptors.VNFD] = []
+                    self.yangs[TranslateDescriptors.VNFD]. \
+                        append(pkg.descriptor_msg.as_dict())
+                else:
+                    raise ValidationError("Unknown descriptor type: {}".
+                                          format(desc_type))
+
    def _create_csar_files(self, output_dir, name, tmpl,
                           archive=False):
        # Write one translated template as a CSAR directory tree under
        # output_dir/name (Definitions/, TOSCA-Metadata/, supporting
        # files) and, when archive is True, zip it and return the zip
        # path. Returns None on error or when not archiving.
        if ToscaTemplate.TOSCA not in tmpl:
            self.log.error(_("Did not find TOSCA template for {0}").
                           format(name))
            return

        # Create sub for each NS template; start from a clean directory
        subdir = os.path.join(output_dir, name)
        if os.path.exists(subdir):
            shutil.rmtree(subdir)
        os.makedirs(subdir)

        # Create the definitions dir
        def_dir = os.path.join(subdir, 'Definitions')
        os.makedirs(def_dir)
        entry_file = os.path.join(def_dir, name+'.yaml')
        self.log.debug(_("Writing file {0}").
                       format(entry_file))
        with open(entry_file, 'w+') as f:
            f.write(tmpl[ToscaTemplate.TOSCA])

        # Create the Tosca meta (Entry-Definitions must point at the
        # template written above)
        meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
        os.makedirs(meta_dir)
        meta = '''TOSCA-Meta-File-Version: 1.0
CSAR-Version: 1.1
Created-By: RIFT.io
Entry-Definitions: Definitions/'''
        meta_data = "{}{}".format(meta, name+'.yaml')
        meta_file = os.path.join(meta_dir, 'TOSCA.meta')
        self.log.debug(_("Writing file {0}:\n{1}").
                       format(meta_file, meta_data))
        with open(meta_file, 'w+') as f:
            f.write(meta_data)

        # Copy other supporting files
        if ToscaTemplate.FILES in tmpl:
            for f in tmpl[ToscaTemplate.FILES]:
                self.log.debug(_("Copy supporting file {0}").format(f))

                # Search in source packages; the breaks below stop at the
                # first package that contains the file (first match wins)
                if len(self.pkgs):
                    for pkg in self.pkgs:
                        # TODO(pjoseph): Need to add support for other file types
                        fname = f[ToscaResource.NAME]
                        dest_path = os.path.join(subdir, f[ToscaResource.DEST])
                        ftype = f[ToscaResource.TYPE]

                        if ftype == 'image':
                            image_file_map = rift.package.image.get_package_image_files(pkg)

                            if fname in image_file_map:
                                self.log.debug(_("Extracting image {0} to {1}").
                                               format(fname, dest_path))
                                pkg.extract_file(image_file_map[fname],
                                                 dest_path)
                                break

                        elif ftype == 'script':
                            script_file_map = \
                                rift.package.script.PackageScriptExtractor.package_script_files(pkg)
                            if fname in script_file_map:
                                self.log.debug(_("Extracting script {0} to {1}").
                                               format(fname, dest_path))
                                pkg.extract_file(script_file_map[fname],
                                                 dest_path)
                                break

                        elif ftype == 'cloud_init':
                            script_file_map = \
                                rift.package.cloud_init.PackageCloudInitExtractor.package_script_files(pkg)
                            if fname in script_file_map:
                                self.log.debug(_("Extracting script {0} to {1}").
                                               format(fname, dest_path))
                                pkg.extract_file(script_file_map[fname],
                                                 dest_path)
                                break

                        else:
                            self.log.warn(_("Unknown file type {0}: {1}").
                                          format(ftype, f))

                #TODO(pjoseph): Search in other locations

        # Create the ZIP archive
        if archive:
            prev_dir=os.getcwd()
            os.chdir(subdir)

            try:
                zip_file = name + '.zip'
                zip_path = os.path.join(output_dir, zip_file)
                self.log.debug(_("Creating zip file {0}").format(zip_path))
                # zip to a .partial first, then mv into place, so readers
                # never observe a half-written archive
                zip_cmd = "zip -r {}.partial ."
                subprocess.check_call(zip_cmd.format(zip_path),
                                      shell=True,
                                      stdout=subprocess.DEVNULL)
                mv_cmd = "mv {0}.partial {0}"
                subprocess.check_call(mv_cmd.format(zip_path),
                                      shell=True,
                                      stdout=subprocess.DEVNULL)
                shutil.rmtree(subdir)
                return zip_path

            except subprocess.CalledProcessError as e:
                self.log.error(_("Creating CSAR archive failed: {0}").
                               format(e))

            except Exception as e:
                self.log.exception(e)

            finally:
                # Always restore the working directory, even on failure
                os.chdir(prev_dir)
+
+    def write_output(self, output,
+                     output_dir=None,
+                     archive=False,):
+        if output:
+            zip_files = []
+            for key in output.keys():
+                if output_dir:
+                    zf = self._create_csar_files(output_dir,
+                                                 key,
+                                                 output[key],
+                                                 archive=archive,)
+                    zip_files.append(zf)
+                else:
+                    print(_("TOSCA Template {0}:\n{1}").
+                          format(key, output[key]))
+            return zip_files
diff --git a/common/python/rift/mano/yang_translator/shell.py b/common/python/rift/mano/yang_translator/shell.py
new file mode 100644
index 0000000..f353e92
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/shell.py
@@ -0,0 +1,166 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import logging.config
+import os
+
+import magic
+
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.yang_translator import YangTranslator
+
+
+"""
+Test the yang translation from command line as:
+#translator
+  --template-file=<path to the JSON template or tar.gz>
+  --template-type=<type of template e.g. yang>
+  --parameters="purpose=test"
+  --output_dir=<output directory>
+  --validate_only
+Takes four user arguments,
+1. type of translation (e.g. yang) (required)
+2. Path to the file that needs to be translated (required)
+3. Input parameters (optional)
+4. Write to output files in a dir (optional), else print on screen
+
+In order to use translator to only validate template,
+without actual translation, pass --validate-only along with
+other required arguments.
+
+"""
+
+
+class TranslatorShell(object):
+
+    SUPPORTED_TYPES = ['yang']
+    COPY_DIRS = ['images']
+    SUPPORTED_INPUTS = (TAR, JSON, XML, YAML) = ('tar', 'json', 'xml', 'yaml')
+
+    def _parse_args(self, raw_args=None):
+        parser = argparse.ArgumentParser(
+            description='RIFT.io YANG translator for descriptors')
+        parser.add_argument(
+            "-f",
+            "--template-file",
+            nargs="+",
+            required=True,
+            action="append",
+            help="Template file to translate")
+        parser.add_argument(
+            "-o",
+            "--output-dir",
+            default=None,
+            help="Directory to output")
+        parser.add_argument(
+            "-p", "--parameters",
+            help="Input parameters")
+        parser.add_argument(
+            "--archive",
+            help="Create a ZIP archive",
+            action="store_true")
+        parser.add_argument(
+            "--debug",
+            help="Enable debug logging",
+            action="store_true")
+        if raw_args:
+            args = parser.parse_args(raw_args)
+        else:
+            args = parser.parse_args()
+        return args
+
+    def main(self, raw_args=None, log=None):
+        args = self._parse_args(raw_args)
+        if log is None:
+            if args.debug:
+                logging.basicConfig(level=logging.DEBUG)
+            else:
+                logging.basicConfig(level=logging.ERROR)
+            log = logging.getLogger("yang-translator")
+
+        log.debug(_("Args passed is {}").format(args))
+        self.log = log
+        self.in_files = []
+        self.ftype = None
+        for f in args.template_file:
+            path = os.path.abspath(f[0])
+            if not os.path.isfile(path):
+                msg = _("The path %(path)s is not a valid file.") % {
+                    'path': path}
+                log.error(msg)
+                raise ValueError(msg)
+            # Get the file type
+            ftype = self._get_file_type(path)
+            if self.ftype is None:
+                self.ftype = ftype
+            elif self.ftype != ftype:
+                msg = (_("All input files hould be of same type"))
+                log.error(msg)
+                raise ValueError(msg)
+            self.in_files.append(path)
+
+        self.log.debug(_("Input files are of type {0}").
+                       format(self.ftype))
+
+        self.archive = None
+        self._translate(output_dir=args.output_dir,
+                        archive=args.archive)
+
+    def _translate(self, output_dir=None, archive=False):
+        output = None
+        self.log.debug(_('Loading the yang template for {0}.').
+                       format(self.in_files))
+        translator = YangTranslator(self.log, files=self.in_files)
+        self.log.debug(_('Translating the yang template for {0}.').
+                       format(self.in_files))
+        output = translator.translate()
+        if output:
+            if output_dir:
+                translator.write_output(output,
+                                        output_dir=output_dir,
+                                        archive=archive)
+            else:
+                for key in output.keys():
+                    print(_("TOSCA Template {0}:\n{1}").
+                          format(key, output[key]))
+        else:
+            self.log.error(_("Did not get any translated output!!"))
+
+
+    def _get_file_type(self, path):
+        m = magic.open(magic.MAGIC_MIME)
+        m.load()
+        typ = m.file(path)
+        if typ.startswith('text/plain'):
+            # Assume to be yaml
+            return self.YAML
+        elif typ.startswith('application/x-gzip'):
+            return self.TAR
+        else:
+            msg = _("The file {0} is not a supported type: {1}"). \
+                  format(path, typ)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
def main(args=None, log=None):
    """Console entry point wrapping TranslatorShell."""
    shell = TranslatorShell()
    shell.main(raw_args=args, log=log)


if __name__ == '__main__':
    main()
diff --git a/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml b/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
new file mode 100644
index 0000000..9a35a7e
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
@@ -0,0 +1,390 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: Toy NS
+metadata:
+  ID: ping_pong_nsd
+  vendor: RIFT.io
+  version: 1.0
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_delay:
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: no
+        type: integer
+      config_details:
+        type: map
+      config_priority:
+        constraints:
+        - greater_than: 0
+        type: integer
+      config_template:
+        required: no
+        type: string
+      config_type:
+        type: string
+capability_types:
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+      vdu:
+        type: string
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      path:
+        type: string
+      polling_interval:
+        type: integer
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      description:
+        type: string
+      group_tag:
+        default: Group1
+        type: string
+      http_endpoint_ref:
+        type: string
+      id:
+        type: integer
+      json_query_method:
+        default: NAMEKEY
+        type: string
+      name:
+        type: string
+      units:
+        type: string
+      value_type:
+        default: INT
+        type: string
+      widget_type:
+        default: COUNTER
+        type: string
+node_types:
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+      name:
+        description: Name of the connection point
+        type: string
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: Type of the interface on VDU
+        type: string
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      port:
+        constraints:
+        - in_range: [1, 65535]
+        type: integer
+      start_by_default:
+        type: boolean
+        default: true
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+    capabilities:
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+    - vdus:
+        node: tosca.nodes.riftio.VDU1
+        occurences: [1, UNBOUND]
+        relationship: tosca.relationships.nfv.VirtualLinksTo
+        type: tosca.capabilities.nfv.VirtualLinkable
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      cloud_init:
+        default: #cloud-config
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.policies.Root
+    properties:
+      primitive: map
+policy_types:
+  tosca.policies.riftio.InitialConfigPrimitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      parameter:
+        type: map
+      seq:
+        type: integer
+      user_defined_script:
+        type: string
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      config_actions:
+        type: map
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      name:
+        type: string
+      vnfd_members:
+        type: map
+topology_template:
+  policies:
+  - scaling_group_descriptor:
+      config_actions:
+        post_scale_out: ping config
+      max_instance_count: 10
+      min_instance_count: 1
+      name: ping_group
+      type: tosca.policies.riftio.ScalingGroup
+      vnfd_members:
+        ping_vnfd: 1
+  - initial_config_primitive:
+      name: start traffic
+      seq: 1
+      type: tosca.policies.riftio.InitialConfigPrimitive
+      user_defined_script: start_traffic.py
+  groups:
+    config_primitive:
+      type: tosca.groups.riftio.ConfigPrimitives
+      members:
+      - ping_vnfd
+      - pong_vnfd
+      properties:
+        primitives:
+          ping config:
+            user_defined_script: ping_config.py
+  inputs:
+    vendor:
+      type: string
+      description: Translated from YANG
+  node_templates:
+    ping_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 1
+        port: 18888
+        start_by_default: false
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 0
+          config_details:
+            script_type: bash
+          config_priority: 2
+          config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
+            ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
+            \ to configure VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server info for\
+            \ ping!\"\n    exit $rc\nfi\n\ncurl -D /dev/stdout \\\n    -H \"Accept:\
+            \ application/vnd.yang.data+xml\" \\\n    -H \"Content-Type: application/vnd.yang.data+json\"\
+            \ \\\n    -X POST \\\n    -d \"{\\\"rate\\\":$ping_rate}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set ping rate!\"\n\
+            \    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/ping/stats
+            polling_interval: 2
+            port: 18888
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/ping/stats
+              port: 18888
+            port: 18888
+            protocol: tcp
+            vdu: ping_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: ping_vnfd_iovdu_0
+    pong_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, pong.service ]\n  - [ systemctl, start, --no-block, pong.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        pong_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: pong_vnfd_iovdu_0_vm_image
+    pong_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: pong_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: pong_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    ping_pong_vld:
+      type: tosca.nodes.riftio.VL1
+      properties:
+        description: Toy VL
+        vendor: RIFT.io
+    ping_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: ping_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: ping_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    pong_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 2
+        port: 18889
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 60
+          config_details:
+            script_type: bash
+          config_priority: 1
+          config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
+            pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
+            \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
+            \ VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server(own) info\
+            \ for pong!\"\n    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/pong/stats
+            polling_interval: 2
+            port: 18889
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/pong/stats
+              port: 18889
+            port: 18889
+            protocol: tcp
+            vdu: pong_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: pong_vnfd_iovdu_0
+    ping_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, ping.service ]\n  - [ systemctl, start, --no-block, ping.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        ping_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: ping_vnfd_iovdu_0_vm_image
diff --git a/common/python/rift/mano/yang_translator/test/data/yang_helloworld.json b/common/python/rift/mano/yang_translator/test/data/yang_helloworld.json
new file mode 100644
index 0000000..e5ff679
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/test/data/yang_helloworld.json
@@ -0,0 +1,25 @@
+module helloworld {
+
+    namespace "http://helloworld.com/ns/helloworld";
+
+    prefix "helloworld";
+
+    organization  "helloworld organization";
+
+    description
+      "helloworld module";
+
+    revision 2013-04-02 {
+        description
+            "Initial revision";
+    }
+
+    container helloworld {
+        description
+           "Helloworld example for creating YANG-netconfd SIL modules";
+        leaf message {
+            config false;
+            type string;
+        }
+    }
+}
diff --git a/common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json b/common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json
new file mode 100644
index 0000000..1db0555
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json
@@ -0,0 +1,28 @@
+module helloworld {
+
+    namespace "http://helloworld.com/ns/helloworld";
+
+    prefix "helloworld";
+
+    organization  "helloworld organization";
+
+    description
+      "helloworld module";
+
+    revision 2013-04-02 {
+        description
+            "Initial revision";
+    }
+
+    container helloworld {
+        description
+           "Helloworld example for creating YANG-netconfd SIL modules";
+        leaf message {
+            config false;
+            type string;
+        }
+        leaf invalid {
+          type invalid;
+        }
+    }
+}
diff --git a/common/python/rift/mano/yang_translator/test/yang_translator_ut.py b/common/python/rift/mano/yang_translator/test/yang_translator_ut.py
new file mode 100755
index 0000000..100aeb5
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/test/yang_translator_ut.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import xmlrunner
+
+import unittest
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+import rift.mano.tosca_translator.shell as tshell
+
+import rift.mano.utils.compare_desc as cmpdesc
+
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+
+from rift.mano.yang_translator.common.utils import _
+import rift.mano.yang_translator.shell as shell
+
+
+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+
+
+class PingPongDescriptors(object):
+
+    def __init__(self, output_dir, log):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    fmt='yaml',
+                    write_to_file=True,
+                    out_dir=output_dir,
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                    use_placement_group = False,
+                )
+
+        # Create the tar files in output dir
+        def create_archive(desc):
+            # Create checksum file
+            cur_dir = os.path.join(output_dir, desc)
+            flist = {}
+            for root, dirs, files in os.walk(cur_dir):
+                rel_dir = root.replace(cur_dir+'/', '')
+                for f in files:
+                    fpath = os.path.join(root, f)
+                    flist[os.path.join(rel_dir, f)] = \
+                                        ChecksumUtils.get_md5(fpath)
+            log.debug(_("Files in {}: {}").format(cur_dir, flist))
+
+            chksumfile = os.path.join(cur_dir, 'checksums.txt')
+            with open(chksumfile, 'w') as c:
+                for key in sorted(flist.keys()):
+                    c.write("{}  {}\n".format(flist[key], key))
+
+            # Create the tar archive
+            tar_cmd = "tar zcvf {0}.tar.gz {0}"
+            subprocess.check_call(tar_cmd.format(desc),
+                                  shell=True,
+                                  stdout=subprocess.DEVNULL)
+
+        prevdir = os.getcwd()
+        os.chdir(output_dir)
+        for d in os.listdir(output_dir):
+            create_archive(d)
+        os.chdir(prevdir)
+
+class TestYangTranslator(unittest.TestCase):
+
+    yang_helloworld = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "data/yang_helloworld.json")
+    template_file = '--template-file=' + yang_helloworld
+    template_validation = "--validate-only"
+    debug="--debug"
+    failure_msg = _('The program raised an exception unexpectedly.')
+
+    default_timeout = 0
+    log_level = logging.WARN
+    log = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+            '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('yang-translator-ut')
+        cls.log.addHandler(stderr_handler)
+
+        cls.desc_dir = tempfile.mkdtemp()
+        PingPongDescriptors(cls.desc_dir, cls.log)
+        cls.log.debug("Yang comparison descs in {}".format(cls.desc_dir))
+
+    @classmethod
+    def tearDownClass(cls):
+        '''Clean up temporary directory'''
+        # Remove directory if not debug level
+        if cls.log_level != logging.DEBUG:
+            shutil.rmtree(cls.desc_dir)
+        else:
+            cls.log.warn("Descriptor directory: {}".format(cls.desc_dir))
+
+    def test_missing_arg(self):
+        self.assertRaises(SystemExit, shell.main, '')
+
+    def test_invalid_file_arg(self):
+        self.assertRaises(SystemExit, shell.main, 'translate me')
+
+    def test_invalid_file_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          ('--template-file=template.txt'))
+
+    def test_invalid_type_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          (self.template_file,
+                           '--template-type=xyz'))
+
+    def compare_tosca(self, gen_desc, exp_desc):
+        gen = "--generated="+gen_desc
+        exp = "--expected="+exp_desc
+        cmpdesc.main([gen, exp])
+
+    def test_output(self):
+        test_base_dir = os.path.join(os.path.dirname(
+            os.path.abspath(__file__)), 'data')
+        temp_dir = tempfile.mkdtemp()
+        args = []
+        for f in os.listdir(self.desc_dir):
+            fpath = os.path.join(self.desc_dir, f)
+            if os.path.isfile(fpath):
+                template = '--template-file='+fpath
+                args.append(template)
+        output_dir = "--output-dir=" + temp_dir
+        args.append(output_dir)
+        self.log.debug("Args passed: {}".format(args))
+
+        try:
+            shell.main(args, log=self.log)
+
+            # Check the dirs are present
+            out_dir = os.path.join(temp_dir, 'ping_pong_nsd')
+            self.assertTrue(os.path.isdir(out_dir))
+            dirs = os.listdir(out_dir)
+            expected_dirs = ['TOSCA-Metadata', 'Definitions']
+            self.assertTrue(set(expected_dirs) <= set(dirs))
+
+            # Compare the descriptors
+            gen_desc = os.path.join(out_dir, 'Definitions', 'ping_pong_nsd.yaml')
+            exp_desc = os.path.join(test_base_dir,
+                                    'ping_pong_tosca.yaml')
+            self.compare_tosca(gen_desc, exp_desc)
+
+            # Convert back to yang and compare
+            template = '--template-file='+gen_desc
+            yang_out_dir = os.path.join(temp_dir, 'ping_pong_yang')
+            output_dir = "--output-dir=" + yang_out_dir
+            tshell.main([template, output_dir], log=self.log)
+
+            # Check the dirs are present
+            dirs = os.listdir(yang_out_dir)
+            self.assertTrue(len(dirs) >= 3)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(_("Exception {}").format(e))
+
+        finally:
+            if temp_dir:
+                shutil.rmtree(temp_dir)
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    TestYangTranslator.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/yang_translator/translator_logging.conf b/common/python/rift/mano/yang_translator/translator_logging.conf
new file mode 100644
index 0000000..968ebc9
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/translator_logging.conf
@@ -0,0 +1,43 @@
+
+[loggers]
+keys=root,yang-translator
+
+[handlers]
+keys=RotatingFileHandler,SysLogHandler,NullHandler
+
+[formatters]
+keys=form01
+
+[logger_root]
+level=DEBUG
+handlers=NullHandler
+
+[logger_yang-translator]
+level=INFO
+#one can be removed based on requirements
+handlers=SysLogHandler, RotatingFileHandler
+qualname=yang-translator
+propagate=1
+
+[handler_RotatingFileHandler]
+class=handlers.RotatingFileHandler
+level=INFO
+formatter=form01
+#rotation happens after 100MB
+args=('/var/log/yang-translator.log', 'a', 100000000, 5, 'utf8')
+
+[handler_SysLogHandler]
+class=handlers.SysLogHandler
+formatter=form01
+level=INFO
+args=('/dev/log', handlers.SysLogHandler.LOG_SYSLOG)
+
+[handler_NullHandler]
+class=NullHandler
+formatter=form01
+level=DEBUG
+args=()
+
+[formatter_form01]
+format = %(asctime)s - %(name)s - %(levelname)s - %(filename)s : %(message)s
+datefmt =
diff --git a/common/python/rift/mano/yang_translator/yang-translator b/common/python/rift/mano/yang_translator/yang-translator
new file mode 100755
index 0000000..a4c0ee6
--- /dev/null
+++ b/common/python/rift/mano/yang_translator/yang-translator
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from rift.mano.yang_translator import shell as translator_shell
+
+if __name__ == '__main__':
+    translator_shell.main()
diff --git a/common/python/test/CMakeLists.txt b/common/python/test/CMakeLists.txt
new file mode 100644
index 0000000..1abb50d
--- /dev/null
+++ b/common/python/test/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Creation Date: 2016/1/12
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+cmake_minimum_required(VERSION 2.8)
+
+rift_py3test(utest_juju_api
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_juju_api.py
+  )
diff --git a/common/python/test/utest_config_data.py b/common/python/test/utest_config_data.py
new file mode 100644
index 0000000..8287c11
--- /dev/null
+++ b/common/python/test/utest_config_data.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import os
+import sys
+import tempfile
+import unittest
+import xmlrunner
+import yaml
+
+
+from rift.mano.config_data import config
+
+import gi
+gi.require_version('VnfdYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        VnfdYang,
+        RwYang,
+        )
+
+class InitialPrimitiveReaderTest(unittest.TestCase):
+    def test_read_valid_config(self):
+        input_prim_data = [
+                {
+                    "name": "prim_1",
+                    "parameter": {
+                        "hostname": "pe1",
+                        #"pass": "6windos"
+                        # Hard to compare with multiple elements because ordering of list
+                        # element is not deterministic.
+                    }
+                },
+                {
+                    "name": "prim_2",
+                    # No, parameters (use default values)
+                },
+            ]
+
+        with io.StringIO() as yaml_hdl:
+            yaml_hdl.write(yaml.safe_dump(input_prim_data))
+            yaml_hdl.seek(0)
+            reader = config.VnfInitialConfigPrimitiveReader.from_yaml_file_hdl(yaml_hdl)
+
+        expected_primitives = [
+                VnfdYang.InitialConfigPrimitive.from_dict({
+                        "name": "prim_1", "seq": 0, "parameter": [
+                            {
+                                "name": "hostname",
+                                "value": "pe1",
+                            },
+                        ]
+                    }),
+                VnfdYang.InitialConfigPrimitive.from_dict({
+                        "name": "prim_2", "seq": 1
+                    }),
+                ]
+
+        for i, prim in enumerate(reader.primitives):
+            logging.debug("Expected: %s", str(expected_primitives[i]))
+            logging.debug("Got: %s", str(prim))
+            self.assertEqual(expected_primitives[i], prim)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/test/utest_juju_api.py b/common/python/test/utest_juju_api.py
new file mode 100755
index 0000000..7da9e81
--- /dev/null
+++ b/common/python/test/utest_juju_api.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import argparse
+import asyncio
+import logging
+from unittest import mock
+import os
+import sys
+import unittest
+import xmlrunner
+
+import rift.mano.utils.juju_api as juju_api
+
+
+class JujuClientTest(unittest.TestCase):
+
+    log = None
+
+    @classmethod
+    def set_logger(cls, log):
+        cls.log = log
+
+    @asyncio.coroutine
+    def juju_client_test(self, mock_jujuclient, loop):
+        api = juju_api.JujuApi(secret='test', loop=loop, version=1)
+
+        env = yield from api.get_env()
+
+        self.assertTrue(env.login.called,
+                        "Login to Juju not called")
+        env.login.assert_called_with('test', user='user-admin')
+
+        charm = 'test-charm'
+        service = 'test-service'
+        yield from api.deploy_service(charm, service)
+        # self.assertTrue(env.deploy.called,
+        #                "Deploy failed")
+
+        config = {
+            'test_param': 'test_value',
+        }
+        yield from api.apply_config(config, env=env)
+        self.assertTrue(env.set_config.called,
+                        "Config failed")
+
+        try:
+            yield from api.resolve_error(env=env)
+        except KeyError as e:
+            # Since the status does have values, this throws error
+            pass
+        # resolved method will not be called  due to error above
+        self.assertFalse(env.resolved.called,
+                        "Resolve error failed")
+
+        action = 'test-action'
+        params = {}
+        api.units = ['test-service-0']
+        # yield from api.execute_action(action, params, service=service, env=env)
+
+        action_tag = 'test-123434352'
+        # yield from api.get_action_status(action_tag)
+
+        api.destroy_retries = 2
+        api.retry_delay = 0.1
+        try:
+            yield from api.destroy_service()
+
+        except Exception as e:
+            JujuClientTest.log.debug("Expected exception on destroy service: {}".
+                                     format(e))
+
+        self.assertTrue(env.destroy_service.called,
+                        "Destroy failed")
+
+    @mock.patch('rift.mano.utils.juju_api.Env1', autospec=True)
+    def test_client(self, mock_jujuclient):
+        loop = asyncio.get_event_loop()
+
+        loop.run_until_complete(self.juju_client_test(mock_jujuclient,
+                                                      loop))
+
+        loop.close()
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    log = logging.getLogger()
+    log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+    JujuClientTest.set_logger(log)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()