Merge from SO/master
Change-Id: Icf7df6ed2bf3fbb4a065b0a30670fe3ecff812b2
Signed-off-by: Philip Joseph <philip.joseph@riftio.com>
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
index ea17e60..00c549b 100755
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
@@ -3789,13 +3789,18 @@
self._cloud_account_handler = cloud_account_handler
self._ro_plugin_selector = ro_plugin_selector
- self._ncclient = rift.mano.ncclient.NcClient(
- host="127.0.0.1",
- port=2022,
- username="admin",
- password="admin",
- loop=self._loop)
+ # Initialize the set of variables for implementing Scaling RPC using REST.
+ self._headers = {"content-type":"application/json", "accept":"application/json"}
+ # This will break once RBAC is added to the rift code and the admin user's password is changed, or the admin user itself is removed.
+ self._user = 'admin'
+ self._password = 'admin'
+ self._ip = 'localhost'
+ self._rport = 8008
+ self._conf_url = "https://{ip}:{port}/api/config". \
+ format(ip=self._ip,
+ port=self._rport)
+
self._nsrs = {}
self._nsds = {}
self._vnfds = {}
@@ -3933,70 +3938,84 @@
msg : RPC input
action : Scaling Action
"""
- ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
- ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
+ def get_scaling_group_information():
+ scaling_group_url = "{url}/project/{project}/ns-instance-config/nsr/{nsr_id}".format(
+ url=self._conf_url, project=self._project, nsr_id=msg.nsr_id_ref)
+ output = requests.get(scaling_group_url, headers=self._headers, auth=(self._user, self._password), verify=False)
+ if output.text == None or len(output.text) == 0:
+ self.log.error("nsr id %s information not present", self._nsr_id)
+ return None
+ scaling_group_info = json.loads(output.text)
+ return scaling_group_info
+
+ def config_scaling_group_information(scaling_group_info):
+ data_str = json.dumps(scaling_group_info)
+ self.log.debug("scaling group Info %s", data_str)
- xpath = self._project.add_project(
- ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').
- format(msg.nsr_id_ref))
-
- instance = ScalingGroupInstance.from_dict({
- "id": msg.instance_id,
- "project_name": self._project.name,})
-
- @asyncio.coroutine
- def get_nsr_scaling_group():
- results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
-
- for result in results:
- res = yield from result
- nsr_config = res.result
-
- for scaling_group in nsr_config.scaling_group:
- if scaling_group.scaling_group_name_ref == msg.scaling_group_name_ref:
- break
- else:
- scaling_group = nsr_config.scaling_group.add()
- scaling_group.scaling_group_name_ref = msg.scaling_group_name_ref
-
- return (nsr_config, scaling_group)
-
- @asyncio.coroutine
- def update_config(nsr_config):
- xml = self._ncclient.convert_to_xml(RwNsrYang, nsr_config)
- xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
- yield from self._ncclient.connect()
- yield from self._ncclient.manager.edit_config(target="running", config=xml, default_operation="replace")
-
- @asyncio.coroutine
+ scale_out_url = "{url}/project/{project}/ns-instance-config/nsr/{nsr_id}".format(
+ url=self._conf_url, project=self._project, nsr_id=msg.nsr_id_ref)
+ response = requests.put(scale_out_url, data=data_str, verify=False, auth=(self._user, self._password), headers=self._headers)
+ response.raise_for_status()
+
def scale_out():
- nsr_config, scaling_group = yield from get_nsr_scaling_group()
- scaling_group.instance.append(instance)
- yield from update_config(nsr_config)
+ scaling_group_info = get_scaling_group_information()
+ if scaling_group_info is None:
+ return
+
+ scaling_group_present = False
+ if "scaling-group" in scaling_group_info["nsr:nsr"]:
+ scaling_group_array = scaling_group_info["nsr:nsr"]["scaling-group"]
+ for scaling_group in scaling_group_array:
+ if scaling_group["scaling-group-name-ref"] == msg.scaling_group_name_ref:
+ scaling_group_present = True
+ if 'instance' not in scaling_group:
+ scaling_group['instance'] = []
+ for instance in scaling_group['instance']:
+ if instance["id"] == int(msg.instance_id):
+ self.log.error("scaling group with instance id %s exists for scale out", msg.instance_id)
+ return
+ scaling_group["instance"].append({"id": int(msg.instance_id)})
+
+ if not scaling_group_present:
+ scaling_group_info["nsr:nsr"]["scaling-group"] = [{"scaling-group-name-ref": msg.scaling_group_name_ref, "instance": [{"id": msg.instance_id}]}]
+
+ config_scaling_group_information(scaling_group_info)
+ return
- @asyncio.coroutine
def scale_in():
- nsr_config, scaling_group = yield from get_nsr_scaling_group()
- scaling_group.instance.remove(instance)
- yield from update_config(nsr_config)
+ scaling_group_info = get_scaling_group_information()
+ if scaling_group_info is None:
+ return
+
+ scaling_group_array = scaling_group_info["nsr:nsr"]["scaling-group"]
+ scaling_group_present = False
+ instance_id_present = False
+ for scaling_group in scaling_group_array:
+ if scaling_group["scaling-group-name-ref"] == msg.scaling_group_name_ref:
+ scaling_group_present = True
+ if 'instance' in scaling_group:
+ instance_array = scaling_group["instance"];
+ for index in range(len(instance_array)):
+ if instance_array[index]["id"] == int(msg.instance_id):
+ instance_array.pop(index)
+ instance_id_present = True
+ break
+
+ if not scaling_group_present:
+ self.log.error("Scaling group %s does not exist for scale in", msg.scaling_group_name_ref)
+ return
+
+ if not instance_id_present:
+ self.log.error("Instance id %s does not exist for scale in", msg.instance_id)
+ return
+
+ config_scaling_group_information(scaling_group_info)
+ return
if action == ScalingRpcHandler.ACTION.SCALE_OUT:
- self._loop.create_task(scale_out())
+ self._loop.run_in_executor(None, scale_out)
else:
- self._loop.create_task(scale_in())
-
- # Opdata based calls, disabled for now!
- # if action == ScalingRpcHandler.ACTION.SCALE_OUT:
- # self.scale_nsr_out(
- # msg.nsr_id_ref,
- # msg.scaling_group_name_ref,
- # msg.instance_id,
- # xact)
- # else:
- # self.scale_nsr_in(
- # msg.nsr_id_ref,
- # msg.scaling_group_name_ref,
- # msg.instance_id)
+ self._loop.run_in_executor(None, scale_in)
def nsr_update_cfg(self, nsr_id, msg):
nsr = self._nsrs[nsr_id]
@@ -4408,9 +4427,6 @@
if not self._project.rpc_check(msg, xact_info=xact_info):
return
- if self.callback:
- self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
-
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
"instance_id": msg.instance_id,
"project_name": self._project.name,})
@@ -4420,6 +4436,8 @@
self.__class__.SCALE_IN_OUTPUT_XPATH,
rpc_op)
+ if self.callback:
+ self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
except Exception as e:
self.log.exception(e)
xact_info.respond_xpath(
@@ -4440,9 +4458,6 @@
msg.instance_id = last_instance_id + 1
self.last_instance_id[scale_group] += 1
- if self.callback:
- self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
-
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
"instance_id": msg.instance_id,
"project_name": self._project.name,})
@@ -4452,6 +4467,8 @@
self.__class__.SCALE_OUT_OUTPUT_XPATH,
rpc_op)
+ if self.callback:
+ self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
except Exception as e:
self.log.exception(e)
xact_info.respond_xpath(
diff --git a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/downloader/copy.py b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/downloader/copy.py
index fa3dd3e..b1f11ec 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/downloader/copy.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/rift/tasklets/rwpkgmgr/downloader/copy.py
@@ -123,6 +123,8 @@
Copy directory tree to destination descriptor folder.
"""
+ self.copy_progress()
+
store = self.proxy._get_store(self.package_type)
src_path = store._get_package_dir(self.src_package_id)
self.src_package = store.get_package(self.src_package_id)
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/CMakeLists.txt b/rwlaunchpad/plugins/rwpkgmgr/test/CMakeLists.txt
index a42e8e9..8f090a2 100644
--- a/rwlaunchpad/plugins/rwpkgmgr/test/CMakeLists.txt
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/CMakeLists.txt
@@ -43,6 +43,11 @@
${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_url_download
)
+rift_py3test(utest_publisher_dts.test_url_download_unreachable_ip
+ TEST_ARGS
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_url_download_unreachable_ip
+ )
+
rift_py3test(utest_publisher_dts.test_cancelled
TEST_ARGS
${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_cancelled
@@ -53,6 +58,7 @@
utest_publisher_dts.test_download_publisher
utest_publisher_dts.test_publish
utest_publisher_dts.test_url_download
+ utest_publisher_dts.test_url_download_unreachable_ip
utest_publisher_dts.test_cancelled
)
diff --git a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
index 8e79889..1606ad2 100755
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
@@ -146,28 +146,42 @@
download_id = yield from self.job_handler.register_downloader(url_downloader)
assert download_id is not None
-
- # Waiting to be sure that the file is downloaded
- # From BLR, it sometimes take longer for the file to
- # be downloaded
- max_time = 60
- total_time = 0
- while True:
- yield from asyncio.sleep(5, loop=self.loop)
- xpath = self.project.add_project("/download-jobs/job[download-id='{}']".
- format(download_id))
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before complete check - %s", result)
- if result.status != "COMPLETED":
- total_time = total_time + 5
- if total_time <= max_time:
- continue
- else:
- break
-
+
+ # Waiting for 10 secs to be sure that the file is downloaded
+ yield from asyncio.sleep(10, loop=self.loop)
+ xpath = self.project.add_project("/download-jobs/job[download-id='{}']".format(
+ download_id))
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before complete check - %s", result)
assert result.status == "COMPLETED"
assert len(self.job_handler.tasks) == 0
+ @rift.test.dts.async_test
+ def test_url_download_unreachable_ip(self):
+ """
+ Integration Test:
+ Ensure that a bad IP does not block forever
+ """
+ yield from self.job_handler.register()
+
+ proxy = mock.MagicMock()
+
+ # Here, we are assuming that there is no HTTP server at 10.1.2.3
+ url = "http://10.1.2.3/common/unittests/plantuml.jar"
+ url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", proxy)
+
+ download_id = yield from self.job_handler.register_downloader(url_downloader)
+ assert download_id is not None
+
+ # Waiting for 10 secs to be sure all reconnect attempts have been exhausted
+ yield from asyncio.sleep(10, loop=self.loop)
+ xpath = "/download-jobs/job[download-id='{}']".format(
+ download_id)
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before complete check - %s", result)
+ assert result.status == "FAILED"
+ assert len(self.job_handler.tasks) == 0
+
@rift.test.dts.async_test
def test_cancelled(self):
diff --git a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
index 9e15181..6469e2f 100644
--- a/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
+++ b/rwlaunchpad/plugins/yang/rw-pkg-mgmt.tailf.yang
@@ -43,6 +43,10 @@
tailf:actionpoint rw_actionpoint;
}
+ tailf:annotate "/rw-pkg-mgmt:package-copy" {
+ tailf:actionpoint rw_actionpoint;
+ }
+
tailf:annotate "/rw-pkg-mgmt:get-package-schema" {
tailf:actionpoint rw_actionpoint;
}
diff --git a/rwlaunchpad/test/launchpad.py b/rwlaunchpad/test/launchpad.py
index b73c500..6db393c 100755
--- a/rwlaunchpad/test/launchpad.py
+++ b/rwlaunchpad/test/launchpad.py
@@ -484,7 +484,7 @@
AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
PackageManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
StagingManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ProjectMgrManoTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+ #ProjectMgrManoTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
]
if not mgmt_ip_list or len(mgmt_ip_list) == 0: