From: Philip Joseph
Date: Wed, 12 Apr 2017 07:53:50 +0000 (+0000)
Subject: Merge from SO/master
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=7c5192d381f1c63f2eedc43adce43f10e0c0a3c8;p=osm%2FSO.git

Merge from SO/master

Change-Id: Icf7df6ed2bf3fbb4a065b0a30670fe3ecff812b2
Signed-off-by: Philip Joseph
---

7c5192d381f1c63f2eedc43adce43f10e0c0a3c8
diff --cc common/python/rift/downloader/url.py
index 0cdd8d4a,7ffb9997..c22a04e2
--- a/common/python/rift/downloader/url.py
+++ b/common/python/rift/downloader/url.py
@@@ -34,14 -33,8 +33,9 @@@ from requests.packages.urllib3.util.ret
  from requests.packages.urllib3.exceptions import InsecureRequestWarning
  requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
- import gi
- gi.require_version("RwPkgMgmtYang", "1.0")
- 
- from gi.repository import RwPkgMgmtYang
  from . import base
 +from .local_file import LocalFileAdapter as LocalFileAdapter
- 
  class UrlDownloader(base.AbstractDownloader):
      """Handles downloads of URL with some basic retry strategy.
      """
@@@ -107,10 -100,11 +101,12 @@@
      def _create_session(self):
          session = requests.Session()
-         retries = Retry(total=5, backoff_factor=1)
+         # 3 connection attempts should be more than enough; we can't wait forever.
+         # The user needs to be updated on the status.
+         retries = Retry(total=2, backoff_factor=1)
          session.mount("http://", HTTPAdapter(max_retries=retries))
          session.mount("https://", HTTPAdapter(max_retries=retries))
+         session.mount("file://", LocalFileAdapter())
          return session
diff --cc rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
index ea17e601,85b31a5d..00c549b6
--- a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
+++ b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
@@@ -3933,70 -3762,82 +3938,84 @@@ class NsManager(object)
          msg : RPC input
          action : Scaling Action
          """
-         ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
-         ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
- 
-         xpath = self._project.add_project(
-             ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').
-             format(msg.nsr_id_ref))
- 
-         instance = ScalingGroupInstance.from_dict({
-             "id": msg.instance_id,
-             "project_name": self._project.name,})
- 
-         @asyncio.coroutine
-         def get_nsr_scaling_group():
-             results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
- 
-             for result in results:
-                 res = yield from result
-                 nsr_config = res.result
- 
-             for scaling_group in nsr_config.scaling_group:
-                 if scaling_group.scaling_group_name_ref == msg.scaling_group_name_ref:
-                     break
-             else:
-                 scaling_group = nsr_config.scaling_group.add()
-                 scaling_group.scaling_group_name_ref = msg.scaling_group_name_ref
- 
-             return (nsr_config, scaling_group)
- 
-         @asyncio.coroutine
-         def update_config(nsr_config):
-             xml = self._ncclient.convert_to_xml(RwNsrYang, nsr_config)
-             xml = '{}'.format(xml)
-             yield from self._ncclient.connect()
-             yield from self._ncclient.manager.edit_config(target="running", config=xml, default_operation="replace")
- 
-         @asyncio.coroutine
+         def get_scaling_group_information():
-             scaling_group_url = "{url}/ns-instance-config/nsr/{nsr_id}".format(url=self._conf_url, nsr_id=msg.nsr_id_ref)
++            scaling_group_url = "{url}/project/{project}/ns-instance-config/nsr/{nsr_id}".format(
++                url=self._conf_url, project=self._project, nsr_id=msg.nsr_id_ref)
+             output = requests.get(scaling_group_url, headers=self._headers, auth=(self._user, self._password), verify=False)
+             if output.text is None or len(output.text) == 0:
+                 self.log.error("nsr id %s information not present", msg.nsr_id_ref)
+                 return None
+             scaling_group_info = json.loads(output.text)
+             return scaling_group_info
+ 
+         def config_scaling_group_information(scaling_group_info):
+             data_str = json.dumps(scaling_group_info)
+             self.log.debug("scaling group info %s", data_str)
+ 
-             scale_out_url = "{url}/ns-instance-config/nsr/{nsr_id}".format(url=self._conf_url, nsr_id=msg.nsr_id_ref)
++            scale_out_url = "{url}/project/{project}/ns-instance-config/nsr/{nsr_id}".format(
++                url=self._conf_url, project=self._project, nsr_id=msg.nsr_id_ref)
+             response = requests.put(scale_out_url, data=data_str, verify=False, auth=(self._user, self._password), headers=self._headers)
+             response.raise_for_status()
+ 
          def scale_out():
-             nsr_config, scaling_group = yield from get_nsr_scaling_group()
-             scaling_group.instance.append(instance)
-             yield from update_config(nsr_config)
+             scaling_group_info = get_scaling_group_information()
+             if scaling_group_info is None:
+                 return
+ 
+             scaling_group_present = False
+             if "scaling-group" in scaling_group_info["nsr:nsr"]:
+                 scaling_group_array = scaling_group_info["nsr:nsr"]["scaling-group"]
+                 for scaling_group in scaling_group_array:
+                     if scaling_group["scaling-group-name-ref"] == msg.scaling_group_name_ref:
+                         scaling_group_present = True
+                         if 'instance' not in scaling_group:
+                             scaling_group['instance'] = []
+                         for instance in scaling_group['instance']:
+                             if instance["id"] == int(msg.instance_id):
+                                 self.log.error("scaling group with instance id %s already exists for scale-out", msg.instance_id)
+                                 return
+                         scaling_group["instance"].append({"id": int(msg.instance_id)})
+ 
+             if not scaling_group_present:
+                 scaling_group_info["nsr:nsr"]["scaling-group"] = [{"scaling-group-name-ref": msg.scaling_group_name_ref, "instance": [{"id": int(msg.instance_id)}]}]
+ 
+             config_scaling_group_information(scaling_group_info)
+             return
-         @asyncio.coroutine
          def scale_in():
-             nsr_config, scaling_group = yield from get_nsr_scaling_group()
-             scaling_group.instance.remove(instance)
-             yield from update_config(nsr_config)
+             scaling_group_info = get_scaling_group_information()
+             if scaling_group_info is None:
+                 return
+ 
+             scaling_group_array = scaling_group_info["nsr:nsr"]["scaling-group"]
+             scaling_group_present = False
+             instance_id_present = False
+             for scaling_group in scaling_group_array:
+                 if scaling_group["scaling-group-name-ref"] == msg.scaling_group_name_ref:
+                     scaling_group_present = True
+                     if 'instance' in scaling_group:
+                         instance_array = scaling_group["instance"]
+                         for index in range(len(instance_array)):
+                             if instance_array[index]["id"] == int(msg.instance_id):
+                                 instance_array.pop(index)
+                                 instance_id_present = True
+                                 break
+ 
+             if not scaling_group_present:
+                 self.log.error("Scaling group %s does not exist for scale-in", msg.scaling_group_name_ref)
+                 return
+ 
+             if not instance_id_present:
+                 self.log.error("Instance id %s does not exist for scale-in", msg.instance_id)
+                 return
+ 
+             config_scaling_group_information(scaling_group_info)
+             return
          if action == ScalingRpcHandler.ACTION.SCALE_OUT:
-             self._loop.create_task(scale_out())
+             self._loop.run_in_executor(None, scale_out)
          else:
-             self._loop.create_task(scale_in())
- 
-         # Opdata based calls, disabled for now!
-         # if action == ScalingRpcHandler.ACTION.SCALE_OUT:
-         #     self.scale_nsr_out(
-         #         msg.nsr_id_ref,
-         #         msg.scaling_group_name_ref,
-         #         msg.instance_id,
-         #         xact)
-         # else:
-         #     self.scale_nsr_in(
-         #         msg.nsr_id_ref,
-         #         msg.scaling_group_name_ref,
-         #         msg.instance_id)
+             self._loop.run_in_executor(None, scale_in)
      def nsr_update_cfg(self, nsr_id, msg):
          nsr = self._nsrs[nsr_id]
@@@ -4405,15 -4236,8 +4424,12 @@@ class ScalingRpcHandler(mano_dts.DtsHan
              assert action == rwdts.QueryAction.RPC
              try:
+                 if not self._project.rpc_check(msg, xact_info=xact_info):
+                     return
+ 
-                 if self.callback:
-                     self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
-                 rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
-                     "instance_id": msg.instance_id})
+                     "instance_id": msg.instance_id,
+                     "project_name": self._project.name,})
                  xact_info.respond_xpath(
                      rwdts.XactRspCode.ACK,
@@@ -4440,12 -4263,8 +4458,9 @@@
                      msg.instance_id = last_instance_id + 1
                      self.last_instance_id[scale_group] += 1
-                 if self.callback:
-                     self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
-                 rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
-                     "instance_id": msg.instance_id})
+                     "instance_id": msg.instance_id,
+                     "project_name": self._project.name,})
                  xact_info.respond_xpath(
                      rwdts.XactRspCode.ACK,
diff --cc rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
index 8e798896,518b9276..1606ad2f
--- a/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
+++ b/rwlaunchpad/plugins/rwpkgmgr/test/utest_publisher_dts.py
@@@ -146,26 -143,40 +146,40 @@@ class TestCase(rift.test.dts.AbstractDT
          download_id = yield from self.job_handler.register_downloader(url_downloader)
          assert download_id is not None
+ 
+         # Waiting for 10 secs to be sure that the file is downloaded
+         yield from asyncio.sleep(10, loop=self.loop)
-         xpath = "/download-jobs/job[download-id='{}']".format(
-             download_id)
++        xpath = self.project.add_project("/download-jobs/job[download-id='{}']".format(
++            download_id))
+         result = yield from self.read_xpath(xpath)
+         self.log.debug("Test result before complete check - %s", result)
+         assert result.status == "COMPLETED"
+         assert len(self.job_handler.tasks) == 0
+ 
+     @rift.test.dts.async_test
+     def test_url_download_unreachable_ip(self):
+         """
+         Integration Test:
+         Ensure that a bad IP does not block forever
+         """
+         yield from self.job_handler.register()
-         # Waiting to be sure that the file is downloaded
-         # From BLR, it sometimes takes longer for the file to
-         # be downloaded
-         max_time = 60
-         total_time = 0
-         while True:
-             yield from asyncio.sleep(5, loop=self.loop)
-             xpath = self.project.add_project("/download-jobs/job[download-id='{}']".
-                                              format(download_id))
-             result = yield from self.read_xpath(xpath)
-             self.log.debug("Test result before complete check - %s", result)
-             if result.status != "COMPLETED":
-                 total_time = total_time + 5
-                 if total_time <= max_time:
-                     continue
-             else:
-                 break
+         proxy = mock.MagicMock()
-         assert result.status == "COMPLETED"
+         # Here, we are assuming that there is no HTTP server at 10.1.2.3
+         url = "http://10.1.2.3/common/unittests/plantuml.jar"
+         url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", proxy)
+ 
+         download_id = yield from self.job_handler.register_downloader(url_downloader)
+         assert download_id is not None
+ 
+         # Waiting for 10 secs to be sure all reconnect attempts have been exhausted
+         yield from asyncio.sleep(10, loop=self.loop)
+         xpath = "/download-jobs/job[download-id='{}']".format(
+             download_id)
+         result = yield from self.read_xpath(xpath)
+         self.log.debug("Test result before complete check - %s", result)
+         assert result.status == "FAILED"
          assert len(self.job_handler.tasks) == 0
diff --cc rwlaunchpad/test/launchpad.py
index b73c5000,98680ba5..6db393cf
--- a/rwlaunchpad/test/launchpad.py
+++ b/rwlaunchpad/test/launchpad.py
@@@ -484,7 -453,6 +484,7 @@@ class Demo(rift.vcs.demo.Demo)
              AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              PackageManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
              StagingManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
-             ProjectMgrManoTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
++            #ProjectMgrManoTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
          ]
          if not mgmt_ip_list or len(mgmt_ip_list) == 0: