+
+ # Dispatch of one received kafka message. NOTE(review): `topic`, `command`,
+ # `params` and `order_id` come from the enclosing method's scope — its
+ # signature is outside this hunk; confirm against the full file.
+ # Admin pings are too frequent/noisy to log, everything else is traced.
+ if topic != "admin" and command != "ping":
+ self.logger.debug(
+ "Task kafka_read receives {} {}: {}".format(topic, command, params)
+ )
+ # Any successfully received message resets the consumer's error/startup state.
+ self.consecutive_errors = 0
+ self.first_start = False
+ # Monotonic per-message counter, used below as a task ordering/correlation id.
+ order_id += 1
+ if command == "exit":
+ # Dedicated exception unwinds the read loop and stops the service.
+ raise LcmExceptionExit
+ elif command.startswith("#"):
+ # Commands starting with "#" are comments: ignore.
+ return
+ elif command == "echo":
+ # just for test
+ print(params)
+ sys.stdout.flush()
+ return
+ elif command == "test":
+ # Fire-and-forget self-test coroutine. NOTE(review): asyncio.Task(coro,
+ # loop=...) is deprecated in modern Python; ensure_future is used for every
+ # other task in this dispatcher — consider aligning.
+ asyncio.Task(self.test(params), loop=self.loop)
+ return
+
+ if topic == "admin":
+ # Liveness self-ping: only honored when both endpoints are "lcm" and the
+ # worker_id matches this process (pings addressed to other workers are
+ # silently dropped).
+ if command == "ping" and params["to"] == "lcm" and params["from"] == "lcm":
+ if params.get("worker_id") != self.worker_id:
+ return
+ self.pings_not_received = 0
+ try:
+ # Touch the healthcheck file with the current timestamp so an
+ # external probe can see the loop is alive.
+ with open(self.health_check_file, "w") as f:
+ f.write(str(time()))
+ except Exception as e:
+ # Best-effort: a failed healthcheck write is logged, not fatal.
+ self.logger.error(
+ "Cannot write into '{}' for healthcheck: {}".format(
+ self.health_check_file, e
+ )
+ )
+ return
+ elif topic == "pla":
+ if command == "placement":
+ # Forward a placement computation result to the NS engine (synchronous
+ # call — no task is spawned for this one).
+ self.ns.update_nsrs_with_pla_result(params)
+ return
+ elif topic == "k8scluster":
+ # K8s cluster lifecycle: spawn the async operation and register the task
+ # in the registry under (topic, id, order_id) so it can be tracked.
+ if command == "create" or command == "created":
+ k8scluster_id = params.get("_id")
+ task = asyncio.ensure_future(self.k8scluster.create(params, order_id))
+ self.lcm_tasks.register(
+ "k8scluster", k8scluster_id, order_id, "k8scluster_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ k8scluster_id = params.get("_id")
+ task = asyncio.ensure_future(self.k8scluster.delete(params, order_id))
+ self.lcm_tasks.register(
+ "k8scluster", k8scluster_id, order_id, "k8scluster_delete", task
+ )
+ return
+ elif topic == "vca":
+ # VCA (VNF Configuration & Abstraction) registration lifecycle.
+ # NOTE(review): unlike "ns"/"vim_account"/"wim_account"/"sdn", delete does
+ # NOT cancel pending tasks for this id first — confirm this is intended.
+ if command == "create" or command == "created":
+ vca_id = params.get("_id")
+ task = asyncio.ensure_future(self.vca.create(params, order_id))
+ self.lcm_tasks.register("vca", vca_id, order_id, "vca_create", task)
+ return
+ elif command == "edit" or command == "edited":
+ vca_id = params.get("_id")
+ task = asyncio.ensure_future(self.vca.edit(params, order_id))
+ self.lcm_tasks.register("vca", vca_id, order_id, "vca_edit", task)
+ return
+ elif command == "delete" or command == "deleted":
+ vca_id = params.get("_id")
+ task = asyncio.ensure_future(self.vca.delete(params, order_id))
+ self.lcm_tasks.register("vca", vca_id, order_id, "vca_delete", task)
+ return
+ elif topic == "k8srepo":
+ # K8s repository lifecycle: same spawn+register pattern as k8scluster.
+ if command == "create" or command == "created":
+ k8srepo_id = params.get("_id")
+ self.logger.debug("k8srepo_id = {}".format(k8srepo_id))
+ task = asyncio.ensure_future(self.k8srepo.create(params, order_id))
+ self.lcm_tasks.register(
+ "k8srepo", k8srepo_id, order_id, "k8srepo_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ k8srepo_id = params.get("_id")
+ task = asyncio.ensure_future(self.k8srepo.delete(params, order_id))
+ self.lcm_tasks.register(
+ "k8srepo", k8srepo_id, order_id, "k8srepo_delete", task
+ )
+ return
+ elif topic == "ns":
+ # NS (network service) LCM operations. For each op the payload is an
+ # nslcmop record: "_id" is the operation id, "nsInstanceId" the target NS.
+ # Pattern: spawn the coroutine, register it under ("ns", nsr_id, nslcmop_id).
+ if command == "instantiate":
+ # self.logger.debug("Deploying NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id))
+ self.lcm_tasks.register(
+ "ns", nsr_id, nslcmop_id, "ns_instantiate", task
+ )
+ return
+ elif command == "terminate":
+ # self.logger.debug("Deleting NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ # Terminate is the only ns op that first cancels any pending tasks
+ # registered for this NS instance.
+ self.lcm_tasks.cancel(topic, nsr_id)
+ task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_terminate", task)
+ return
+ elif command == "vca_status_refresh":
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(
+ self.ns.vca_status_refresh(nsr_id, nslcmop_id)
+ )
+ self.lcm_tasks.register(
+ "ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task
+ )
+ return
+ elif command == "action":
+ # self.logger.debug("Update NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.action(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_action", task)
+ return
+ elif command == "update":
+ # self.logger.debug("Update NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.update(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_update", task)
+ return
+ elif command == "scale":
+ # self.logger.debug("Update NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.scale(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_scale", task)
+ return
+ elif command == "heal":
+ # self.logger.debug("Healing NS {}".format(nsr_id))
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.heal(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_heal", task)
+ return
+ elif command == "migrate":
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.migrate(nsr_id, nslcmop_id))
+ self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_migrate", task)
+ return
+ elif command == "verticalscale":
+ nslcmop = params
+ nslcmop_id = nslcmop["_id"]
+ nsr_id = nslcmop["nsInstanceId"]
+ task = asyncio.ensure_future(self.ns.vertical_scale(nsr_id, nslcmop_id))
+ # Extra debug traces around registration (only this command has them).
+ self.logger.debug(
+ "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task)
+ )
+ self.lcm_tasks.register(
+ "ns", nsr_id, nslcmop_id, "ns_verticalscale", task
+ )
+ self.logger.debug(
+ "LCM task registered {},{},{} ".format(nsr_id, nslcmop_id, task)
+ )
+ return
+ elif command == "show":
+ # Debug command: params is a bare nsr_id; dump the NS record and its
+ # registered tasks to stdout.
+ nsr_id = params
+ try:
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ print(
+ "nsr:\n _id={}\n operational-status: {}\n config-status: {}"
+ "\n detailed-status: {}\n deploy: {}\n tasks: {}"
+ "".format(
+ nsr_id,
+ db_nsr["operational-status"],
+ db_nsr["config-status"],
+ db_nsr["detailed-status"],
+ db_nsr["_admin"]["deployed"],
+ self.lcm_tasks.task_registry["ns"].get(nsr_id, ""),
+ )
+ )
+ except Exception as e:
+ print("nsr {} not found: {}".format(nsr_id, e))
+ sys.stdout.flush()
+ return
+ elif command == "deleted":
+ return # TODO cleaning of task just in case should be done
+ # Terminal/notification commands emitted when operations complete: no-op.
+ elif command in (
+ "vnf_terminated",
+ "policy_updated",
+ "terminated",
+ "instantiated",
+ "scaled",
+ "healed",
+ "actioned",
+ "updated",
+ "migrated",
+ "verticalscaled",
+ ): # "scaled-cooldown-time"
+ return
+
+ elif topic == "nsi": # netslice LCM processes (instantiate, terminate, etc)
+ # Same op-record pattern as "ns", but for network slices: nsilcmop "_id"
+ # is the slice operation id, "netsliceInstanceId" the slice record id.
+ if command == "instantiate":
+ # self.logger.debug("Instantiating Network Slice {}".format(nsilcmop["netsliceInstanceId"]))
+ nsilcmop = params
+ nsilcmop_id = nsilcmop["_id"] # slice operation id
+ nsir_id = nsilcmop["netsliceInstanceId"] # slice record id
+ task = asyncio.ensure_future(
+ self.netslice.instantiate(nsir_id, nsilcmop_id)
+ )
+ self.lcm_tasks.register(
+ "nsi", nsir_id, nsilcmop_id, "nsi_instantiate", task
+ )
+ return
+ elif command == "terminate":
+ # self.logger.debug("Terminating Network Slice NS {}".format(nsilcmop["netsliceInstanceId"]))
+ nsilcmop = params
+ nsilcmop_id = nsilcmop["_id"] # slice operation id
+ nsir_id = nsilcmop["netsliceInstanceId"] # slice record id
+ # Cancel any pending tasks for this slice before terminating it.
+ self.lcm_tasks.cancel(topic, nsir_id)
+ task = asyncio.ensure_future(
+ self.netslice.terminate(nsir_id, nsilcmop_id)
+ )
+ self.lcm_tasks.register(
+ "nsi", nsir_id, nsilcmop_id, "nsi_terminate", task
+ )
+ return
+ elif command == "show":
+ # Debug command: params is a bare nsir_id; dump the slice record and
+ # its registered tasks to stdout.
+ nsir_id = params
+ try:
+ db_nsir = self.db.get_one("nsirs", {"_id": nsir_id})
+ print(
+ "nsir:\n _id={}\n operational-status: {}\n config-status: {}"
+ "\n detailed-status: {}\n deploy: {}\n tasks: {}"
+ "".format(
+ nsir_id,
+ db_nsir["operational-status"],
+ db_nsir["config-status"],
+ db_nsir["detailed-status"],
+ db_nsir["_admin"]["deployed"],
+ self.lcm_tasks.task_registry["nsi"].get(nsir_id, ""),
+ )
+ )
+ except Exception as e:
+ print("nsir {} not found: {}".format(nsir_id, e))
+ sys.stdout.flush()
+ return
+ elif command == "deleted":
+ return # TODO cleaning of task just in case should be done
+ # Terminal/notification commands: no-op.
+ elif command in (
+ "terminated",
+ "instantiated",
+ "scaled",
+ "healed",
+ "actioned",
+ ): # "scaled-cooldown-time"
+ return
+ elif topic == "vim_account":
+ # NOTE(review): here "_id" is accessed directly (KeyError on a malformed
+ # message) whereas k8scluster/vca/k8srepo use params.get("_id").
+ vim_id = params["_id"]
+ if command in ("create", "created"):
+ # Only the legacy (non-NG) RO path spawns a create task here;
+ # presumably NG-RO handles VIM registration itself — confirm.
+ if not self.main_config.RO.ng:
+ task = asyncio.ensure_future(self.vim.create(params, order_id))
+ self.lcm_tasks.register(
+ "vim_account", vim_id, order_id, "vim_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.lcm_tasks.cancel(topic, vim_id)
+ task = asyncio.ensure_future(self.vim.delete(params, order_id))
+ self.lcm_tasks.register(
+ "vim_account", vim_id, order_id, "vim_delete", task
+ )
+ return
+ elif command == "show":
+ print("not implemented show with vim_account")
+ sys.stdout.flush()
+ return
+ elif command in ("edit", "edited"):
+ if not self.main_config.RO.ng:
+ task = asyncio.ensure_future(self.vim.edit(params, order_id))
+ self.lcm_tasks.register(
+ "vim_account", vim_id, order_id, "vim_edit", task
+ )
+ return
+ # NOTE(review): unreachable — "deleted" is already matched by the
+ # 'command == "delete" or command == "deleted"' branch above. Dead code.
+ elif command == "deleted":
+ return # TODO cleaning of task just in case should be done
+ elif topic == "wim_account":
+ wim_id = params["_id"]
+ if command in ("create", "created"):
+ # Create only on the legacy (non-NG) RO path, mirroring vim_account.
+ if not self.main_config.RO.ng:
+ task = asyncio.ensure_future(self.wim.create(params, order_id))
+ self.lcm_tasks.register(
+ "wim_account", wim_id, order_id, "wim_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.lcm_tasks.cancel(topic, wim_id)
+ task = asyncio.ensure_future(self.wim.delete(params, order_id))
+ self.lcm_tasks.register(
+ "wim_account", wim_id, order_id, "wim_delete", task
+ )
+ return
+ elif command == "show":
+ print("not implemented show with wim_account")
+ sys.stdout.flush()
+ return
+ elif command in ("edit", "edited"):
+ # NOTE(review): edit is unconditional here, while vim_account gates
+ # edit on `not self.main_config.RO.ng` — confirm the asymmetry is wanted.
+ task = asyncio.ensure_future(self.wim.edit(params, order_id))
+ self.lcm_tasks.register(
+ "wim_account", wim_id, order_id, "wim_edit", task
+ )
+ return
+ # NOTE(review): unreachable — "deleted" already matched above. Dead code.
+ elif command == "deleted":
+ return # TODO cleaning of task just in case should be done
+ elif topic == "sdn":
+ _sdn_id = params["_id"]
+ if command in ("create", "created"):
+ # Create only on the legacy (non-NG) RO path, mirroring vim/wim.
+ if not self.main_config.RO.ng:
+ task = asyncio.ensure_future(self.sdn.create(params, order_id))
+ self.lcm_tasks.register(
+ "sdn", _sdn_id, order_id, "sdn_create", task
+ )
+ return
+ elif command == "delete" or command == "deleted":
+ self.lcm_tasks.cancel(topic, _sdn_id)
+ task = asyncio.ensure_future(self.sdn.delete(params, order_id))
+ self.lcm_tasks.register("sdn", _sdn_id, order_id, "sdn_delete", task)
+ return
+ elif command in ("edit", "edited"):
+ task = asyncio.ensure_future(self.sdn.edit(params, order_id))
+ self.lcm_tasks.register("sdn", _sdn_id, order_id, "sdn_edit", task)
+ return
+ # NOTE(review): unreachable — "deleted" already matched above. Dead code.
+ elif command == "deleted":
+ return # TODO cleaning of task just in case should be done
+ # Fallthrough: no branch returned. This also fires for a KNOWN topic whose
+ # command was not handled, not only for unknown topics.
+ self.logger.critical("unknown topic {} and command '{}'".format(topic, command))
+
+ async def kafka_read(self):
+ self.logger.debug(
+ "Task kafka_read Enter with worker_id={}".format(self.worker_id)
+ )