lcm.py: adopt LcmCfg configuration object, remove explicit event loop handling, add heal/verticalscale and nslcmop cancel support
diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py
index 82947e9..9b62d82 100644
--- a/osm_lcm/lcm.py
+++ b/osm_lcm/lcm.py
@@ -19,7 +19,6 @@
 
 
 # DEBUG WITH PDB
-import os
 import pdb
 
 import asyncio
@@ -28,6 +27,7 @@ import logging
 import logging.handlers
 import getopt
 import sys
+from random import SystemRandom
 
 from osm_lcm import ns, vim_sdn, netslice
 from osm_lcm.ng_ro import NgRoException, NgRoClient
@@ -44,12 +44,13 @@ from osm_common.fsbase import FsException
 from osm_common.msgbase import MsgException
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
-from os import environ, path
-from random import choice as random_choice
+from osm_lcm.data_utils.lcm_config import LcmCfg
+from osm_lcm.lcm_hc import get_health_check_file
+from os import path, getenv
 from n2vc import version as n2vc_version
 import traceback
 
-if os.getenv("OSMLCM_PDB_DEBUG", None) is not None:
+if getenv("OSMLCM_PDB_DEBUG", None) is not None:
     pdb.set_trace()
 
 
@@ -58,26 +59,17 @@ min_RO_version = "6.0.2"
 min_n2vc_version = "0.0.2"
 
 min_common_version = "0.1.19"
-health_check_file = (
-    path.expanduser("~") + "/time_last_ping"
-)  # TODO find better location for this file
 
 
 class Lcm:
-
     ping_interval_pace = (
         120  # seconds between pings once all components are confirmed running
     )
     ping_interval_boot = 5  # seconds between pings while booting
-    cfg_logger_name = {
-        "message": "lcm.msg",
-        "database": "lcm.db",
-        "storage": "lcm.fs",
-        "tsdb": "lcm.prometheus",
-    }
-    # ^ contains for each section at lcm.cfg the used logger name
-
-    def __init__(self, config_file, loop=None):
+
+    main_config = LcmCfg()
+
+    def __init__(self, config_file):
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
@@ -97,27 +89,12 @@ class Lcm:
         self.worker_id = self.get_process_id()
         # load configuration
         config = self.read_config_file(config_file)
-        self.config = config
-        self.config["ro_config"] = {
-            "ng": config["RO"].get("ng", False),
-            "uri": config["RO"].get("uri"),
-            "tenant": config.get("tenant", "osm"),
-            "logger_name": "lcm.roclient",
-            "loglevel": config["RO"].get("loglevel", "ERROR"),
-        }
-        if not self.config["ro_config"]["uri"]:
-            self.config["ro_config"]["uri"] = "http://{}:{}/".format(
-                config["RO"]["host"], config["RO"]["port"]
-            )
-        elif (
-            "/ro" in self.config["ro_config"]["uri"][-4:]
-            or "/openmano" in self.config["ro_config"]["uri"][-10:]
-        ):
-            # uri ends with '/ro', '/ro/', '/openmano', '/openmano/'
-            index = self.config["ro_config"]["uri"][-1].rfind("/")
-            self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index + 1]
-
-        self.loop = loop or asyncio.get_event_loop()
+        self.main_config.set_from_dict(config)
+        self.main_config.transform()
+        self.main_config.load_from_env()
+        self.logger.critical("Loaded configuration: " + str(self.main_config.to_dict()))
+        # TODO: check if lcm_hc.py is necessary
+        self.health_check_file = get_health_check_file(self.main_config.to_dict())
         self.ns = (
             self.netslice
         ) = (
@@ -131,35 +108,35 @@ class Lcm:
         log_formatter_simple = logging.Formatter(
             log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
         )
-        config["database"]["logger_name"] = "lcm.db"
-        config["storage"]["logger_name"] = "lcm.fs"
-        config["message"]["logger_name"] = "lcm.msg"
-        if config["global"].get("logfile"):
+        if self.main_config.globalConfig.logfile:
             file_handler = logging.handlers.RotatingFileHandler(
-                config["global"]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                self.main_config.globalConfig.logfile,
+                maxBytes=100e6,
+                backupCount=9,
+                delay=0,
             )
             file_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(file_handler)
-        if not config["global"].get("nologging"):
+        if not self.main_config.globalConfig.to_dict()["nologging"]:
             str_handler = logging.StreamHandler()
             str_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(str_handler)
 
-        if config["global"].get("loglevel"):
-            self.logger.setLevel(config["global"]["loglevel"])
+        if self.main_config.globalConfig.to_dict()["loglevel"]:
+            self.logger.setLevel(self.main_config.globalConfig.loglevel)
 
         # logging other modules
-        for k1, logname in self.cfg_logger_name.items():
-            config[k1]["logger_name"] = logname
-            logger_module = logging.getLogger(logname)
-            if config[k1].get("logfile"):
+        for logger in ("message", "database", "storage", "tsdb"):
+            logger_config = self.main_config.to_dict()[logger]
+            logger_module = logging.getLogger(logger_config["logger_name"])
+            if logger_config["logfile"]:
                 file_handler = logging.handlers.RotatingFileHandler(
-                    config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                    logger_config["logfile"], maxBytes=100e6, backupCount=9, delay=0
                 )
                 file_handler.setFormatter(log_formatter_simple)
                 logger_module.addHandler(file_handler)
-            if config[k1].get("loglevel"):
-                logger_module.setLevel(config[k1]["loglevel"])
+            if logger_config["loglevel"]:
+                logger_module.setLevel(logger_config["loglevel"])
         self.logger.critical(
             "starting osm/lcm version {} {}".format(lcm_version, lcm_version_date)
         )
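
For reference, the loop above configures one rotating file handler per config section. A self-contained sketch of the same pattern, with hypothetical section dicts standing in for what `main_config.to_dict()` is assumed to return:

```python
import logging
import logging.handlers

# Hypothetical stand-ins for the "message"/"database"/"storage"/"tsdb"
# sections of main_config.to_dict(); keys mirror the lookups above.
sections = {
    "message": {"logger_name": "lcm.msg", "logfile": None, "loglevel": "DEBUG"},
    "database": {"logger_name": "lcm.db", "logfile": "/tmp/lcm_db.log", "loglevel": "INFO"},
}

formatter = logging.Formatter(
    "%(asctime)s %(levelname)s %(name)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"
)

for cfg in sections.values():
    logger_module = logging.getLogger(cfg["logger_name"])
    if cfg["logfile"]:
        # rotate at ~100 MB and keep 9 backups, matching the handler above
        handler = logging.handlers.RotatingFileHandler(
            cfg["logfile"], maxBytes=int(100e6), backupCount=9, delay=0
        )
        handler.setFormatter(formatter)
        logger_module.addHandler(handler)
    if cfg["loglevel"]:
        logger_module.setLevel(cfg["loglevel"])
```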
@@ -182,14 +159,14 @@ class Lcm:
             )
 
         try:
-            self.db = Database(config).instance.db
+            self.db = Database(self.main_config.to_dict()).instance.db
 
-            self.fs = Filesystem(config).instance.fs
+            self.fs = Filesystem(self.main_config.to_dict()).instance.fs
             self.fs.sync()
 
             # copy message configuration in order to remove 'group_id' for msg_admin
-            config_message = config["message"].copy()
-            config_message["loop"] = self.loop
+            config_message = self.main_config.message.to_dict()
+            config_message["loop"] = asyncio.get_event_loop()
             if config_message["driver"] == "local":
                 self.msg = msglocal.MsgLocal()
                 self.msg.connect(config_message)
@@ -205,7 +182,7 @@ class Lcm:
             else:
                 raise LcmException(
                     "Invalid configuration param '{}' at '[message]':'driver'".format(
-                        config["message"]["driver"]
+                        self.main_config.message.driver
                     )
                 )
         except (DbException, FsException, MsgException) as e:
@@ -219,19 +196,21 @@ class Lcm:
         tries = 14
         last_error = None
         while True:
-            ro_uri = self.config["ro_config"]["uri"]
+            ro_uri = self.main_config.RO.uri
+            if not ro_uri:
+                ro_uri = ""
             try:
                 # try the new RO first; fall back to the old RO on failure
                 try:
-                    self.config["ro_config"]["uri"] = ro_uri + "ro"
-                    ro_server = NgRoClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "ro"
+                    ro_server = NgRoClient(**self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = True
+                    self.main_config.RO.ng = True
                 except Exception:
-                    self.config["ro_config"]["uri"] = ro_uri + "openmano"
-                    ro_server = ROClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "openmano"
+                    ro_server = ROClient(**self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = False
+                    self.main_config.RO.ng = False
                 if versiontuple(ro_version) < versiontuple(min_RO_version):
                     raise LcmException(
                         "Not compatible osm/RO version '{}'. Needed '{}' or higher".format(
@@ -240,16 +219,16 @@ class Lcm:
                     )
                 self.logger.info(
                     "Connected to RO version {} new-generation version {}".format(
-                        ro_version, self.config["ro_config"]["ng"]
+                        ro_version, self.main_config.RO.ng
                     )
                 )
                 return
             except (ROClientException, NgRoException) as e:
-                self.config["ro_config"]["uri"] = ro_uri
+                self.main_config.RO.uri = ro_uri
                 tries -= 1
                 traceback.print_tb(e.__traceback__)
                 error_text = "Error while connecting to RO on {}: {}".format(
-                    self.config["ro_config"]["uri"], e
+                    self.main_config.RO.uri, e
                 )
                 if tries <= 0:
                     self.logger.critical(error_text)
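
`versiontuple` is not shown in this diff; a plausible sketch of the comparison it implies, treating dotted version strings as integer tuples (an assumption, not necessarily the project's actual helper):

```python
def versiontuple(v: str) -> tuple:
    """Turn '6.0.2' into (6, 0, 2) so versions compare numerically."""
    return tuple(int(part) for part in v.split("."))

assert versiontuple("6.0.10") > versiontuple("6.0.2")
assert "6.0.10" < "6.0.2"  # lexicographic string comparison gets this wrong
```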
@@ -281,7 +260,6 @@ class Lcm:
                         "worker_id": self.worker_id,
                         "version": lcm_version,
                     },
-                    self.loop,
                 )
                 # time between pings are low when it is not received and at starting
                 wait_time = (
@@ -292,7 +270,7 @@ class Lcm:
                 if not self.pings_not_received:
                     kafka_has_received = True
                 self.pings_not_received += 1
-                await asyncio.sleep(wait_time, loop=self.loop)
+                await asyncio.sleep(wait_time)
                 if self.pings_not_received > 10:
                     raise LcmException("It is not receiving pings from Kafka bus")
                 consecutive_errors = 0
@@ -314,9 +292,9 @@ class Lcm:
                     "Task kafka_read retrying after Exception {}".format(e)
                 )
                 wait_time = 2 if not first_start else 5
-                await asyncio.sleep(wait_time, loop=self.loop)
+                await asyncio.sleep(wait_time)
 
-    def kafka_read_callback(self, topic, command, params):
+    async def kafka_read_callback(self, topic, command, params):
         order_id = 1
 
         if topic != "admin" and command != "ping":
@@ -336,7 +314,7 @@ class Lcm:
             sys.stdout.flush()
             return
         elif command == "test":
-            asyncio.Task(self.test(params), loop=self.loop)
+            asyncio.Task(self.test(params))
             return
 
         if topic == "admin":
@@ -345,15 +323,53 @@ class Lcm:
                     return
                 self.pings_not_received = 0
                 try:
-                    with open(health_check_file, "w") as f:
+                    with open(self.health_check_file, "w") as f:
                         f.write(str(time()))
                 except Exception as e:
                     self.logger.error(
                         "Cannot write into '{}' for healthcheck: {}".format(
-                            health_check_file, e
+                            self.health_check_file, e
                         )
                     )
             return
+        elif topic == "nslcmops":
+            if command == "cancel":
+                nslcmop_id = params["_id"]
+                self.logger.debug("Cancelling nslcmop {}".format(nslcmop_id))
+                nsr_id = params["nsInstanceId"]
+                # cancel the tasks and wait
+                for task in self.lcm_tasks.cancel("ns", nsr_id, nslcmop_id):
+                    try:
+                        await task
+                        self.logger.debug(
+                            "Cancelled task ended {},{},{}".format(
+                                nsr_id, nslcmop_id, task
+                            )
+                        )
+                    except asyncio.CancelledError:
+                        self.logger.debug(
+                            "Task already cancelled and finished {},{},{}".format(
+                                nsr_id, nslcmop_id, task
+                            )
+                        )
+                # update DB
+                q_filter = {"_id": nslcmop_id}
+                update_dict = {
+                    "operationState": "FAILED_TEMP",
+                    "isCancelPending": False,
+                }
+                unset_dict = {
+                    "cancelMode": None,
+                }
+                self.db.set_one(
+                    "nslcmops",
+                    q_filter=q_filter,
+                    update_dict=update_dict,
+                    fail_on_empty=False,
+                    unset=unset_dict,
+                )
+                self.logger.debug("LCM task cancelled {},{}".format(nsr_id, nslcmop_id))
+            return
         elif topic == "pla":
             if command == "placement":
                 self.ns.update_nsrs_with_pla_result(params)
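
The new cancel branch awaits each task returned by `lcm_tasks.cancel()` and treats `CancelledError` as normal completion. The same pattern in isolation (`long_running_operation` is a hypothetical stand-in for a registered LCM task):

```python
import asyncio


async def long_running_operation():
    # stands in for an instantiate/terminate task registered in lcm_tasks
    await asyncio.sleep(3600)


async def main():
    task = asyncio.ensure_future(long_running_operation())
    await asyncio.sleep(0)  # give the task a chance to start
    task.cancel()           # request cancellation, as lcm_tasks.cancel() does
    try:
        await task          # wait for the task to actually finish
        print("task ended on its own")
    except asyncio.CancelledError:
        print("task already cancelled and finished")


asyncio.run(main())
```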
@@ -366,6 +382,13 @@ class Lcm:
                     "k8scluster", k8scluster_id, order_id, "k8scluster_create", task
                 )
                 return
+            elif command == "edit" or command == "edited":
+                k8scluster_id = params.get("_id")
+                task = asyncio.ensure_future(self.k8scluster.edit(params, order_id))
+                self.lcm_tasks.register(
+                    "k8scluster", k8scluster_id, order_id, "k8scluster_edit", task
+                )
+                return
             elif command == "delete" or command == "deleted":
                 k8scluster_id = params.get("_id")
                 task = asyncio.ensure_future(self.k8scluster.delete(params, order_id))
@@ -379,6 +402,11 @@ class Lcm:
                 task = asyncio.ensure_future(self.vca.create(params, order_id))
                 self.lcm_tasks.register("vca", vca_id, order_id, "vca_create", task)
                 return
+            elif command == "edit" or command == "edited":
+                vca_id = params.get("_id")
+                task = asyncio.ensure_future(self.vca.edit(params, order_id))
+                self.lcm_tasks.register("vca", vca_id, order_id, "vca_edit", task)
+                return
             elif command == "delete" or command == "deleted":
                 vca_id = params.get("_id")
                 task = asyncio.ensure_future(self.vca.delete(params, order_id))
@@ -455,6 +483,14 @@ class Lcm:
                 task = asyncio.ensure_future(self.ns.scale(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_scale", task)
                 return
+            elif command == "heal":
+                # self.logger.debug("Healing NS {}".format(nsr_id))
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
+                task = asyncio.ensure_future(self.ns.heal(nsr_id, nslcmop_id))
+                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_heal", task)
+                return
             elif command == "migrate":
                 nslcmop = params
                 nslcmop_id = nslcmop["_id"]
@@ -462,6 +498,21 @@ class Lcm:
                 task = asyncio.ensure_future(self.ns.migrate(nsr_id, nslcmop_id))
                 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_migrate", task)
                 return
+            elif command == "verticalscale":
+                nslcmop = params
+                nslcmop_id = nslcmop["_id"]
+                nsr_id = nslcmop["nsInstanceId"]
+                task = asyncio.ensure_future(self.ns.vertical_scale(nsr_id, nslcmop_id))
+                self.logger.debug(
+                    "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id, nslcmop_id, task)
+                )
+                self.lcm_tasks.register(
+                    "ns", nsr_id, nslcmop_id, "ns_verticalscale", task
+                )
+                self.logger.debug(
+                    "LCM task registered {},{},{} ".format(nsr_id, nslcmop_id, task)
+                )
+                return
             elif command == "show":
                 nsr_id = params
                 try:
@@ -475,7 +526,7 @@ class Lcm:
                             db_nsr["config-status"],
                             db_nsr["detailed-status"],
                             db_nsr["_admin"]["deployed"],
-                            self.lcm_ns_tasks.get(nsr_id),
+                            self.lcm_tasks.task_registry["ns"].get(nsr_id, ""),
                         )
                     )
                 except Exception as e:
@@ -486,12 +537,15 @@ class Lcm:
                 return  # TODO cleaning of task just in case should be done
             elif command in (
                 "vnf_terminated",
+                "policy_updated",
                 "terminated",
                 "instantiated",
                 "scaled",
+                "healed",
                 "actioned",
                 "updated",
                 "migrated",
+                "verticalscaled",
             ):  # "scaled-cooldown-time"
                 return
 
@@ -534,7 +588,7 @@ class Lcm:
                             db_nsir["config-status"],
                             db_nsir["detailed-status"],
                             db_nsir["_admin"]["deployed"],
-                            self.lcm_netslice_tasks.get(nsir_id),
+                            self.lcm_tasks.task_registry["nsi"].get(nsir_id, ""),
                         )
                     )
                 except Exception as e:
@@ -547,13 +601,14 @@ class Lcm:
                 "terminated",
                 "instantiated",
                 "scaled",
+                "healed",
                 "actioned",
             ):  # "scaled-cooldown-time"
                 return
         elif topic == "vim_account":
             vim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.create(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_create", task
@@ -571,7 +626,7 @@ class Lcm:
                 sys.stdout.flush()
                 return
             elif command in ("edit", "edited"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.edit(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_edit", task
@@ -582,7 +637,7 @@ class Lcm:
         elif topic == "wim_account":
             wim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.wim.create(params, order_id))
                     self.lcm_tasks.register(
                         "wim_account", wim_id, order_id, "wim_create", task
@@ -610,7 +665,7 @@ class Lcm:
         elif topic == "sdn":
             _sdn_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.sdn.create(params, order_id))
                     self.lcm_tasks.register(
                         "sdn", _sdn_id, order_id, "sdn_create", task
@@ -633,7 +688,6 @@ class Lcm:
         self.logger.debug(
             "Task kafka_read Enter with worker_id={}".format(self.worker_id)
         )
-        # future = asyncio.Future()
         self.consecutive_errors = 0
         self.first_start = True
         while self.consecutive_errors < 10:
@@ -648,16 +702,18 @@ class Lcm:
                     "vca",
                     "k8srepo",
                     "pla",
+                    "nslcmops",
                 )
                 topics_admin = ("admin",)
                 await asyncio.gather(
                     self.msg.aioread(
-                        topics, self.loop, self.kafka_read_callback, from_beginning=True
+                        topics,
+                        aiocallback=self.kafka_read_callback,
+                        from_beginning=True,
                     ),
                     self.msg_admin.aioread(
                         topics_admin,
-                        self.loop,
-                        self.kafka_read_callback,
+                        aiocallback=self.kafka_read_callback,
                         group_id=False,
                     ),
                 )
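
Both `aioread` calls now receive the coroutine `kafka_read_callback` through the `aiocallback` keyword and run concurrently under `asyncio.gather`. A generic sketch of that fan-in pattern, with `fake_aioread` as a stand-in for osm_common's actual reader:

```python
import asyncio


async def fake_aioread(topics, aiocallback):
    # stand-in for msgbase.aioread: deliver one message per topic
    for topic in topics:
        await aiocallback(topic, "ping", {"from": "lcm", "to": "lcm"})


async def on_message(topic, command, params):
    print(f"{topic}/{command}: {params}")


async def main():
    await asyncio.gather(
        fake_aioread(("ns", "vim_account", "nslcmops"), aiocallback=on_message),
        fake_aioread(("admin",), aiocallback=on_message),
    )


asyncio.run(main())
```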
@@ -680,34 +736,34 @@ class Lcm:
                     "Task kafka_read retrying after Exception {}".format(e)
                 )
                 wait_time = 2 if not self.first_start else 5
-                await asyncio.sleep(wait_time, loop=self.loop)
+                await asyncio.sleep(wait_time)
 
-        # self.logger.debug("Task kafka_read terminating")
         self.logger.debug("Task kafka_read exit")
 
-    def start(self):
+    async def kafka_read_ping(self):
+        await asyncio.gather(self.kafka_read(), self.kafka_ping())
 
+    async def start(self):
         # check RO version
-        self.loop.run_until_complete(self.check_RO_version())
+        await self.check_RO_version()
 
-        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.config, self.loop)
+        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.main_config)
+        # TODO: modify the rest of classes to use the LcmCfg object instead of dicts
         self.netslice = netslice.NetsliceLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop, self.ns
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.ns
         )
-        self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop)
+        self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
+        self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
+        self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
         self.k8scluster = vim_sdn.K8sClusterLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict()
         )
-        self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop)
+        self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.main_config.to_dict())
         self.k8srepo = vim_sdn.K8sRepoLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict()
         )
 
-        self.loop.run_until_complete(
-            asyncio.gather(self.kafka_read(), self.kafka_ping())
-        )
+        await self.kafka_read_ping()
 
         # TODO
         # self.logger.debug("Terminating cancelling creation tasks")
@@ -715,12 +771,10 @@ class Lcm:
         # timeout = 200
         # while self.is_pending_tasks():
         #     self.logger.debug("Task kafka_read terminating. Waiting for tasks termination")
-        #     await asyncio.sleep(2, loop=self.loop)
+        #     await asyncio.sleep(2)
         #     timeout -= 2
         #     if not timeout:
         #         self.lcm_tasks.cancel("ALL", "ALL")
-        self.loop.close()
-        self.loop = None
         if self.db:
             self.db.db_disconnect()
         if self.msg:
@@ -731,67 +785,9 @@ class Lcm:
             self.fs.fs_disconnect()
 
     def read_config_file(self, config_file):
-        # TODO make a [ini] + yaml inside parser
-        # the configparser library is not suitable, because it does not admit comments at the end of line,
-        # and not parse integer or boolean
         try:
-            # read file as yaml format
             with open(config_file) as f:
-                conf = yaml.load(f, Loader=yaml.Loader)
-            # Ensure all sections are not empty
-            for k in (
-                "global",
-                "timeout",
-                "RO",
-                "VCA",
-                "database",
-                "storage",
-                "message",
-            ):
-                if not conf.get(k):
-                    conf[k] = {}
-
-            # read all environ that starts with OSMLCM_
-            for k, v in environ.items():
-                if not k.startswith("OSMLCM_"):
-                    continue
-                subject, _, item = k[7:].lower().partition("_")
-                if not item:
-                    continue
-                if subject in ("ro", "vca"):
-                    # put in capital letter
-                    subject = subject.upper()
-                try:
-                    if item == "port" or subject == "timeout":
-                        conf[subject][item] = int(v)
-                    else:
-                        conf[subject][item] = v
-                except Exception as e:
-                    self.logger.warning(
-                        "skipping environ '{}' on exception '{}'".format(k, e)
-                    )
-
-            # backward compatibility of VCA parameters
-
-            if "pubkey" in conf["VCA"]:
-                conf["VCA"]["public_key"] = conf["VCA"].pop("pubkey")
-            if "cacert" in conf["VCA"]:
-                conf["VCA"]["ca_cert"] = conf["VCA"].pop("cacert")
-            if "apiproxy" in conf["VCA"]:
-                conf["VCA"]["api_proxy"] = conf["VCA"].pop("apiproxy")
-
-            if "enableosupgrade" in conf["VCA"]:
-                conf["VCA"]["enable_os_upgrade"] = conf["VCA"].pop("enableosupgrade")
-            if isinstance(conf["VCA"].get("enable_os_upgrade"), str):
-                if conf["VCA"]["enable_os_upgrade"].lower() == "false":
-                    conf["VCA"]["enable_os_upgrade"] = False
-                elif conf["VCA"]["enable_os_upgrade"].lower() == "true":
-                    conf["VCA"]["enable_os_upgrade"] = True
-
-            if "aptmirror" in conf["VCA"]:
-                conf["VCA"]["apt_mirror"] = conf["VCA"].pop("aptmirror")
-
-            return conf
+                return yaml.safe_load(f)
         except Exception as e:
             self.logger.critical("At config file '{}': {}".format(config_file, e))
             exit(1)
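
`yaml.safe_load` only builds plain Python types, whereas `yaml.load` with `yaml.Loader` will construct arbitrary objects from tagged nodes. A quick illustration:

```python
import yaml

text = """
global:
  loglevel: DEBUG
RO:
  port: 9090
"""

conf = yaml.safe_load(text)  # plain dicts, strings and ints only
assert conf["RO"]["port"] == 9090

# A tagged document such as "!!python/object/apply:os.system ['true']"
# raises yaml.constructor.ConstructorError under safe_load instead of
# executing code, which is why safe_load is preferred for config files.
```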
@@ -803,18 +799,22 @@ class Lcm:
         will provide a random one
         :return: Obtained ID
         """
-        # Try getting docker id. If fails, get pid
-        try:
-            with open("/proc/self/cgroup", "r") as f:
-                text_id_ = f.readline()
-                _, _, text_id = text_id_.rpartition("/")
-                text_id = text_id.replace("\n", "")[:12]
-                if text_id:
-                    return text_id
-        except Exception:
-            pass
-        # Return a random id
-        return "".join(random_choice("0123456789abcdef") for _ in range(12))
+
+        def get_docker_id():
+            try:
+                with open("/proc/self/cgroup", "r") as f:
+                    text_id_ = f.readline()
+                    _, _, text_id = text_id_.rpartition("/")
+                    return text_id.replace("\n", "")[:12]
+            except Exception:
+                return None
+
+        def generate_random_id():
+            return "".join(SystemRandom().choice("0123456789abcdef") for _ in range(12))
+
+        # Try getting docker id. If it fails, generate a random id
+        docker_id = get_docker_id()
+        return docker_id if docker_id else generate_random_id()
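
`SystemRandom` draws from `os.urandom` instead of the seedable Mersenne Twister, so concurrently started workers cannot collide by inheriting the same default seed. The fallback path in isolation:

```python
from random import SystemRandom


def generate_random_id(length: int = 12) -> str:
    rng = SystemRandom()  # backed by os.urandom, not a seedable PRNG
    return "".join(rng.choice("0123456789abcdef") for _ in range(length))


print(generate_random_id())  # e.g. '3fa9c01d7b2e'
```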
 
 
 def usage():
@@ -832,7 +832,6 @@ def usage():
 
 
 if __name__ == "__main__":
-
     try:
         # print("SYS.PATH='{}'".format(sys.path))
         # load parameters and configuration
@@ -855,15 +854,10 @@ if __name__ == "__main__":
             elif o == "--health-check":
                 from osm_lcm.lcm_hc import health_check
 
-                health_check(health_check_file, Lcm.ping_interval_pace)
-            # elif o == "--log-socket-port":
-            #     log_socket_port = a
-            # elif o == "--log-socket-host":
-            #     log_socket_host = a
-            # elif o == "--log-file":
-            #     log_file = a
+                health_check(config_file, Lcm.ping_interval_pace)
             else:
-                assert False, "Unhandled option"
+                print(f"Unhandled option: {o}")
+                exit(1)
 
         if config_file:
             if not path.isfile(config_file):
@@ -887,7 +881,7 @@ if __name__ == "__main__":
                 )
                 exit(1)
         lcm = Lcm(config_file)
-        lcm.start()
+        asyncio.run(lcm.start())
     except (LcmException, getopt.GetoptError) as e:
         print(str(e), file=sys.stderr)
         # usage()
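
With `start()` now a coroutine and every `self.loop` reference gone, the entry point lets `asyncio.run` own the event loop. The migration pattern in miniature (`Service` is a generic stand-in, not OSM code):

```python
import asyncio


class Service:
    async def start(self):
        await asyncio.sleep(0.1)
        print("running")


# old style: the caller owns the loop
# loop = asyncio.get_event_loop()
# loop.run_until_complete(Service().start())
# loop.close()

# new style: asyncio.run creates, runs and closes the loop
asyncio.run(Service().start())
```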