Feature 10929: LCM saga, Milestone 1. 84/12684/14 release-v13.0-start
author    Luis Vega <lvega@whitestack.com>
Fri, 11 Nov 2022 20:10:49 +0000 (20:10 +0000)
committer Gabriel Cuba <gcuba@whitestack.com>
Fri, 25 Nov 2022 04:11:48 +0000 (23:11 -0500)
Move configurations to a ConfigMan-based class, using our own subclass (OsmConfigman).

Change-Id: Ia8f4e435d20ac88540aec071ca5cab9e70f112a6
Signed-off-by: Luis Vega <lvega@whitestack.com>
13 files changed:
osm_lcm/data_utils/lcm_config.py [new file with mode: 0644]
osm_lcm/lcm.cfg
osm_lcm/lcm.py
osm_lcm/lcm_helm_conn.py
osm_lcm/netslice.py
osm_lcm/ns.py
osm_lcm/tests/test_lcm.py
osm_lcm/tests/test_lcm_config_file_ini.cfg [new file with mode: 0644]
osm_lcm/tests/test_lcm_helm_conn.py
osm_lcm/tests/test_ns.py
osm_lcm/vim_sdn.py
requirements.in
requirements.txt
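
A minimal usage sketch of the new typed configuration (hypothetical snippet, relying only on names visible in this diff — LcmCfg, set_from_dict(), transform(), load_from_env()):

    from osm_lcm.data_utils.lcm_config import LcmCfg

    config = LcmCfg()
    config.set_from_dict({"RO": {"host": "ro", "port": 9090}})
    config.transform()      # derives RO.uri and the lcm.* logger names
    # config.RO.uri is now "http://ro:9090/"
    config.load_from_env()  # OSMLCM_* environment variables override file values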

diff --git a/osm_lcm/data_utils/lcm_config.py b/osm_lcm/data_utils/lcm_config.py
new file mode 100644 (file)
index 0000000..13d3e68
--- /dev/null
+++ b/osm_lcm/data_utils/lcm_config.py
@@ -0,0 +1,236 @@
+# Copyright 2022 Whitestack, LLC
+# *************************************************************
+#
+# This file is part of OSM LCM module
+# All Rights Reserved to Whitestack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: lvega@whitestack.com
+##
+
+from configman import ConfigMan
+from glom import glom, assign
+
+
+class OsmConfigman(ConfigMan):
+    def __init__(self, config_dict=None):
+        super().__init__()
+        self.set_from_dict(config_dict)
+        self.set_auto_env("OSMLCM")
+
+    def get(self, key, defaultValue=None):
+        return self.to_dict().get(key, defaultValue)
+
+    def set_from_dict(self, config_dict):
+        def func(attr_path: str, _: type) -> None:
+            conf_val = glom(config_dict, attr_path, default=None)
+            if conf_val is not None:
+                assign(self, attr_path, conf_val)
+
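+        # walk every annotated attribute path (helper inherited from config-man's ConfigMan; note the upstream spelling)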
+        self._run_func_for_all_premitives(func)
+
+    def _get_env_name(self, path: str, prefix: str = None) -> str:
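+        # e.g. _get_env_name("database.port", "OSMLCM") -> "OSMLCM_DATABASE_PORT"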
+        path_parts = path.split(".")
+        if prefix is not None:
+            path_parts.insert(0, prefix)
+        return "_".join(path_parts).upper()
+
+    def transform(self):
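+        # hook for subclasses to derive fields (e.g. RoConfig builds its uri, DatabaseConfig sets its logger_name)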
+        pass
+
+
+# Configs from lcm.cfg
+
+
+class GlobalConfig(OsmConfigman):
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    nologging: bool = False
+
+
+class Timeout(OsmConfigman):
+    nsi_deploy: int = None
+    vca_on_error: int = (
+        5 * 60
+    )  # Time from when a charm first reaches blocked/error status until it is marked as failed
+    ns_deploy: int = 2 * 3600  # default global timeout for deploying a ns
+    ns_terminate: int = 1800  # default global timeout for undeploying a ns
+    ns_heal: int = 1800  # default global timeout for healing a ns
+    charm_delete: int = 10 * 60
+    primitive: int = 30 * 60  # timeout for primitive execution
+    ns_update: int = 30 * 60  # timeout for ns update
+    progress_primitive: int = (
+        10 * 60
+    )  # timeout for some progress in a primitive execution
+    migrate: int = 1800  # default global timeout for migrating vnfs
+    operate: int = 1800  # default global timeout for operating (start/stop/rebuild) vnfs
+    verticalscale: int = 1800  # default global timeout for vertical scaling
+    scale_on_error: int = (
+        5 * 60
+    )  # Time from when a charm first reaches blocked/error status until it is marked as failed
+    scale_on_error_outer_factor: float = 1.05  # Factor applied to scale_on_error for the outer asyncio.wait_for timeout
+    primitive_outer_factor: float = 1.05  # Factor applied to primitive for the outer asyncio.wait_for timeout
+
+
+class RoConfig(OsmConfigman):
+    host: str = None
+    ng: bool = False
+    port: int = None
+    uri: str = None
+    tenant: str = "osm"
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        if not self.uri:
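+            # e.g. host "ro" and port 9090 -> "http://ro:9090/"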
+            self.uri = "http://{}:{}/".format(self.host, self.port)
+        elif "/ro" in self.uri[-4:] or "/openmano" in self.uri[-10:]:
+            # uri ends with '/ro', '/ro/', '/openmano', '/openmano/'
+            index = self.uri[-1].rfind("/")
+            self.uri = self.uri[index + 1]
+        self.logger_name = "lcm.roclient"
+
+
+class VcaConfig(OsmConfigman):
+    host: str = None
+    port: int = None
+    user: str = None
+    secret: str = None
+    cloud: str = None
+    k8s_cloud: str = None
+    helmpath: str = None
+    helm3path: str = None
+    kubectlpath: str = None
+    jujupath: str = None
+    public_key: str = None
+    ca_cert: str = None
+    api_proxy: str = None
+    apt_mirror: str = None
+    eegrpcinittimeout: int = None
+    eegrpctimeout: int = None
+    eegrpc_tls_enforce: bool = False
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    ca_store: str = "/etc/ssl/certs/osm-ca.crt"
+    kubectl_osm_namespace: str = "osm"
+    kubectl_osm_cluster_name: str = "_system-osm-k8s"
+    helm_ee_service_port: int = 50050
+    helm_max_initial_retry_time: int = 600
+    helm_max_retry_time: int = 30  # Max retry time for normal operations
+    helm_ee_retry_delay: int = (
+        10  # time between retries; delay applied after a connection error is raised
+    )
+
+    def transform(self):
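+        # legacy eegrpc* settings, when present, override the helm_* retry defaults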
+        if self.eegrpcinittimeout:
+            self.helm_max_initial_retry_time = self.eegrpcinittimeout
+        if self.eegrpctimeout:
+            self.helm_max_retry_time = self.eegrpctimeout
+
+
+class DatabaseConfig(OsmConfigman):
+    driver: str = None
+    host: str = None
+    port: int = None
+    uri: str = None
+    name: str = None
+    replicaset: str = None
+    user: str = None
+    password: str = None
+    commonkey: str = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.db"
+
+
+class StorageConfig(OsmConfigman):
+    driver: str = None
+    path: str = "/app/storage"
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+    collection: str = None
+    uri: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.fs"
+
+
+class MessageConfig(OsmConfigman):
+    driver: str = None
+    path: str = None
+    host: str = None
+    port: int = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    group_id: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.msg"
+
+
+class TsdbConfig(OsmConfigman):
+    driver: str = None
+    path: str = None
+    uri: str = None
+    loglevel: str = "DEBUG"
+    logfile: str = None
+    logger_name: str = None
+
+    def transform(self):
+        self.logger_name = "lcm.prometheus"
+
+
+# Main configuration Template
+
+
+class LcmCfg(OsmConfigman):
+    globalConfig: GlobalConfig = GlobalConfig()
+    timeout: Timeout = Timeout()
+    RO: RoConfig = RoConfig()
+    VCA: VcaConfig = VcaConfig()
+    database: DatabaseConfig = DatabaseConfig()
+    storage: StorageConfig = StorageConfig()
+    message: MessageConfig = MessageConfig()
+    tsdb: TsdbConfig = TsdbConfig()
+
+    def transform(self):
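+        # cascade transform() into every nested config section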
+        for attribute in dir(self):
+            method = getattr(self, attribute)
+            if isinstance(method, OsmConfigman):
+                method.transform()
+
+
+class SubOperation(OsmConfigman):
+    STATUS_NOT_FOUND: int = -1
+    STATUS_NEW: int = -2
+    STATUS_SKIP: int = -3
+
+
+class LCMConfiguration(OsmConfigman):
+    suboperation: SubOperation = SubOperation()
+    task_name_deploy_vca = "Deploying VCA"
diff --git a/osm_lcm/lcm.cfg b/osm_lcm/lcm.cfg
index 2c04743..9f580da 100644 (file)
 
-# TODO currently is a pure yaml format. Consider to change it to [ini] style with yaml inside to be coherent with other modules
+# [ini]-style sections, with yaml-like "key: value" entries inside, coherent with other modules
 
-#[global]
-global:
+[global]
     loglevel: DEBUG
     # logfile:  /app/log  # or /var/log/osm/lcm.log
     # nologging: True     # do no log to stdout/stderr
 
-#[timeout]
-timeout:
+[timeout]
     # ns_deploy: 7200     # total deploy timeout for a ns 2 hours
     # nsi_deploy: 7200     # total deploy timeout for a nsi 2 hours
 
-#[RO]
-RO:
+[RO]
     host:   ro          # hostname or IP
     port:   9090
     tenant: osm
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-ro.log
 
-#[VCA]
-VCA:
+[VCA]
     host:   vca
     port:   17070
     user:   admin
@@ -48,17 +44,16 @@ VCA:
     kubectlpath: /usr/bin/kubectl
     jujupath:    /usr/local/bin/juju
     eegrpc_tls_enforce: False
-    # pubkey: pubkey
-    # cacert: cacert
-    # apiproxy: apiproxy
-    #eegrpcinittimeout: 600
-    #eegrpctimeout: 30
+    # public_key: pubkey
+    # ca_cert: cacert
+    # api_proxy: apiproxy
+    # eegrpcinittimeout: 600
+    # eegrpctimeout: 30
 
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-vca.log
 
-#[database]
-database:
+[database]
     driver: mongo       # mongo or memory
     host:   mongo       # hostname or IP
     port:   27017
@@ -70,16 +65,14 @@ database:
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-database.log
 
-#[storage]
-storage:
+[storage]
     driver: local       # local filesystem
     # for local provide file path
     path:   /app/storage
     # loglevel: DEBUG
     # logfile:  /var/log/osm/lcm-storage.log
 
-#[message]
-message:
+[message]
     driver:   kafka       # local or kafka
     # for local provide file path
     path:     /app/storage/kafka
@@ -90,7 +83,7 @@ message:
     # logfile:  /var/log/osm/lcm-message.log
     group_id: lcm-server
 
-tsdb:    # time series database
+[tsdb]    # time series database
     driver:   prometheus
     # local file to store the configuration
     path:     /etc/prometheus
diff --git a/osm_lcm/lcm.py b/osm_lcm/lcm.py
index 273edc1..9f0e310 100644 (file)
@@ -28,6 +28,7 @@ import logging
 import logging.handlers
 import getopt
 import sys
+import configparser
 
 from osm_lcm import ns, vim_sdn, netslice
 from osm_lcm.ng_ro import NgRoException, NgRoClient
@@ -44,8 +45,9 @@ from osm_common.fsbase import FsException
 from osm_common.msgbase import MsgException
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.lcm_hc import get_health_check_file
-from os import environ, path
+from os import path
 from random import choice as random_choice
 from n2vc import version as n2vc_version
 import traceback
@@ -67,13 +69,8 @@ class Lcm:
         120  # how many time ping is send once is confirmed all is running
     )
     ping_interval_boot = 5  # how many time ping is sent when booting
-    cfg_logger_name = {
-        "message": "lcm.msg",
-        "database": "lcm.db",
-        "storage": "lcm.fs",
-        "tsdb": "lcm.prometheus",
-    }
-    # ^ contains for each section at lcm.cfg the used logger name
+
+    main_config = LcmCfg()
 
     def __init__(self, config_file, loop=None):
         """
@@ -95,27 +92,12 @@ class Lcm:
         self.worker_id = self.get_process_id()
         # load configuration
         config = self.read_config_file(config_file)
-        self.config = config
-        self.health_check_file = get_health_check_file(self.config)
-        self.config["ro_config"] = {
-            "ng": config["RO"].get("ng", False),
-            "uri": config["RO"].get("uri"),
-            "tenant": config.get("tenant", "osm"),
-            "logger_name": "lcm.roclient",
-            "loglevel": config["RO"].get("loglevel", "ERROR"),
-        }
-        if not self.config["ro_config"]["uri"]:
-            self.config["ro_config"]["uri"] = "http://{}:{}/".format(
-                config["RO"]["host"], config["RO"]["port"]
-            )
-        elif (
-            "/ro" in self.config["ro_config"]["uri"][-4:]
-            or "/openmano" in self.config["ro_config"]["uri"][-10:]
-        ):
-            # uri ends with '/ro', '/ro/', '/openmano', '/openmano/'
-            index = self.config["ro_config"]["uri"][-1].rfind("/")
-            self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][index + 1]
-
+        self.main_config.set_from_dict(config)
+        self.main_config.transform()
+        self.main_config.load_from_env()
+        self.logger.critical("Loaded configuration:" + str(self.main_config.to_dict()))
+        # TODO: check if lcm_hc.py is necessary
+        self.health_check_file = get_health_check_file(self.main_config.to_dict())
         self.loop = loop or asyncio.get_event_loop()
         self.ns = (
             self.netslice
@@ -130,35 +112,35 @@ class Lcm:
         log_formatter_simple = logging.Formatter(
             log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
         )
-        config["database"]["logger_name"] = "lcm.db"
-        config["storage"]["logger_name"] = "lcm.fs"
-        config["message"]["logger_name"] = "lcm.msg"
-        if config["global"].get("logfile"):
+        if self.main_config.globalConfig.logfile:
             file_handler = logging.handlers.RotatingFileHandler(
-                config["global"]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                self.main_config.globalConfig.logfile,
+                maxBytes=100e6,
+                backupCount=9,
+                delay=0,
             )
             file_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(file_handler)
-        if not config["global"].get("nologging"):
+        if not self.main_config.globalConfig.nologging:
             str_handler = logging.StreamHandler()
             str_handler.setFormatter(log_formatter_simple)
             self.logger.addHandler(str_handler)
 
-        if config["global"].get("loglevel"):
-            self.logger.setLevel(config["global"]["loglevel"])
+        if self.main_config.globalConfig.loglevel:
+            self.logger.setLevel(self.main_config.globalConfig.loglevel)
 
         # logging other modules
-        for k1, logname in self.cfg_logger_name.items():
-            config[k1]["logger_name"] = logname
-            logger_module = logging.getLogger(logname)
-            if config[k1].get("logfile"):
+        for logger in ("message", "database", "storage", "tsdb"):
+            logger_config = self.main_config.to_dict()[logger]
+            logger_module = logging.getLogger(logger_config["logger_name"])
+            if logger_config["logfile"]:
                 file_handler = logging.handlers.RotatingFileHandler(
-                    config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+                    logger_config["logfile"], maxBytes=100e6, backupCount=9, delay=0
                 )
                 file_handler.setFormatter(log_formatter_simple)
                 logger_module.addHandler(file_handler)
-            if config[k1].get("loglevel"):
-                logger_module.setLevel(config[k1]["loglevel"])
+            if logger_config["loglevel"]:
+                logger_module.setLevel(logger_config["loglevel"])
         self.logger.critical(
             "starting osm/lcm version {} {}".format(lcm_version, lcm_version_date)
         )
@@ -181,13 +163,13 @@ class Lcm:
             )
 
         try:
-            self.db = Database(config).instance.db
+            self.db = Database(self.main_config.to_dict()).instance.db
 
-            self.fs = Filesystem(config).instance.fs
+            self.fs = Filesystem(self.main_config.to_dict()).instance.fs
             self.fs.sync()
 
             # copy message configuration in order to remove 'group_id' for msg_admin
-            config_message = config["message"].copy()
+            config_message = self.main_config.message.to_dict()
             config_message["loop"] = self.loop
             if config_message["driver"] == "local":
                 self.msg = msglocal.MsgLocal()
@@ -204,7 +186,7 @@ class Lcm:
             else:
                 raise LcmException(
                     "Invalid configuration param '{}' at '[message]':'driver'".format(
-                        config["message"]["driver"]
+                        self.main_config.message.driver
                     )
                 )
         except (DbException, FsException, MsgException) as e:
@@ -218,19 +200,21 @@ class Lcm:
         tries = 14
         last_error = None
         while True:
-            ro_uri = self.config["ro_config"]["uri"]
+            ro_uri = self.main_config.RO.uri
+            if not ro_uri:
+                ro_uri = ""
             try:
                 # try new  RO, if fail old RO
                 try:
-                    self.config["ro_config"]["uri"] = ro_uri + "ro"
-                    ro_server = NgRoClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "ro"
+                    ro_server = NgRoClient(self.loop, **self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = True
+                    self.main_config.RO.ng = True
                 except Exception:
-                    self.config["ro_config"]["uri"] = ro_uri + "openmano"
-                    ro_server = ROClient(self.loop, **self.config["ro_config"])
+                    self.main_config.RO.uri = ro_uri + "openmano"
+                    ro_server = ROClient(self.loop, **self.main_config.RO.to_dict())
                     ro_version = await ro_server.get_version()
-                    self.config["ro_config"]["ng"] = False
+                    self.main_config.RO.ng = False
                 if versiontuple(ro_version) < versiontuple(min_RO_version):
                     raise LcmException(
                         "Not compatible osm/RO version '{}'. Needed '{}' or higher".format(
@@ -239,16 +223,16 @@ class Lcm:
                     )
                 self.logger.info(
                     "Connected to RO version {} new-generation version {}".format(
-                        ro_version, self.config["ro_config"]["ng"]
+                        ro_version, self.main_config.RO.ng
                     )
                 )
                 return
             except (ROClientException, NgRoException) as e:
-                self.config["ro_config"]["uri"] = ro_uri
+                self.main_config.RO.uri = ro_uri
                 tries -= 1
                 traceback.print_tb(e.__traceback__)
                 error_text = "Error while connecting to RO on {}: {}".format(
-                    self.config["ro_config"]["uri"], e
+                    self.main_config.RO.uri, e
                 )
                 if tries <= 0:
                     self.logger.critical(error_text)
@@ -579,7 +563,7 @@ class Lcm:
         elif topic == "vim_account":
             vim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.create(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_create", task
@@ -597,7 +581,7 @@ class Lcm:
                 sys.stdout.flush()
                 return
             elif command in ("edit", "edited"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.vim.edit(params, order_id))
                     self.lcm_tasks.register(
                         "vim_account", vim_id, order_id, "vim_edit", task
@@ -608,7 +592,7 @@ class Lcm:
         elif topic == "wim_account":
             wim_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.wim.create(params, order_id))
                     self.lcm_tasks.register(
                         "wim_account", wim_id, order_id, "wim_create", task
@@ -636,7 +620,7 @@ class Lcm:
         elif topic == "sdn":
             _sdn_id = params["_id"]
             if command in ("create", "created"):
-                if not self.config["ro_config"].get("ng"):
+                if not self.main_config.RO.ng:
                     task = asyncio.ensure_future(self.sdn.create(params, order_id))
                     self.lcm_tasks.register(
                         "sdn", _sdn_id, order_id, "sdn_create", task
@@ -716,19 +700,28 @@ class Lcm:
         # check RO version
         self.loop.run_until_complete(self.check_RO_version())
 
-        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.config, self.loop)
+        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.main_config, self.loop)
+        # TODO: modify the rest of classes to use the LcmCfg object instead of dicts
         self.netslice = netslice.NetsliceLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop, self.ns
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop, self.ns
+        )
+        self.vim = vim_sdn.VimLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.wim = vim_sdn.WimLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.sdn = vim_sdn.SdnLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
-        self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
-        self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop)
         self.k8scluster = vim_sdn.K8sClusterLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
+        )
+        self.vca = vim_sdn.VcaLcm(
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
-        self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop)
         self.k8srepo = vim_sdn.K8sRepoLcm(
-            self.msg, self.lcm_tasks, self.config, self.loop
+            self.msg, self.lcm_tasks, self.main_config.to_dict(), self.loop
         )
 
         self.loop.run_until_complete(
@@ -760,67 +753,34 @@ class Lcm:
-        # TODO make a [ini] + yaml inside parser
-        # the configparser library is not suitable, because it does not admit comments at the end of line,
-        # and not parse integer or boolean
+        # [ini] parsing now uses configparser with inline_comment_prefixes="#", which
+        # accepts end-of-line comments; the yaml read below remains as a legacy fallback
+        conf = {}
         try:
-            # read file as yaml format
+            # read file as [ini] format first
-            with open(config_file) as f:
-                conf = yaml.safe_load(f)
-            # Ensure all sections are not empty
-            for k in (
-                "global",
-                "timeout",
-                "RO",
-                "VCA",
-                "database",
-                "storage",
-                "message",
-            ):
-                if not conf.get(k):
-                    conf[k] = {}
-
-            # read all environ that starts with OSMLCM_
-            for k, v in environ.items():
-                if not k.startswith("OSMLCM_"):
-                    continue
-                subject, _, item = k[7:].lower().partition("_")
-                if not item:
-                    continue
-                if subject in ("ro", "vca"):
-                    # put in capital letter
-                    subject = subject.upper()
-                try:
-                    if item == "port" or subject == "timeout":
-                        conf[subject][item] = int(v)
-                    else:
-                        conf[subject][item] = v
-                except Exception as e:
-                    self.logger.warning(
-                        "skipping environ '{}' on exception '{}'".format(k, e)
-                    )
-
-            # backward compatibility of VCA parameters
-
-            if "pubkey" in conf["VCA"]:
-                conf["VCA"]["public_key"] = conf["VCA"].pop("pubkey")
-            if "cacert" in conf["VCA"]:
-                conf["VCA"]["ca_cert"] = conf["VCA"].pop("cacert")
-            if "apiproxy" in conf["VCA"]:
-                conf["VCA"]["api_proxy"] = conf["VCA"].pop("apiproxy")
-
-            if "enableosupgrade" in conf["VCA"]:
-                conf["VCA"]["enable_os_upgrade"] = conf["VCA"].pop("enableosupgrade")
-            if isinstance(conf["VCA"].get("enable_os_upgrade"), str):
-                if conf["VCA"]["enable_os_upgrade"].lower() == "false":
-                    conf["VCA"]["enable_os_upgrade"] = False
-                elif conf["VCA"]["enable_os_upgrade"].lower() == "true":
-                    conf["VCA"]["enable_os_upgrade"] = True
-
-            if "aptmirror" in conf["VCA"]:
-                conf["VCA"]["apt_mirror"] = conf["VCA"].pop("aptmirror")
-
-            return conf
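+            # configparser accepts both "key = value" and "key: value" delimiters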
+            config = configparser.ConfigParser(inline_comment_prefixes="#")
+            config.read(config_file)
+            conf = {s: dict(config.items(s)) for s in config.sections()}
         except Exception as e:
             self.logger.critical("At config file '{}': {}".format(config_file, e))
-            exit(1)
+            self.logger.critical("Trying to load config as legacy mode")
+            try:
+                with open(config_file) as f:
+                    conf = yaml.safe_load(f)
+                # Ensure all sections are not empty
+                for k in (
+                    "global",
+                    "timeout",
+                    "RO",
+                    "VCA",
+                    "database",
+                    "storage",
+                    "message",
+                ):
+                    if not conf.get(k):
+                        conf[k] = {}
+            except Exception as e:
+                self.logger.critical("At config file '{}': {}".format(config_file, e))
+                exit(1)
+        return conf
 
     @staticmethod
     def get_process_id():
diff --git a/osm_lcm/lcm_helm_conn.py b/osm_lcm/lcm_helm_conn.py
index 887c212..b8817a5 100644 (file)
@@ -24,6 +24,7 @@ import ssl
 
 from grpclib.client import Channel
 
+from osm_lcm.data_utils.lcm_config import VcaConfig
 from osm_lcm.frontend_pb2 import PrimitiveRequest
 from osm_lcm.frontend_pb2 import SshKeyRequest, SshKeyReply
 from osm_lcm.frontend_grpc import FrontendExecutorStub
@@ -43,8 +44,6 @@ from n2vc.exceptions import (
 
 from osm_lcm.lcm_utils import deep_get
 
-CA_STORE = "/etc/ssl/certs/osm-ca.crt"
-
 
 def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"):
     def wrapper(func):
@@ -99,22 +98,11 @@ def create_secure_context(
 
 
 class LCMHelmConn(N2VCConnector, LcmBase):
-    _KUBECTL_OSM_NAMESPACE = "osm"
-    _KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"
-    _EE_SERVICE_PORT = 50050
-
-    # Initial max retry time
-    _MAX_INITIAL_RETRY_TIME = 600
-    # Max retry time for normal operations
-    _MAX_RETRY_TIME = 30
-    # Time beetween retries, retry time after a connection error is raised
-    _EE_RETRY_DELAY = 10
-
     def __init__(
         self,
         log: object = None,
         loop: object = None,
-        vca_config: dict = None,
+        vca_config: VcaConfig = None,
         on_update_db=None,
     ):
         """
@@ -131,43 +119,20 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
         self.vca_config = vca_config
         self.log.debug("Initialize helm N2VC connector")
-        self.log.debug("initial vca_config: {}".format(vca_config))
+        self.log.debug("initial vca_config: {}".format(vca_config.to_dict()))
 
-        # TODO - Obtain data from configuration
-        self._ee_service_port = self._EE_SERVICE_PORT
-
-        self._retry_delay = self._EE_RETRY_DELAY
-
-        if self.vca_config and self.vca_config.get("eegrpcinittimeout"):
-            self._initial_retry_time = self.vca_config.get("eegrpcinittimeout")
-            self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
-        else:
-            self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
-            self.log.debug(
-                "Applied default retry time: {}".format(self._initial_retry_time)
-            )
+        self._retry_delay = self.vca_config.helm_ee_retry_delay
 
-        if self.vca_config and self.vca_config.get("eegrpctimeout"):
-            self._max_retry_time = self.vca_config.get("eegrpctimeout")
-            self.log.debug("Retry time: {}".format(self._max_retry_time))
-        else:
-            self._max_retry_time = self._MAX_RETRY_TIME
-            self.log.debug(
-                "Applied default retry time: {}".format(self._max_retry_time)
-            )
+        self._initial_retry_time = self.vca_config.helm_max_initial_retry_time
+        self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
 
-        if self.vca_config and self.vca_config.get("eegrpc_tls_enforce"):
-            self._tls_enforce = str(
-                self.vca_config.get("eegrpc_tls_enforce")
-            ).lower() in ("true", "1", "yes")
-        else:
-            self._tls_enforce = False
-        self.log.debug("TLS enforce enabled: {}".format(self._tls_enforce))
+        self._max_retry_time = self.vca_config.helm_max_retry_time
+        self.log.debug("Retry time: {}".format(self._max_retry_time))
 
         # initialize helm connector for helmv2 and helmv3
         self._k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helmpath,
             fs=self.fs,
             db=self.db,
             log=self.log,
@@ -175,8 +140,8 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         )
 
         self._k8sclusterhelm3 = K8sHelm3Connector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helm3path"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helm3path,
             fs=self.fs,
             log=self.log,
             db=self.db,
@@ -289,7 +254,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     system_cluster_uuid,
                     kdu_model=kdu_model,
                     kdu_instance=helm_id,
-                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    namespace=self.vca_config.kubectl_osm_namespace,
                     params=config,
                     db_dict=db_dict,
                     timeout=progress_timeout,
@@ -303,13 +268,15 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     system_cluster_uuid,
                     kdu_model=kdu_model,
                     kdu_instance=helm_id,
-                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    namespace=self.vca_config.kubectl_osm_namespace,
                     params=config,
                     db_dict=db_dict,
                     timeout=progress_timeout,
                 )
 
-            ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id)
+            ee_id = "{}:{}.{}".format(
+                vca_type, self.vca_config.kubectl_osm_namespace, helm_id
+            )
             return ee_id, None
         except N2VCException:
             raise
@@ -434,14 +401,14 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         secret_name: str,
         usage: str,
         dns_prefix: str,
-        namespace: str = _KUBECTL_OSM_NAMESPACE,
+        namespace: str = None,
     ):
         # Obtain system cluster id from database
         system_cluster_uuid = await self._get_system_cluster_id()
         # use helm-v3 as certificates don't depend on helm version
         await self._k8sclusterhelm3.create_certificate(
             cluster_uuid=system_cluster_uuid,
-            namespace=namespace,
+            namespace=namespace or self.vca_config.kubectl_osm_namespace,
             dns_prefix=dns_prefix,
             name=nsr_id,
             secret_name=secret_name,
@@ -451,13 +418,13 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     async def delete_tls_certificate(
         self,
         certificate_name: str = None,
-        namespace: str = _KUBECTL_OSM_NAMESPACE,
+        namespace: str = None,
     ):
         # Obtain system cluster id from database
         system_cluster_uuid = await self._get_system_cluster_id()
         await self._k8sclusterhelm3.delete_certificate(
             cluster_uuid=system_cluster_uuid,
-            namespace=namespace,
+            namespace=namespace or self.vca_config.kubectl_osm_namespace,
             certificate_name=certificate_name,
         )
 
@@ -806,16 +773,21 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                 else:
                     return "ERROR", "No result received"
 
-        ssl_context = create_secure_context(CA_STORE)
-        channel = Channel(ip_addr, self._ee_service_port, ssl=ssl_context)
+        ssl_context = create_secure_context(self.vca_config.ca_store)
+        channel = Channel(
+            ip_addr, self.vca_config.helm_ee_service_port, ssl=ssl_context
+        )
         try:
             return await execute()
         except ssl.SSLError as ssl_error:  # fallback to insecure gRPC
-            if ssl_error.reason == "WRONG_VERSION_NUMBER" and not self._tls_enforce:
+            if (
+                ssl_error.reason == "WRONG_VERSION_NUMBER"
+                and not self.vca_config.eegrpc_tls_enforce
+            ):
                 self.log.debug(
                     "Execution environment doesn't support TLS, falling back to unsecure gRPC"
                 )
-                channel = Channel(ip_addr, self._ee_service_port)
+                channel = Channel(ip_addr, self.vca_config.helm_ee_service_port)
                 return await execute()
             elif ssl_error.reason == "WRONG_VERSION_NUMBER":
                 raise N2VCException(
@@ -848,7 +820,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     async def _get_system_cluster_id(self):
         if not self._system_cluster_id:
             db_k8cluster = self.db.get_one(
-                "k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME}
+                "k8sclusters", {"name": self.vca_config.kubectl_osm_cluster_name}
             )
             k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id"))
             if not k8s_hc_id:
diff --git a/osm_lcm/netslice.py b/osm_lcm/netslice.py
index 3a8002c..42b634e 100644 (file)
@@ -48,7 +48,7 @@ class NetsliceLcm(LcmBase):
         self.loop = loop
         self.lcm_tasks = lcm_tasks
         self.ns = ns
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
         self.timeout = config["timeout"]
 
         super().__init__(msg, self.logger)
diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 968e1f3..ced0ec0 100644 (file)
@@ -34,6 +34,7 @@ from jinja2 import (
 )
 
 from osm_lcm import ROclient
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.data_utils.nsr import (
     get_deployed_kdu,
     get_deployed_vca,
@@ -128,29 +129,12 @@ __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
 
 class NsLcm(LcmBase):
-    timeout_scale_on_error = (
-        5 * 60
-    )  # Time for charm from first time at blocked,error status to mark as failed
-    timeout_scale_on_error_outer_factor = 1.05  # Factor in relation to timeout_scale_on_error related to the timeout to be applied within the asyncio.wait_for coroutine
-    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
-    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
-    timeout_ns_heal = 1800  # default global timeout for un deployment a ns
-    timeout_charm_delete = 10 * 60
-    timeout_primitive = 30 * 60  # Timeout for primitive execution
-    timeout_primitive_outer_factor = 1.05  # Factor in relation to timeout_primitive related to the timeout to be applied within the asyncio.wait_for coroutine
-    timeout_ns_update = 30 * 60  # timeout for ns update
-    timeout_progress_primitive = (
-        10 * 60
-    )  # timeout for some progress in a primitive execution
-    timeout_migrate = 1800  # default global timeout for migrating vnfs
-    timeout_operate = 1800  # default global timeout for migrating vnfs
-    timeout_verticalscale = 1800  # default global timeout for Vertical Sclaing
     SUBOPERATION_STATUS_NOT_FOUND = -1
     SUBOPERATION_STATUS_NEW = -2
     SUBOPERATION_STATUS_SKIP = -3
     task_name_deploy_vca = "Deploying VCA"
 
-    def __init__(self, msg, lcm_tasks, config, loop):
+    def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
@@ -162,10 +146,9 @@ class NsLcm(LcmBase):
         self.fs = Filesystem().instance.fs
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.timeout = config["timeout"]
-        self.ro_config = config["ro_config"]
-        self.ng_ro = config["ro_config"].get("ng")
-        self.vca_config = config["VCA"].copy()
+        self.timeout = config.timeout
+        self.ro_config = config.RO
+        self.vca_config = config.VCA
 
         # create N2VC connector
         self.n2vc = N2VCJujuConnector(
@@ -184,8 +167,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterhelm2 = K8sHelmConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helmpath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helmpath,
             log=self.logger,
             on_update_db=None,
             fs=self.fs,
@@ -193,8 +176,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterhelm3 = K8sHelm3Connector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            helm_command=self.vca_config.get("helm3path"),
+            kubectl_command=self.vca_config.kubectlpath,
+            helm_command=self.vca_config.helm3path,
             fs=self.fs,
             log=self.logger,
             db=self.db,
@@ -202,8 +185,8 @@ class NsLcm(LcmBase):
         )
 
         self.k8sclusterjuju = K8sJujuConnector(
-            kubectl_command=self.vca_config.get("kubectlpath"),
-            juju_command=self.vca_config.get("jujupath"),
+            kubectl_command=self.vca_config.kubectlpath,
+            juju_command=self.vca_config.jujupath,
             log=self.logger,
             loop=self.loop,
             on_update_db=self._on_update_k8s_db,
@@ -228,7 +211,7 @@ class NsLcm(LcmBase):
         }
 
         # create RO client
-        self.RO = NgRoClient(self.loop, **self.ro_config)
+        self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())
 
         self.op_status_map = {
             "instantiation": self.RO.status,
@@ -1482,9 +1465,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
+                timeout_ns_deploy = self.timeout.ns_deploy
 
             # Check for and optionally request placement optimization. Database will be updated if placement activated
             stage[2] = "Waiting for Placement."
@@ -1674,7 +1655,7 @@ class NsLcm(LcmBase):
                     ro_vm_id = "{}-{}".format(
                         db_vnfr["member-vnf-index-ref"], target_vdu_id
                     )  # TODO add vdu_index
-                    if self.ng_ro:
+                    if self.ro_config.ng:
                         target = {
                             "action": {
                                 "action": "inject_ssh_key",
@@ -2512,9 +2493,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_deploy"):
                 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
             else:
-                timeout_ns_deploy = self.timeout.get(
-                    "ns_deploy", self.timeout_ns_deploy
-                )
+                timeout_ns_deploy = self.timeout.ns_deploy
 
             # read from db: ns
             stage[1] = "Getting nsr={} from db.".format(nsr_id)
@@ -2615,8 +2594,8 @@ class NsLcm(LcmBase):
             # feature 1429. Add n2vc public key to needed VMs
             n2vc_key = self.n2vc.get_public_key()
             n2vc_key_list = [n2vc_key]
-            if self.vca_config.get("public_key"):
-                n2vc_key_list.append(self.vca_config["public_key"])
+            if self.vca_config.public_key:
+                n2vc_key_list.append(self.vca_config.public_key)
 
             stage[1] = "Deploying NS at VIM."
             task_ro = asyncio.ensure_future(
@@ -4275,7 +4254,7 @@ class NsLcm(LcmBase):
         try:
             await self.n2vc.delete_namespace(
                 namespace=namespace,
-                total_timeout=self.timeout_charm_delete,
+                total_timeout=self.timeout.charm_delete,
                 vca_id=vca_id,
             )
         except N2VCNotFound:  # already deleted. Skip
@@ -4490,7 +4469,7 @@ class NsLcm(LcmBase):
 
         logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
         self.logger.debug(logging_text + "Enter")
-        timeout_ns_terminate = self.timeout_ns_terminate
+        timeout_ns_terminate = self.timeout.ns_terminate
         db_nsr = None
         db_nslcmop = None
         operation_params = None
@@ -4616,7 +4595,7 @@ class NsLcm(LcmBase):
                 error_list = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    min(self.timeout_charm_delete, timeout_ns_terminate),
+                    min(self.timeout.charm_delete, timeout_ns_terminate),
                     stage,
                     nslcmop_id,
                 )
@@ -4634,7 +4613,7 @@ class NsLcm(LcmBase):
                 task_delete_ee = asyncio.ensure_future(
                     asyncio.wait_for(
                         self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
-                        timeout=self.timeout_charm_delete,
+                        timeout=self.timeout.charm_delete,
                     )
                 )
                 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
@@ -4680,7 +4659,7 @@ class NsLcm(LcmBase):
 
             # remove from RO
             stage[1] = "Deleting ns from VIM."
-            if self.ng_ro:
+            if self.ro_config.ng:
                 task_delete_ro = asyncio.ensure_future(
                     self._terminate_ng_ro(
                         logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
@@ -5043,13 +5022,13 @@ class NsLcm(LcmBase):
                             ee_id=ee_id,
                             primitive_name=primitive,
                             params_dict=primitive_params,
-                            progress_timeout=self.timeout_progress_primitive,
-                            total_timeout=self.timeout_primitive,
+                            progress_timeout=self.timeout.progress_primitive,
+                            total_timeout=self.timeout.primitive,
                             db_dict=db_dict,
                             vca_id=vca_id,
                             vca_type=vca_type,
                         ),
-                        timeout=timeout or self.timeout_primitive,
+                        timeout=timeout or self.timeout.primitive,
                     )
                     # execution was OK
                     break
@@ -5157,7 +5136,7 @@ class NsLcm(LcmBase):
             primitive = db_nslcmop["operationParams"]["primitive"]
             primitive_params = db_nslcmop["operationParams"]["primitive_params"]
             timeout_ns_action = db_nslcmop["operationParams"].get(
-                "timeout_ns_action", self.timeout_primitive
+                "timeout_ns_action", self.timeout.primitive
             )
 
             if vnf_index:
@@ -5532,7 +5511,7 @@ class NsLcm(LcmBase):
             stage[2] = "Terminating VDUs"
             if scaling_info.get("vdu-delete"):
                 # scale_process = "RO"
-                if self.ro_config.get("ng"):
+                if self.ro_config.ng:
                     await self._scale_ng_ro(
                         logging_text,
                         db_nsr,
@@ -5710,7 +5689,7 @@ class NsLcm(LcmBase):
                         }
                     )
                 scaling_info["vdu-create"][vdud["id"]] = count_index
-            if self.ro_config.get("ng"):
+            if self.ro_config.ng:
                 self.logger.debug(
                     "New Resources to be deployed: {}".format(scaling_info)
                 )
@@ -5756,7 +5735,7 @@ class NsLcm(LcmBase):
                 path=path,
                 charm_id=charm_id,
                 charm_type=charm_type,
-                timeout=timeout or self.timeout_ns_update,
+                timeout=timeout or self.timeout.ns_update,
             )
 
             if output:
@@ -6949,7 +6928,7 @@ class NsLcm(LcmBase):
                                             scaling_in=True,
                                             vca_id=vca_id,
                                         ),
-                                        timeout=self.timeout_charm_delete,
+                                        timeout=self.timeout.charm_delete,
                                     )
                                 )
                                 tasks_dict_info[task] = "Terminating VCA {}".format(
@@ -6969,7 +6948,7 @@ class NsLcm(LcmBase):
                                 logging_text,
                                 tasks_dict_info,
                                 min(
-                                    self.timeout_charm_delete, self.timeout_ns_terminate
+                                    self.timeout.charm_delete, self.timeout.ns_terminate
                                 ),
                                 stage,
                                 nslcmop_id,
@@ -6991,7 +6970,7 @@ class NsLcm(LcmBase):
             # SCALE RO - BEGIN
             if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
                 scale_process = "RO"
-                if self.ro_config.get("ng"):
+                if self.ro_config.ng:
                     await self._scale_ng_ro(
                         logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
                     )
@@ -7286,7 +7265,7 @@ class NsLcm(LcmBase):
                 exc = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    self.timeout_ns_deploy,
+                    self.timeout.ns_deploy,
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
@@ -7401,11 +7380,11 @@ class NsLcm(LcmBase):
                                     primitive_name=terminate_config_primitive["name"],
                                     params=primitive_params_,
                                     db_dict=db_dict,
-                                    total_timeout=self.timeout_primitive,
+                                    total_timeout=self.timeout.primitive,
                                     vca_id=vca_id,
                                 ),
-                                timeout=self.timeout_primitive
-                                * self.timeout_primitive_outer_factor,
+                                timeout=self.timeout.primitive
+                                * self.timeout.primitive_outer_factor,
                             )
 
                 await asyncio.wait_for(
@@ -7413,15 +7392,15 @@ class NsLcm(LcmBase):
                         kdu_instance=kdu_instance,
                         scale=scale,
                         resource_name=kdu_scaling_info["resource-name"],
-                        total_timeout=self.timeout_scale_on_error,
+                        total_timeout=self.timeout.scale_on_error,
                         vca_id=vca_id,
                         cluster_uuid=cluster_uuid,
                         kdu_model=kdu_model,
                         atomic=True,
                         db_dict=db_dict,
                     ),
-                    timeout=self.timeout_scale_on_error
-                    * self.timeout_scale_on_error_outer_factor,
+                    timeout=self.timeout.scale_on_error
+                    * self.timeout.scale_on_error_outer_factor,
                 )
 
                 if kdu_scaling_info["type"] == "create":
@@ -7496,7 +7475,7 @@ class NsLcm(LcmBase):
             n2vc_key_list,
             stage=stage,
             start_deploy=time(),
-            timeout_ns_deploy=self.timeout_ns_deploy,
+            timeout_ns_deploy=self.timeout.ns_deploy,
         )
         if vdu_scaling_info.get("vdu-delete"):
             self.scale_vnfr(
@@ -7613,7 +7592,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_operate,
+                self.timeout.operate,
                 None,
                 "start_stop_rebuild",
             )
@@ -7705,7 +7684,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_migrate,
+                self.timeout.migrate,
                 operation="migrate",
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
@@ -7979,7 +7958,7 @@ class NsLcm(LcmBase):
                 exc = await self._wait_for_tasks(
                     logging_text,
                     tasks_dict_info,
-                    self.timeout_ns_deploy,
+                    self.timeout.ns_deploy,
                     stage,
                     nslcmop_id,
                     nsr_id=nsr_id,
@@ -8072,7 +8051,7 @@ class NsLcm(LcmBase):
             if ns_params and ns_params.get("timeout_ns_heal"):
                 timeout_ns_heal = ns_params["timeout_ns_heal"]
             else:
-                timeout_ns_heal = self.timeout.get("ns_heal", self.timeout_ns_heal)
+                timeout_ns_heal = self.timeout.ns_heal
 
             db_vims = {}
 
@@ -8559,7 +8538,7 @@ class NsLcm(LcmBase):
                 # n2vc_redesign STEP 5.1
                 # wait for RO (ip-address) Insert pub_key into VM
                 # IMPORTANT: We need do wait for RO to complete healing operation.
-                await self._wait_heal_ro(nsr_id, self.timeout_ns_heal)
+                await self._wait_heal_ro(nsr_id, self.timeout.ns_heal)
                 if vnfr_id:
                     if kdu_name:
                         rw_mgmt_ip = await self.wait_kdu_up(
@@ -8771,7 +8750,7 @@ class NsLcm(LcmBase):
                 action_id,
                 nslcmop_id,
                 start_deploy,
-                self.timeout_verticalscale,
+                self.timeout.verticalscale,
                 operation="verticalscale",
             )
         except (ROclient.ROClientException, DbException, LcmException) as e:
diff --git a/osm_lcm/tests/test_lcm.py b/osm_lcm/tests/test_lcm.py
index bdd9a8d..e05836a 100644 (file)
@@ -64,7 +64,7 @@ def check_file_content(health_check_file: str) -> str:
 
 class TestLcm(TestCase):
     def setUp(self):
-        self.config_file = os.getcwd() + "/osm_lcm/tests/test_lcm_config_file.yaml"
+        self.config_file = os.getcwd() + "/osm_lcm/tests/test_lcm_config_file_ini.cfg"
         self.config_file_without_storage_path = tempfile.mkstemp()[1]
         Database.instance = None
         self.db = Mock(Database({"database": {"driver": "memory"}}).instance.db)
@@ -80,10 +80,10 @@ class TestLcm(TestCase):
     def test_get_health_check_file_from_config_file(self):
         self.assertEqual(self.my_lcm.health_check_file, "/tmp/storage/time_last_ping")
 
-    def test_health_check_file_not_in_config_file(self):
-        create_lcm_config(self.config_file, self.config_file_without_storage_path, 38)
-        with self.assertRaises(LcmException):
-            Lcm(config_file=self.config_file_without_storage_path)
+    def test_health_check_file_not_in_config_file(self):
+        create_lcm_config(self.config_file, self.config_file_without_storage_path, 38)
+        with self.assertRaises(LcmException):
+            Lcm(config_file=self.config_file_without_storage_path)
 
     def test_kafka_admin_topic_ping_command(self):
         params = {
diff --git a/osm_lcm/tests/test_lcm_config_file_ini.cfg b/osm_lcm/tests/test_lcm_config_file_ini.cfg
new file mode 100644 (file)
index 0000000..a6afb65
--- /dev/null
+++ b/osm_lcm/tests/test_lcm_config_file_ini.cfg
@@ -0,0 +1,49 @@
+# Copyright 2022 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+
+[global]
+    loglevel: DEBUG
+[RO]
+    host:   ro
+    port:   9090
+    tenant: osm
+[VCA]
+    host:   vca
+    port:   17070
+    user:   admin
+    secret: secret
+    cloud:  localhost
+    k8s_cloud: k8scloud
+    helmpath:    /usr/local/bin/helm
+    helm3path:    /usr/local/bin/helm3
+    kubectlpath: /usr/bin/kubectl
+    jujupath:    /usr/local/bin/juju
+[database]
+    driver: memory       # mongo or memory
+    # host:   mongo       # hostname or IP
+    port:   27017
+    name:   osm
+[storage]
+    driver: local       # local filesystem
+    path: /tmp/storage
+[message]
+    driver:   local      # local or kafka
+    path:     /tmp/kafka
+    host:     kafka
+    port:     9092
+    group_id: lcm-server
+[tsdb]    # time series database
+    driver:   prometheus
+    path:     /tmp/prometheus
+    uri:      http://prometheus:9090/
diff --git a/osm_lcm/tests/test_lcm_helm_conn.py b/osm_lcm/tests/test_lcm_helm_conn.py
index 724b5e2..2ea9ae8 100644 (file)
@@ -23,6 +23,7 @@ from osm_lcm.lcm_helm_conn import LCMHelmConn
 from asynctest.mock import Mock
 from osm_lcm.data_utils.database.database import Database
 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
+from osm_lcm.data_utils.lcm_config import VcaConfig
 
 __author__ = "Isabel Lloret <illoret@indra.es>"
 
@@ -54,6 +55,8 @@ class TestLcmHelmConn(asynctest.TestCase):
         lcm_helm_conn.K8sHelm3Connector = asynctest.Mock(
             lcm_helm_conn.K8sHelm3Connector
         )
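+        # wrap the plain test dict in the new typed VcaConfig object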
+        vca_config = VcaConfig(vca_config)
         self.helm_conn = LCMHelmConn(
             loop=self.loop, vca_config=vca_config, log=self.logger
         )
diff --git a/osm_lcm/tests/test_ns.py b/osm_lcm/tests/test_ns.py
index 56d7ffa..1082648 100644 (file)
@@ -24,6 +24,8 @@ import copy
 from os import getenv
 from osm_lcm import ns
 from osm_common.msgkafka import MsgKafka
+
+from osm_lcm.data_utils.lcm_config import LcmCfg
 from osm_lcm.lcm_utils import TaskRegistry
 from osm_lcm.ng_ro import NgRoClient
 from osm_lcm.data_utils.database.database import Database
@@ -54,7 +56,7 @@ It allows, if some testing ENV are supplied, testing without mocking some extern
     OSMLCM_RO_XXX: configuration of RO
 """
 
-lcm_config = {
+lcm_config_dict = {
     "global": {"loglevel": "DEBUG"},
     "timeout": {},
     "VCA": {  # TODO replace with os.get_env to get other configurations
@@ -66,7 +68,7 @@ lcm_config = {
         "ca_cert": getenv("OSMLCM_VCA_CACERT", None),
         "apiproxy": getenv("OSMLCM_VCA_APIPROXY", "192.168.1.1"),
     },
-    "ro_config": {
+    "RO": {
         "uri": "http://{}:{}/openmano".format(
             getenv("OSMLCM_RO_HOST", "ro"), getenv("OSMLCM_RO_PORT", "9090")
         ),
@@ -76,6 +78,12 @@
         "ng": True,
     },
 }
+
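+# build the typed config the same way Lcm does: dict -> set_from_dict() -> transform()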
+lcm_config = LcmCfg()
+lcm_config.set_from_dict(lcm_config_dict)
+lcm_config.transform()
+
 nsr_id = descriptors.test_ids["TEST-A"]["ns"]
 nslcmop_id = descriptors.test_ids["TEST-A"]["update"]
 vnfr_id = "6421c7c9-d865-4fb4-9a13-d4275d243e01"
@@ -295,7 +302,7 @@ class TestMyNS(asynctest.TestCase):
         # Mock RO
         if not getenv("OSMLCMTEST_RO_NOMOCK"):
             self.my_ns.RO = asynctest.Mock(
-                NgRoClient(self.loop, **lcm_config["ro_config"])
+                NgRoClient(self.loop, **lcm_config.RO.to_dict())
             )
             # TODO first time should be empty list, following should return a dict
             # self.my_ns.RO.get_list = asynctest.CoroutineMock(self.my_ns.RO.get_list, return_value=[])
diff --git a/osm_lcm/vim_sdn.py b/osm_lcm/vim_sdn.py
index 1e5458c..0c22305 100644 (file)
@@ -56,7 +56,7 @@ class VimLcm(LcmBase):
         self.logger = logging.getLogger("lcm.vim")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
@@ -478,7 +478,7 @@ class WimLcm(LcmBase):
         self.logger = logging.getLogger("lcm.vim")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
@@ -826,7 +826,7 @@ class SdnLcm(LcmBase):
         self.logger = logging.getLogger("lcm.sdn")
         self.loop = loop
         self.lcm_tasks = lcm_tasks
-        self.ro_config = config["ro_config"]
+        self.ro_config = config["RO"]
 
         super().__init__(msg, self.logger)
 
diff --git a/requirements.in b/requirements.in
index f2830be..41a01ba 100644 (file)
@@ -22,3 +22,4 @@ jinja2
 pyyaml==5.4.1
 pydantic
 protobuf==3.20.3
+config-man==0.0.4
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index eb66bc2..24c09c0 100644 (file)
@@ -21,11 +21,23 @@ async-timeout==3.0.1
     #   -r requirements.in
     #   aiohttp
 attrs==22.1.0
-    # via aiohttp
+    # via
+    #   aiohttp
+    #   glom
+boltons==21.0.0
+    # via
+    #   face
+    #   glom
 chardet==4.0.0
     # via aiohttp
 checksumdir==1.2.0
     # via -r requirements.in
+config-man==0.0.4
+    # via -r requirements.in
+face==22.0.0
+    # via glom
+glom==22.1.0
+    # via config-man
 grpcio==1.50.0
     # via grpcio-tools
 grpcio-tools==1.48.1