9b62d82eef07a337eebb1445c675f0de256919ff
# -*- coding: utf-8 -*-

# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
27 import logging
.handlers
30 from random
import SystemRandom
32 from osm_lcm
import ns
, vim_sdn
, netslice
33 from osm_lcm
.ng_ro
import NgRoException
, NgRoClient
34 from osm_lcm
.ROclient
import ROClient
, ROClientException
37 from osm_lcm
.lcm_utils
import versiontuple
, LcmException
, TaskRegistry
, LcmExceptionExit
38 from osm_lcm
import version
as lcm_version
, version_date
as lcm_version_date
40 from osm_common
import msglocal
, msgkafka
41 from osm_common
import version
as common_version
42 from osm_common
.dbbase
import DbException
43 from osm_common
.fsbase
import FsException
44 from osm_common
.msgbase
import MsgException
45 from osm_lcm
.data_utils
.database
.database
import Database
46 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
47 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
48 from osm_lcm
.lcm_hc
import get_health_check_file
49 from os
import path
, getenv
50 from n2vc
import version
as n2vc_version
53 if getenv("OSMLCM_PDB_DEBUG", None) is not None:
57 __author__
= "Alfonso Tierno"
58 min_RO_version
= "6.0.2"
59 min_n2vc_version
= "0.0.2"
61 min_common_version
= "0.1.19"
65 ping_interval_pace
= (
66 120 # how many time ping is send once is confirmed all is running
68 ping_interval_boot
= 5 # how many time ping is sent when booting
70 main_config
= LcmCfg()
72 def __init__(self
, config_file
):
74 Init, Connect to database, filesystem storage, and messaging
75 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
82 self
.pings_not_received
= 1
83 self
.consecutive_errors
= 0
84 self
.first_start
= False
87 self
.logger
= logging
.getLogger("lcm")
89 self
.worker_id
= self
.get_process_id()
91 config
= self
.read_config_file(config_file
)
92 self
.main_config
.set_from_dict(config
)
93 self
.main_config
.transform()
94 self
.main_config
.load_from_env()
95 self
.logger
.critical("Loaded configuration:" + str(self
.main_config
.to_dict()))
96 # TODO: check if lcm_hc.py is necessary
97 self
.health_check_file
= get_health_check_file(self
.main_config
.to_dict())
102 ) = self
.wim
= self
.sdn
= self
.k8scluster
= self
.vca
= self
.k8srepo
= None
105 log_format_simple
= (
106 "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
108 log_formatter_simple
= logging
.Formatter(
109 log_format_simple
, datefmt
="%Y-%m-%dT%H:%M:%S"
111 if self
.main_config
.globalConfig
.logfile
:
112 file_handler
= logging
.handlers
.RotatingFileHandler(
113 self
.main_config
.globalConfig
.logfile
,
118 file_handler
.setFormatter(log_formatter_simple
)
119 self
.logger
.addHandler(file_handler
)
120 if not self
.main_config
.globalConfig
.to_dict()["nologging"]:
121 str_handler
= logging
.StreamHandler()
122 str_handler
.setFormatter(log_formatter_simple
)
123 self
.logger
.addHandler(str_handler
)
125 if self
.main_config
.globalConfig
.to_dict()["loglevel"]:
126 self
.logger
.setLevel(self
.main_config
.globalConfig
.loglevel
)
128 # logging other modules
129 for logger
in ("message", "database", "storage", "tsdb"):
130 logger_config
= self
.main_config
.to_dict()[logger
]
131 logger_module
= logging
.getLogger(logger_config
["logger_name"])
132 if logger_config
["logfile"]:
133 file_handler
= logging
.handlers
.RotatingFileHandler(
134 logger_config
["logfile"], maxBytes
=100e6
, backupCount
=9, delay
=0
136 file_handler
.setFormatter(log_formatter_simple
)
137 logger_module
.addHandler(file_handler
)
138 if logger_config
["loglevel"]:
139 logger_module
.setLevel(logger_config
["loglevel"])
140 self
.logger
.critical(
141 "starting osm/lcm version {} {}".format(lcm_version
, lcm_version_date
)
144 # check version of N2VC
145 # TODO enhance with int conversion or from distutils.version import LooseVersion
146 # or with list(map(int, version.split(".")))
147 if versiontuple(n2vc_version
) < versiontuple(min_n2vc_version
):
149 "Not compatible osm/N2VC version '{}'. Needed '{}' or higher".format(
150 n2vc_version
, min_n2vc_version
153 # check version of common
154 if versiontuple(common_version
) < versiontuple(min_common_version
):
156 "Not compatible osm/common version '{}'. Needed '{}' or higher".format(
157 common_version
, min_common_version
162 self
.db
= Database(self
.main_config
.to_dict()).instance
.db
164 self
.fs
= Filesystem(self
.main_config
.to_dict()).instance
.fs
167 # copy message configuration in order to remove 'group_id' for msg_admin
168 config_message
= self
.main_config
.message
.to_dict()
169 config_message
["loop"] = asyncio
.get_event_loop()
170 if config_message
["driver"] == "local":
171 self
.msg
= msglocal
.MsgLocal()
172 self
.msg
.connect(config_message
)
173 self
.msg_admin
= msglocal
.MsgLocal()
174 config_message
.pop("group_id", None)
175 self
.msg_admin
.connect(config_message
)
176 elif config_message
["driver"] == "kafka":
177 self
.msg
= msgkafka
.MsgKafka()
178 self
.msg
.connect(config_message
)
179 self
.msg_admin
= msgkafka
.MsgKafka()
180 config_message
.pop("group_id", None)
181 self
.msg_admin
.connect(config_message
)
184 "Invalid configuration param '{}' at '[message]':'driver'".format(
185 self
.main_config
.message
.driver
188 except (DbException
, FsException
, MsgException
) as e
:
189 self
.logger
.critical(str(e
), exc_info
=True)
190 raise LcmException(str(e
))
192 # contains created tasks/futures to be able to cancel
193 self
.lcm_tasks
= TaskRegistry(self
.worker_id
, self
.logger
)
195 async def check_RO_version(self
):
199 ro_uri
= self
.main_config
.RO
.uri
203 # try new RO, if fail old RO
205 self
.main_config
.RO
.uri
= ro_uri
+ "ro"
206 ro_server
= NgRoClient(**self
.main_config
.RO
.to_dict())
207 ro_version
= await ro_server
.get_version()
208 self
.main_config
.RO
.ng
= True
210 self
.main_config
.RO
.uri
= ro_uri
+ "openmano"
211 ro_server
= ROClient(**self
.main_config
.RO
.to_dict())
212 ro_version
= await ro_server
.get_version()
213 self
.main_config
.RO
.ng
= False
214 if versiontuple(ro_version
) < versiontuple(min_RO_version
):
216 "Not compatible osm/RO version '{}'. Needed '{}' or higher".format(
217 ro_version
, min_RO_version
221 "Connected to RO version {} new-generation version {}".format(
222 ro_version
, self
.main_config
.RO
.ng
226 except (ROClientException
, NgRoException
) as e
:
227 self
.main_config
.RO
.uri
= ro_uri
229 traceback
.print_tb(e
.__traceback
__)
230 error_text
= "Error while connecting to RO on {}: {}".format(
231 self
.main_config
.RO
.uri
, e
234 self
.logger
.critical(error_text
)
235 raise LcmException(error_text
)
236 if last_error
!= error_text
:
237 last_error
= error_text
239 error_text
+ ". Waiting until {} seconds".format(5 * tries
)
241 await asyncio
.sleep(5)
243 async def test(self
, param
=None):
244 self
.logger
.debug("Starting/Ending test task: {}".format(param
))
246 async def kafka_ping(self
):
247 self
.logger
.debug("Task kafka_ping Enter")
248 consecutive_errors
= 0
250 kafka_has_received
= False
251 self
.pings_not_received
= 1
254 await self
.msg_admin
.aiowrite(
260 "worker_id": self
.worker_id
,
261 "version": lcm_version
,
264 # time between pings are low when it is not received and at starting
266 self
.ping_interval_boot
267 if not kafka_has_received
268 else self
.ping_interval_pace
270 if not self
.pings_not_received
:
271 kafka_has_received
= True
272 self
.pings_not_received
+= 1
273 await asyncio
.sleep(wait_time
)
274 if self
.pings_not_received
> 10:
275 raise LcmException("It is not receiving pings from Kafka bus")
276 consecutive_errors
= 0
280 except Exception as e
:
281 # if not first_start is the first time after starting. So leave more time and wait
282 # to allow kafka starts
283 if consecutive_errors
== 8 if not first_start
else 30:
285 "Task kafka_read task exit error too many errors. Exception: {}".format(
290 consecutive_errors
+= 1
292 "Task kafka_read retrying after Exception {}".format(e
)
294 wait_time
= 2 if not first_start
else 5
295 await asyncio
.sleep(wait_time
)
297 async def kafka_read_callback(self
, topic
, command
, params
):
300 if topic
!= "admin" and command
!= "ping":
302 "Task kafka_read receives {} {}: {}".format(topic
, command
, params
)
304 self
.consecutive_errors
= 0
305 self
.first_start
= False
307 if command
== "exit":
308 raise LcmExceptionExit
309 elif command
.startswith("#"):
311 elif command
== "echo":
316 elif command
== "test":
317 asyncio
.Task(self
.test(params
))
321 if command
== "ping" and params
["to"] == "lcm" and params
["from"] == "lcm":
322 if params
.get("worker_id") != self
.worker_id
:
324 self
.pings_not_received
= 0
326 with
open(self
.health_check_file
, "w") as f
:
328 except Exception as e
:
330 "Cannot write into '{}' for healthcheck: {}".format(
331 self
.health_check_file
, e
335 elif topic
== "nslcmops":
336 if command
== "cancel":
337 nslcmop_id
= params
["_id"]
338 self
.logger
.debug("Cancelling nslcmop {}".format(nslcmop_id
))
339 nsr_id
= params
["nsInstanceId"]
340 # cancel the tasks and wait
341 for task
in self
.lcm_tasks
.cancel("ns", nsr_id
, nslcmop_id
):
345 "Cancelled task ended {},{},{}".format(
346 nsr_id
, nslcmop_id
, task
349 except asyncio
.CancelledError
:
351 "Task already cancelled and finished {},{},{}".format(
352 nsr_id
, nslcmop_id
, task
356 q_filter
= {"_id": nslcmop_id
}
358 "operationState": "FAILED_TEMP",
359 "isCancelPending": False,
367 update_dict
=update_dict
,
371 self
.logger
.debug("LCM task cancelled {},{}".format(nsr_id
, nslcmop_id
))
374 if command
== "placement":
375 self
.ns
.update_nsrs_with_pla_result(params
)
377 elif topic
== "k8scluster":
378 if command
== "create" or command
== "created":
379 k8scluster_id
= params
.get("_id")
380 task
= asyncio
.ensure_future(self
.k8scluster
.create(params
, order_id
))
381 self
.lcm_tasks
.register(
382 "k8scluster", k8scluster_id
, order_id
, "k8scluster_create", task
385 elif command
== "edit" or command
== "edited":
386 k8scluster_id
= params
.get("_id")
387 task
= asyncio
.ensure_future(self
.k8scluster
.edit(params
, order_id
))
388 self
.lcm_tasks
.register(
389 "k8scluster", k8scluster_id
, order_id
, "k8scluster_edit", task
392 elif command
== "delete" or command
== "deleted":
393 k8scluster_id
= params
.get("_id")
394 task
= asyncio
.ensure_future(self
.k8scluster
.delete(params
, order_id
))
395 self
.lcm_tasks
.register(
396 "k8scluster", k8scluster_id
, order_id
, "k8scluster_delete", task
400 if command
== "create" or command
== "created":
401 vca_id
= params
.get("_id")
402 task
= asyncio
.ensure_future(self
.vca
.create(params
, order_id
))
403 self
.lcm_tasks
.register("vca", vca_id
, order_id
, "vca_create", task
)
405 elif command
== "edit" or command
== "edited":
406 vca_id
= params
.get("_id")
407 task
= asyncio
.ensure_future(self
.vca
.edit(params
, order_id
))
408 self
.lcm_tasks
.register("vca", vca_id
, order_id
, "vca_edit", task
)
410 elif command
== "delete" or command
== "deleted":
411 vca_id
= params
.get("_id")
412 task
= asyncio
.ensure_future(self
.vca
.delete(params
, order_id
))
413 self
.lcm_tasks
.register("vca", vca_id
, order_id
, "vca_delete", task
)
415 elif topic
== "k8srepo":
416 if command
== "create" or command
== "created":
417 k8srepo_id
= params
.get("_id")
418 self
.logger
.debug("k8srepo_id = {}".format(k8srepo_id
))
419 task
= asyncio
.ensure_future(self
.k8srepo
.create(params
, order_id
))
420 self
.lcm_tasks
.register(
421 "k8srepo", k8srepo_id
, order_id
, "k8srepo_create", task
424 elif command
== "delete" or command
== "deleted":
425 k8srepo_id
= params
.get("_id")
426 task
= asyncio
.ensure_future(self
.k8srepo
.delete(params
, order_id
))
427 self
.lcm_tasks
.register(
428 "k8srepo", k8srepo_id
, order_id
, "k8srepo_delete", task
432 if command
== "instantiate":
433 # self.logger.debug("Deploying NS {}".format(nsr_id))
435 nslcmop_id
= nslcmop
["_id"]
436 nsr_id
= nslcmop
["nsInstanceId"]
437 task
= asyncio
.ensure_future(self
.ns
.instantiate(nsr_id
, nslcmop_id
))
438 self
.lcm_tasks
.register(
439 "ns", nsr_id
, nslcmop_id
, "ns_instantiate", task
442 elif command
== "terminate":
443 # self.logger.debug("Deleting NS {}".format(nsr_id))
445 nslcmop_id
= nslcmop
["_id"]
446 nsr_id
= nslcmop
["nsInstanceId"]
447 self
.lcm_tasks
.cancel(topic
, nsr_id
)
448 task
= asyncio
.ensure_future(self
.ns
.terminate(nsr_id
, nslcmop_id
))
449 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_terminate", task
)
451 elif command
== "vca_status_refresh":
453 nslcmop_id
= nslcmop
["_id"]
454 nsr_id
= nslcmop
["nsInstanceId"]
455 task
= asyncio
.ensure_future(
456 self
.ns
.vca_status_refresh(nsr_id
, nslcmop_id
)
458 self
.lcm_tasks
.register(
459 "ns", nsr_id
, nslcmop_id
, "ns_vca_status_refresh", task
462 elif command
== "action":
463 # self.logger.debug("Update NS {}".format(nsr_id))
465 nslcmop_id
= nslcmop
["_id"]
466 nsr_id
= nslcmop
["nsInstanceId"]
467 task
= asyncio
.ensure_future(self
.ns
.action(nsr_id
, nslcmop_id
))
468 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_action", task
)
470 elif command
== "update":
471 # self.logger.debug("Update NS {}".format(nsr_id))
473 nslcmop_id
= nslcmop
["_id"]
474 nsr_id
= nslcmop
["nsInstanceId"]
475 task
= asyncio
.ensure_future(self
.ns
.update(nsr_id
, nslcmop_id
))
476 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_update", task
)
478 elif command
== "scale":
479 # self.logger.debug("Update NS {}".format(nsr_id))
481 nslcmop_id
= nslcmop
["_id"]
482 nsr_id
= nslcmop
["nsInstanceId"]
483 task
= asyncio
.ensure_future(self
.ns
.scale(nsr_id
, nslcmop_id
))
484 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_scale", task
)
486 elif command
== "heal":
487 # self.logger.debug("Healing NS {}".format(nsr_id))
489 nslcmop_id
= nslcmop
["_id"]
490 nsr_id
= nslcmop
["nsInstanceId"]
491 task
= asyncio
.ensure_future(self
.ns
.heal(nsr_id
, nslcmop_id
))
492 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_heal", task
)
494 elif command
== "migrate":
496 nslcmop_id
= nslcmop
["_id"]
497 nsr_id
= nslcmop
["nsInstanceId"]
498 task
= asyncio
.ensure_future(self
.ns
.migrate(nsr_id
, nslcmop_id
))
499 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "ns_migrate", task
)
501 elif command
== "verticalscale":
503 nslcmop_id
= nslcmop
["_id"]
504 nsr_id
= nslcmop
["nsInstanceId"]
505 task
= asyncio
.ensure_future(self
.ns
.vertical_scale(nsr_id
, nslcmop_id
))
507 "nsr_id,nslcmop_id,task {},{},{}".format(nsr_id
, nslcmop_id
, task
)
509 self
.lcm_tasks
.register(
510 "ns", nsr_id
, nslcmop_id
, "ns_verticalscale", task
513 "LCM task registered {},{},{} ".format(nsr_id
, nslcmop_id
, task
)
516 elif command
== "show":
519 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
521 "nsr:\n _id={}\n operational-status: {}\n config-status: {}"
522 "\n detailed-status: {}\n deploy: {}\n tasks: {}"
525 db_nsr
["operational-status"],
526 db_nsr
["config-status"],
527 db_nsr
["detailed-status"],
528 db_nsr
["_admin"]["deployed"],
529 self
.lcm_tasks
.task_registry
["ns"].get(nsr_id
, ""),
532 except Exception as e
:
533 print("nsr {} not found: {}".format(nsr_id
, e
))
536 elif command
== "deleted":
537 return # TODO cleaning of task just in case should be done
549 ): # "scaled-cooldown-time"
552 elif topic
== "nsi": # netslice LCM processes (instantiate, terminate, etc)
553 if command
== "instantiate":
554 # self.logger.debug("Instantiating Network Slice {}".format(nsilcmop["netsliceInstanceId"]))
556 nsilcmop_id
= nsilcmop
["_id"] # slice operation id
557 nsir_id
= nsilcmop
["netsliceInstanceId"] # slice record id
558 task
= asyncio
.ensure_future(
559 self
.netslice
.instantiate(nsir_id
, nsilcmop_id
)
561 self
.lcm_tasks
.register(
562 "nsi", nsir_id
, nsilcmop_id
, "nsi_instantiate", task
565 elif command
== "terminate":
566 # self.logger.debug("Terminating Network Slice NS {}".format(nsilcmop["netsliceInstanceId"]))
568 nsilcmop_id
= nsilcmop
["_id"] # slice operation id
569 nsir_id
= nsilcmop
["netsliceInstanceId"] # slice record id
570 self
.lcm_tasks
.cancel(topic
, nsir_id
)
571 task
= asyncio
.ensure_future(
572 self
.netslice
.terminate(nsir_id
, nsilcmop_id
)
574 self
.lcm_tasks
.register(
575 "nsi", nsir_id
, nsilcmop_id
, "nsi_terminate", task
578 elif command
== "show":
581 db_nsir
= self
.db
.get_one("nsirs", {"_id": nsir_id
})
583 "nsir:\n _id={}\n operational-status: {}\n config-status: {}"
584 "\n detailed-status: {}\n deploy: {}\n tasks: {}"
587 db_nsir
["operational-status"],
588 db_nsir
["config-status"],
589 db_nsir
["detailed-status"],
590 db_nsir
["_admin"]["deployed"],
591 self
.lcm_tasks
.task_registry
["nsi"].get(nsir_id
, ""),
594 except Exception as e
:
595 print("nsir {} not found: {}".format(nsir_id
, e
))
598 elif command
== "deleted":
599 return # TODO cleaning of task just in case should be done
606 ): # "scaled-cooldown-time"
608 elif topic
== "vim_account":
609 vim_id
= params
["_id"]
610 if command
in ("create", "created"):
611 if not self
.main_config
.RO
.ng
:
612 task
= asyncio
.ensure_future(self
.vim
.create(params
, order_id
))
613 self
.lcm_tasks
.register(
614 "vim_account", vim_id
, order_id
, "vim_create", task
617 elif command
== "delete" or command
== "deleted":
618 self
.lcm_tasks
.cancel(topic
, vim_id
)
619 task
= asyncio
.ensure_future(self
.vim
.delete(params
, order_id
))
620 self
.lcm_tasks
.register(
621 "vim_account", vim_id
, order_id
, "vim_delete", task
624 elif command
== "show":
625 print("not implemented show with vim_account")
628 elif command
in ("edit", "edited"):
629 if not self
.main_config
.RO
.ng
:
630 task
= asyncio
.ensure_future(self
.vim
.edit(params
, order_id
))
631 self
.lcm_tasks
.register(
632 "vim_account", vim_id
, order_id
, "vim_edit", task
635 elif command
== "deleted":
636 return # TODO cleaning of task just in case should be done
637 elif topic
== "wim_account":
638 wim_id
= params
["_id"]
639 if command
in ("create", "created"):
640 if not self
.main_config
.RO
.ng
:
641 task
= asyncio
.ensure_future(self
.wim
.create(params
, order_id
))
642 self
.lcm_tasks
.register(
643 "wim_account", wim_id
, order_id
, "wim_create", task
646 elif command
== "delete" or command
== "deleted":
647 self
.lcm_tasks
.cancel(topic
, wim_id
)
648 task
= asyncio
.ensure_future(self
.wim
.delete(params
, order_id
))
649 self
.lcm_tasks
.register(
650 "wim_account", wim_id
, order_id
, "wim_delete", task
653 elif command
== "show":
654 print("not implemented show with wim_account")
657 elif command
in ("edit", "edited"):
658 task
= asyncio
.ensure_future(self
.wim
.edit(params
, order_id
))
659 self
.lcm_tasks
.register(
660 "wim_account", wim_id
, order_id
, "wim_edit", task
663 elif command
== "deleted":
664 return # TODO cleaning of task just in case should be done
666 _sdn_id
= params
["_id"]
667 if command
in ("create", "created"):
668 if not self
.main_config
.RO
.ng
:
669 task
= asyncio
.ensure_future(self
.sdn
.create(params
, order_id
))
670 self
.lcm_tasks
.register(
671 "sdn", _sdn_id
, order_id
, "sdn_create", task
674 elif command
== "delete" or command
== "deleted":
675 self
.lcm_tasks
.cancel(topic
, _sdn_id
)
676 task
= asyncio
.ensure_future(self
.sdn
.delete(params
, order_id
))
677 self
.lcm_tasks
.register("sdn", _sdn_id
, order_id
, "sdn_delete", task
)
679 elif command
in ("edit", "edited"):
680 task
= asyncio
.ensure_future(self
.sdn
.edit(params
, order_id
))
681 self
.lcm_tasks
.register("sdn", _sdn_id
, order_id
, "sdn_edit", task
)
683 elif command
== "deleted":
684 return # TODO cleaning of task just in case should be done
685 self
.logger
.critical("unknown topic {} and command '{}'".format(topic
, command
))
687 async def kafka_read(self
):
689 "Task kafka_read Enter with worker_id={}".format(self
.worker_id
)
691 self
.consecutive_errors
= 0
692 self
.first_start
= True
693 while self
.consecutive_errors
< 10:
707 topics_admin
= ("admin",)
708 await asyncio
.gather(
711 aiocallback
=self
.kafka_read_callback
,
714 self
.msg_admin
.aioread(
716 aiocallback
=self
.kafka_read_callback
,
721 except LcmExceptionExit
:
722 self
.logger
.debug("Bye!")
724 except Exception as e
:
725 # if not first_start is the first time after starting. So leave more time and wait
726 # to allow kafka starts
727 if self
.consecutive_errors
== 8 if not self
.first_start
else 30:
729 "Task kafka_read task exit error too many errors. Exception: {}".format(
734 self
.consecutive_errors
+= 1
736 "Task kafka_read retrying after Exception {}".format(e
)
738 wait_time
= 2 if not self
.first_start
else 5
739 await asyncio
.sleep(wait_time
)
741 self
.logger
.debug("Task kafka_read exit")
async def kafka_read_ping(self):
    """Run the Kafka consumer loop and the periodic ping task concurrently."""
    reader = self.kafka_read()
    pinger = self.kafka_ping()
    await asyncio.gather(reader, pinger)
746 async def start(self
):
748 await self
.check_RO_version()
750 self
.ns
= ns
.NsLcm(self
.msg
, self
.lcm_tasks
, self
.main_config
)
751 # TODO: modify the rest of classes to use the LcmCfg object instead of dicts
752 self
.netslice
= netslice
.NetsliceLcm(
753 self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict(), self
.ns
755 self
.vim
= vim_sdn
.VimLcm(self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict())
756 self
.wim
= vim_sdn
.WimLcm(self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict())
757 self
.sdn
= vim_sdn
.SdnLcm(self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict())
758 self
.k8scluster
= vim_sdn
.K8sClusterLcm(
759 self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict()
761 self
.vca
= vim_sdn
.VcaLcm(self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict())
762 self
.k8srepo
= vim_sdn
.K8sRepoLcm(
763 self
.msg
, self
.lcm_tasks
, self
.main_config
.to_dict()
766 await self
.kafka_read_ping()
769 # self.logger.debug("Terminating cancelling creation tasks")
770 # self.lcm_tasks.cancel("ALL", "create")
772 # while self.is_pending_tasks():
773 # self.logger.debug("Task kafka_read terminating. Waiting for tasks termination")
774 # await asyncio.sleep(2)
777 # self.lcm_tasks.cancel("ALL", "ALL")
779 self
.db
.db_disconnect()
781 self
.msg
.disconnect()
783 self
.msg_admin
.disconnect()
785 self
.fs
.fs_disconnect()
def read_config_file(self, config_file):
    """Load the LCM configuration from a YAML file.

    :param config_file: path to the configuration file
    :return: the parsed configuration (as returned by yaml.safe_load)

    On any read/parse error the problem is logged as critical and the
    process terminates, since LCM cannot run without a configuration.
    """
    try:
        with open(config_file) as f:
            return yaml.safe_load(f)
    except Exception as e:
        self.logger.critical("At config file '{}': {}".format(config_file, e))
        # sys.exit instead of the site-provided exit(): the latter is not
        # guaranteed to exist when the interpreter runs without the site module
        sys.exit(1)
def get_process_id():
    """
    Obtain a unique ID for this process. If running from inside docker, it will get docker ID. If not it
    will provide a random one
    :return: Obtained ID
    """

    def get_docker_id():
        # Inside a container, /proc/self/cgroup's first line ends with the
        # container id; return its first 12 chars. Returns None when the file
        # cannot be read (i.e. not running inside docker).
        try:
            with open("/proc/self/cgroup", "r") as f:
                text_id_ = f.readline()
                _, _, text_id = text_id_.rpartition("/")
                return text_id.replace("\n", "")[:12]
        except Exception:
            return None

    def generate_random_id():
        # SystemRandom: OS-entropy backed, safe enough for a worker id
        return "".join(SystemRandom().choice("0123456789abcdef") for _ in range(12))

    # Try getting docker id. If it fails, generate a random id
    docker_id = get_docker_id()
    return docker_id if docker_id else generate_random_id()
822 """Usage: {} [options]
823 -c|--config [configuration_file]: loads the configuration file (default: ./lcm.cfg)
824 --health-check: do not run lcm, but inspect kafka bus to determine if lcm is healthy
825 -h|--help: shows this help
830 # --log-socket-host HOST: send logs to this host")
831 # --log-socket-port PORT: send logs using this port (default: 9022)")
834 if __name__
== "__main__":
836 # print("SYS.PATH='{}'".format(sys.path))
837 # load parameters and configuration
843 opts
, args
= getopt
.getopt(
844 sys
.argv
[1:], "hc:", ["config=", "help", "health-check"]
846 # TODO add "log-socket-host=", "log-socket-port=", "log-file="
849 if o
in ("-h", "--help"):
852 elif o
in ("-c", "--config"):
854 elif o
== "--health-check":
855 from osm_lcm
.lcm_hc
import health_check
857 health_check(config_file
, Lcm
.ping_interval_pace
)
859 print(f
"Unhandled option: {o}")
863 if not path
.isfile(config_file
):
865 "configuration file '{}' does not exist".format(config_file
),
871 __file__
[: __file__
.rfind(".")] + ".cfg",
875 if path
.isfile(config_file
):
879 "No configuration file 'lcm.cfg' found neither at local folder nor at /etc/osm/",
883 lcm
= Lcm(config_file
)
884 asyncio
.run(lcm
.start())
885 except (LcmException
, getopt
.GetoptError
) as e
:
886 print(str(e
), file=sys
.stderr
)