Fix Bug 1425 NG-RO unable to pin VNF with vim_account config
[osm/LCM.git] osm_lcm/lcm.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-

##
# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##


# DEBUG WITH PDB
import os
import pdb

import asyncio
import yaml
import logging
import logging.handlers
import getopt
import sys

from osm_lcm import ns, vim_sdn, netslice
from osm_lcm.ng_ro import NgRoException, NgRoClient
from osm_lcm.ROclient import ROClient, ROClientException

from time import time
from osm_lcm.lcm_utils import versiontuple, LcmException, TaskRegistry, LcmExceptionExit
from osm_lcm import version as lcm_version, version_date as lcm_version_date

from osm_common import msglocal, msgkafka
from osm_common import version as common_version
from osm_common.dbbase import DbException
from osm_common.fsbase import FsException
from osm_common.msgbase import MsgException
from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem
from os import environ, path
from random import choice as random_choice
from n2vc import version as n2vc_version
import traceback

if os.getenv("OSMLCM_PDB_DEBUG", None) is not None:
    pdb.set_trace()


__author__ = "Alfonso Tierno"
min_RO_version = "6.0.2"
min_n2vc_version = "0.0.2"

min_common_version = "0.1.19"
health_check_file = (
    path.expanduser("~") + "/time_last_ping"
)  # TODO find better location for this file


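# Illustrative note, not from the original source: health_check_file is refreshed with the current timestamp each
# time this LCM instance receives its own kafka ping (see kafka_read_callback below), and the --health-check entry
# point (osm_lcm.lcm_hc.health_check) is invoked with this path and Lcm.ping_interval_pace to judge its freshness.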
class Lcm:

    ping_interval_pace = (
        120  # how many seconds between pings once it is confirmed that all is running
    )
    ping_interval_boot = 5  # how many seconds between pings while booting
    cfg_logger_name = {
        "message": "lcm.msg",
        "database": "lcm.db",
        "storage": "lcm.fs",
        "tsdb": "lcm.prometheus",
    }
    # ^ maps each section of lcm.cfg to the logger name used for it

    def __init__(self, config_file, loop=None):
        """
        Init: connect to database, filesystem storage and messaging
        :param config_file: path to the configuration file, holding a two-level dictionary whose top level
            contains at least the 'database', 'storage' and 'message' sections
        :return: None
        """
        self.db = None
        self.msg = None
        self.msg_admin = None
        self.fs = None
        self.pings_not_received = 1
        self.consecutive_errors = 0
        self.first_start = False

        # logging
        self.logger = logging.getLogger("lcm")
        # get id
        self.worker_id = self.get_process_id()
        # load configuration
        config = self.read_config_file(config_file)
        self.config = config
        self.config["ro_config"] = {
            "ng": config["RO"].get("ng", False),
            "uri": config["RO"].get("uri"),
            "tenant": config.get("tenant", "osm"),
            "logger_name": "lcm.roclient",
            "loglevel": config["RO"].get("loglevel", "ERROR"),
        }
        if not self.config["ro_config"]["uri"]:
            self.config["ro_config"]["uri"] = "http://{}:{}/".format(
                config["RO"]["host"], config["RO"]["port"]
            )
        elif (
            "/ro" in self.config["ro_config"]["uri"][-4:]
            or "/openmano" in self.config["ro_config"]["uri"][-10:]
        ):
            # uri ends with '/ro', '/ro/', '/openmano' or '/openmano/': strip the suffix, keeping the base uri
            index = self.config["ro_config"]["uri"][:-1].rfind("/")
            self.config["ro_config"]["uri"] = self.config["ro_config"]["uri"][: index + 1]

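        # A minimal sketch of the normalization above, assuming the hypothetical host "ro" and port 9090:
        #     "http://ro:9090/ro/"      -> "http://ro:9090/"
        #     "http://ro:9090/openmano" -> "http://ro:9090/"
        # check_RO_version() below then appends "ro" (new-generation RO) or "openmano" (legacy RO) when probing.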
        self.loop = loop or asyncio.get_event_loop()
        self.ns = (
            self.netslice
        ) = (
            self.vim
        ) = self.wim = self.sdn = self.k8scluster = self.vca = self.k8srepo = None

        # logging
        log_format_simple = (
            "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
        )
        log_formatter_simple = logging.Formatter(
            log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
        )
        config["database"]["logger_name"] = "lcm.db"
        config["storage"]["logger_name"] = "lcm.fs"
        config["message"]["logger_name"] = "lcm.msg"
        if config["global"].get("logfile"):
            file_handler = logging.handlers.RotatingFileHandler(
                config["global"]["logfile"], maxBytes=100e6, backupCount=9, delay=0
            )
            file_handler.setFormatter(log_formatter_simple)
            self.logger.addHandler(file_handler)
        if not config["global"].get("nologging"):
            str_handler = logging.StreamHandler()
            str_handler.setFormatter(log_formatter_simple)
            self.logger.addHandler(str_handler)

        if config["global"].get("loglevel"):
            self.logger.setLevel(config["global"]["loglevel"])

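        # For reference, with the format above a record looks like (the values below are hypothetical):
        #     2021-04-01T12:00:00 INFO lcm lcm.py:241 Connected to RO version 8.0.1 new-generation version True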
        # logging other modules
        for k1, logname in self.cfg_logger_name.items():
            config[k1]["logger_name"] = logname
            logger_module = logging.getLogger(logname)
            if config[k1].get("logfile"):
                file_handler = logging.handlers.RotatingFileHandler(
                    config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
                )
                file_handler.setFormatter(log_formatter_simple)
                logger_module.addHandler(file_handler)
            if config[k1].get("loglevel"):
                logger_module.setLevel(config[k1]["loglevel"])
        self.logger.critical(
            "starting osm/lcm version {} {}".format(lcm_version, lcm_version_date)
        )

        # check version of N2VC
        # TODO enhance with int conversion or from distutils.version import LooseVersion
        # or with list(map(int, version.split("."))) 
        if versiontuple(n2vc_version) < versiontuple(min_n2vc_version):
            raise LcmException(
                "Incompatible osm/N2VC version '{}'. Needed '{}' or higher".format(
                    n2vc_version, min_n2vc_version
                )
            )
        # check version of common
        if versiontuple(common_version) < versiontuple(min_common_version):
            raise LcmException(
                "Incompatible osm/common version '{}'. Needed '{}' or higher".format(
                    common_version, min_common_version
                )
            )
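        # Sketch of the comparison above, assuming versiontuple() turns "x.y.z" strings into tuples that compare
        # component-wise (see osm_lcm.lcm_utils for the actual implementation):
        #     versiontuple("0.1.19") < versiontuple("0.1.2")  -> False (19 >= 2 on the third component)
        #     versiontuple("0.0.1") < versiontuple("0.0.2")   -> True  (would raise LcmException just above)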

        try:
            self.db = Database(config).instance.db

            self.fs = Filesystem(config).instance.fs
            self.fs.sync()

            # copy message configuration in order to remove 'group_id' for msg_admin
            config_message = config["message"].copy()
            config_message["loop"] = self.loop
            if config_message["driver"] == "local":
                self.msg = msglocal.MsgLocal()
                self.msg.connect(config_message)
                self.msg_admin = msglocal.MsgLocal()
                config_message.pop("group_id", None)
                self.msg_admin.connect(config_message)
            elif config_message["driver"] == "kafka":
                self.msg = msgkafka.MsgKafka()
                self.msg.connect(config_message)
                self.msg_admin = msgkafka.MsgKafka()
                config_message.pop("group_id", None)
                self.msg_admin.connect(config_message)
            else:
                raise LcmException(
                    "Invalid configuration param '{}' at '[message]':'driver'".format(
                        config["message"]["driver"]
                    )
                )
        except (DbException, FsException, MsgException) as e:
            self.logger.critical(str(e), exc_info=True)
            raise LcmException(str(e))

        # contains created tasks/futures to be able to cancel
        self.lcm_tasks = TaskRegistry(self.worker_id, self.logger)

    async def check_RO_version(self):
        tries = 14
        last_error = None
        while True:
            ro_uri = self.config["ro_config"]["uri"]
            try:
                # try the new-generation RO first; if that fails, fall back to the old RO
                try:
                    self.config["ro_config"]["uri"] = ro_uri + "ro"
                    ro_server = NgRoClient(self.loop, **self.config["ro_config"])
                    ro_version = await ro_server.get_version()
                    self.config["ro_config"]["ng"] = True
                except Exception:
                    self.config["ro_config"]["uri"] = ro_uri + "openmano"
                    ro_server = ROClient(self.loop, **self.config["ro_config"])
                    ro_version = await ro_server.get_version()
                    self.config["ro_config"]["ng"] = False
                if versiontuple(ro_version) < versiontuple(min_RO_version):
                    raise LcmException(
                        "Incompatible osm/RO version '{}'. Needed '{}' or higher".format(
                            ro_version, min_RO_version
                        )
                    )
                self.logger.info(
                    "Connected to RO version {} new-generation version {}".format(
                        ro_version, self.config["ro_config"]["ng"]
                    )
                )
                return
            except (ROClientException, NgRoException) as e:
                self.config["ro_config"]["uri"] = ro_uri
                tries -= 1
                traceback.print_tb(e.__traceback__)
                error_text = "Error while connecting to RO on {}: {}".format(
                    self.config["ro_config"]["uri"], e
                )
                if tries <= 0:
                    self.logger.critical(error_text)
                    raise LcmException(error_text)
                if last_error != error_text:
                    last_error = error_text
                    self.logger.error(
                        error_text + ". Retrying for up to {} more seconds".format(5 * tries)
                    )
                await asyncio.sleep(5)

    async def test(self, param=None):
        self.logger.debug("Starting/Ending test task: {}".format(param))

    async def kafka_ping(self):
        self.logger.debug("Task kafka_ping Enter")
        consecutive_errors = 0
        first_start = True
        kafka_has_received = False
        self.pings_not_received = 1
        while True:
            try:
                await self.msg_admin.aiowrite(
                    "admin",
                    "ping",
                    {
                        "from": "lcm",
                        "to": "lcm",
                        "worker_id": self.worker_id,
                        "version": lcm_version,
                    },
                    self.loop,
                )
                # the time between pings is short while booting or while pings are not being received
                wait_time = (
                    self.ping_interval_boot
                    if not kafka_has_received
                    else self.ping_interval_pace
                )
                if not self.pings_not_received:
                    kafka_has_received = True
                self.pings_not_received += 1
                await asyncio.sleep(wait_time, loop=self.loop)
                if self.pings_not_received > 10:
                    raise LcmException("Not receiving pings from the Kafka bus")
                consecutive_errors = 0
                first_start = False
            except LcmException:
                raise
            except Exception as e:
                # if first_start is True this is the first time after starting, so leave more time
                # to allow kafka to start
                if consecutive_errors == (8 if not first_start else 30):
                    self.logger.error(
                        "Task kafka_ping exiting, too many errors. Exception: {}".format(
                            e
                        )
                    )
                    raise
                consecutive_errors += 1
                self.logger.error(
                    "Task kafka_ping retrying after Exception {}".format(e)
                )
                wait_time = 2 if not first_start else 5
                await asyncio.sleep(wait_time, loop=self.loop)

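    # Illustrative sketch (the worker_id value is hypothetical): the ping written by kafka_ping() arrives through
    # kafka_read_callback() below as topic="admin", command="ping", with params like
    #     {"from": "lcm", "to": "lcm", "worker_id": "3f1e2d4c5b6a", "version": lcm_version}
    # and only the instance whose worker_id matches refreshes health_check_file with the current timestamp.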
    def kafka_read_callback(self, topic, command, params):
        order_id = 1

        if topic != "admin" and command != "ping":
            self.logger.debug(
                "Task kafka_read receives {} {}: {}".format(topic, command, params)
            )
        self.consecutive_errors = 0
        self.first_start = False
        order_id += 1
        if command == "exit":
            raise LcmExceptionExit
        elif command.startswith("#"):
            return
        elif command == "echo":
            # just for test
            print(params)
            sys.stdout.flush()
            return
        elif command == "test":
            asyncio.Task(self.test(params), loop=self.loop)
            return

        if topic == "admin":
            if command == "ping" and params["to"] == "lcm" and params["from"] == "lcm":
                if params.get("worker_id") != self.worker_id:
                    return
                self.pings_not_received = 0
                try:
                    with open(health_check_file, "w") as f:
                        f.write(str(time()))
                except Exception as e:
                    self.logger.error(
                        "Cannot write into '{}' for healthcheck: {}".format(
                            health_check_file, e
                        )
                    )
            return
        elif topic == "pla":
            if command == "placement":
                self.ns.update_nsrs_with_pla_result(params)
            return
        elif topic == "k8scluster":
            if command == "create" or command == "created":
                k8scluster_id = params.get("_id")
                task = asyncio.ensure_future(self.k8scluster.create(params, order_id))
                self.lcm_tasks.register(
                    "k8scluster", k8scluster_id, order_id, "k8scluster_create", task
                )
                return
            elif command == "delete" or command == "deleted":
                k8scluster_id = params.get("_id")
                task = asyncio.ensure_future(self.k8scluster.delete(params, order_id))
                self.lcm_tasks.register(
                    "k8scluster", k8scluster_id, order_id, "k8scluster_delete", task
                )
                return
        elif topic == "vca":
            if command == "create" or command == "created":
                vca_id = params.get("_id")
                task = asyncio.ensure_future(self.vca.create(params, order_id))
                self.lcm_tasks.register("vca", vca_id, order_id, "vca_create", task)
                return
            elif command == "delete" or command == "deleted":
                vca_id = params.get("_id")
                task = asyncio.ensure_future(self.vca.delete(params, order_id))
                self.lcm_tasks.register("vca", vca_id, order_id, "vca_delete", task)
                return
        elif topic == "k8srepo":
            if command == "create" or command == "created":
                k8srepo_id = params.get("_id")
                self.logger.debug("k8srepo_id = {}".format(k8srepo_id))
                task = asyncio.ensure_future(self.k8srepo.create(params, order_id))
                self.lcm_tasks.register(
                    "k8srepo", k8srepo_id, order_id, "k8srepo_create", task
                )
                return
            elif command == "delete" or command == "deleted":
                k8srepo_id = params.get("_id")
                task = asyncio.ensure_future(self.k8srepo.delete(params, order_id))
                self.lcm_tasks.register(
                    "k8srepo", k8srepo_id, order_id, "k8srepo_delete", task
                )
                return
        elif topic == "ns":
            if command == "instantiate":
                # self.logger.debug("Deploying NS {}".format(nsr_id))
                nslcmop = params
                nslcmop_id = nslcmop["_id"]
                nsr_id = nslcmop["nsInstanceId"]
                task = asyncio.ensure_future(self.ns.instantiate(nsr_id, nslcmop_id))
                self.lcm_tasks.register(
                    "ns", nsr_id, nslcmop_id, "ns_instantiate", task
                )
                return
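                # Illustrative sketch only (the ids are hypothetical): the nslcmop params consumed above carry at
                # least the operation id and the target NS record id, e.g.
                #     {"_id": "0123abcd-...", "nsInstanceId": "4567ef89-...", ...}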
            elif command == "terminate":
                # self.logger.debug("Deleting NS {}".format(nsr_id))
                nslcmop = params
                nslcmop_id = nslcmop["_id"]
                nsr_id = nslcmop["nsInstanceId"]
                self.lcm_tasks.cancel(topic, nsr_id)
                task = asyncio.ensure_future(self.ns.terminate(nsr_id, nslcmop_id))
                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_terminate", task)
                return
            elif command == "vca_status_refresh":
                nslcmop = params
                nslcmop_id = nslcmop["_id"]
                nsr_id = nslcmop["nsInstanceId"]
                task = asyncio.ensure_future(
                    self.ns.vca_status_refresh(nsr_id, nslcmop_id)
                )
                self.lcm_tasks.register(
                    "ns", nsr_id, nslcmop_id, "ns_vca_status_refresh", task
                )
                return
            elif command == "action":
                # self.logger.debug("Update NS {}".format(nsr_id))
                nslcmop = params
                nslcmop_id = nslcmop["_id"]
                nsr_id = nslcmop["nsInstanceId"]
                task = asyncio.ensure_future(self.ns.action(nsr_id, nslcmop_id))
                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_action", task)
                return
            elif command == "scale":
                # self.logger.debug("Update NS {}".format(nsr_id))
                nslcmop = params
                nslcmop_id = nslcmop["_id"]
                nsr_id = nslcmop["nsInstanceId"]
                task = asyncio.ensure_future(self.ns.scale(nsr_id, nslcmop_id))
                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "ns_scale", task)
                return
            elif command == "show":
                nsr_id = params
                try:
                    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
                    print(
                        "nsr:\n _id={}\n operational-status: {}\n config-status: {}"
                        "\n detailed-status: {}\n deploy: {}\n tasks: {}"
                        "".format(
                            nsr_id,
                            db_nsr["operational-status"],
                            db_nsr["config-status"],
                            db_nsr["detailed-status"],
                            db_nsr["_admin"]["deployed"],
                            self.lcm_ns_tasks.get(nsr_id),
                        )
                    )
                except Exception as e:
                    print("nsr {} not found: {}".format(nsr_id, e))
                sys.stdout.flush()
                return
            elif command == "deleted":
                return  # TODO cleaning of task just in case should be done
            elif command in (
                "terminated",
                "instantiated",
                "scaled",
                "actioned",
            ):  # "scaled-cooldown-time"
                return
        elif topic == "nsi":  # netslice LCM processes (instantiate, terminate, etc)
            if command == "instantiate":
                # self.logger.debug("Instantiating Network Slice {}".format(nsilcmop["netsliceInstanceId"]))
                nsilcmop = params
                nsilcmop_id = nsilcmop["_id"]  # slice operation id
                nsir_id = nsilcmop["netsliceInstanceId"]  # slice record id
                task = asyncio.ensure_future(
                    self.netslice.instantiate(nsir_id, nsilcmop_id)
                )
                self.lcm_tasks.register(
                    "nsi", nsir_id, nsilcmop_id, "nsi_instantiate", task
                )
                return
            elif command == "terminate":
                # self.logger.debug("Terminating Network Slice NS {}".format(nsilcmop["netsliceInstanceId"]))
                nsilcmop = params
                nsilcmop_id = nsilcmop["_id"]  # slice operation id
                nsir_id = nsilcmop["netsliceInstanceId"]  # slice record id
                self.lcm_tasks.cancel(topic, nsir_id)
                task = asyncio.ensure_future(
                    self.netslice.terminate(nsir_id, nsilcmop_id)
                )
                self.lcm_tasks.register(
                    "nsi", nsir_id, nsilcmop_id, "nsi_terminate", task
                )
                return
            elif command == "show":
                nsir_id = params
                try:
                    db_nsir = self.db.get_one("nsirs", {"_id": nsir_id})
                    print(
                        "nsir:\n _id={}\n operational-status: {}\n config-status: {}"
                        "\n detailed-status: {}\n deploy: {}\n tasks: {}"
                        "".format(
                            nsir_id,
                            db_nsir["operational-status"],
                            db_nsir["config-status"],
                            db_nsir["detailed-status"],
                            db_nsir["_admin"]["deployed"],
                            self.lcm_netslice_tasks.get(nsir_id),
                        )
                    )
                except Exception as e:
                    print("nsir {} not found: {}".format(nsir_id, e))
                sys.stdout.flush()
                return
            elif command == "deleted":
                return  # TODO cleaning of task just in case should be done
            elif command in (
                "terminated",
                "instantiated",
                "scaled",
                "actioned",
            ):  # "scaled-cooldown-time"
                return
        elif topic == "vim_account":
            vim_id = params["_id"]
            if command in ("create", "created"):
                if not self.config["ro_config"].get("ng"):
                    task = asyncio.ensure_future(self.vim.create(params, order_id))
                    self.lcm_tasks.register(
                        "vim_account", vim_id, order_id, "vim_create", task
                    )
                return
            elif command == "delete" or command == "deleted":
                self.lcm_tasks.cancel(topic, vim_id)
                task = asyncio.ensure_future(self.vim.delete(params, order_id))
                self.lcm_tasks.register(
                    "vim_account", vim_id, order_id, "vim_delete", task
                )
                return
            elif command == "show":
                print("not implemented show with vim_account")
                sys.stdout.flush()
                return
            elif command in ("edit", "edited"):
                if not self.config["ro_config"].get("ng"):
                    task = asyncio.ensure_future(self.vim.edit(params, order_id))
                    self.lcm_tasks.register(
                        "vim_account", vim_id, order_id, "vim_edit", task
                    )
                return
            elif command == "deleted":
                return  # TODO cleaning of task just in case should be done
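            # Note (not part of the original code): when the new-generation RO is enabled
            # (self.config["ro_config"]["ng"] is True), no vim_create/vim_edit task is launched against the
            # legacy RO client above, while delete is always handled.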
        elif topic == "wim_account":
            wim_id = params["_id"]
            if command in ("create", "created"):
                if not self.config["ro_config"].get("ng"):
                    task = asyncio.ensure_future(self.wim.create(params, order_id))
                    self.lcm_tasks.register(
                        "wim_account", wim_id, order_id, "wim_create", task
                    )
                return
            elif command == "delete" or command == "deleted":
                self.lcm_tasks.cancel(topic, wim_id)
                task = asyncio.ensure_future(self.wim.delete(params, order_id))
                self.lcm_tasks.register(
                    "wim_account", wim_id, order_id, "wim_delete", task
                )
                return
            elif command == "show":
                print("not implemented show with wim_account")
                sys.stdout.flush()
                return
            elif command in ("edit", "edited"):
                task = asyncio.ensure_future(self.wim.edit(params, order_id))
                self.lcm_tasks.register(
                    "wim_account", wim_id, order_id, "wim_edit", task
                )
                return
            elif command == "deleted":
                return  # TODO cleaning of task just in case should be done
        elif topic == "sdn":
            _sdn_id = params["_id"]
            if command in ("create", "created"):
                if not self.config["ro_config"].get("ng"):
                    task = asyncio.ensure_future(self.sdn.create(params, order_id))
                    self.lcm_tasks.register(
                        "sdn", _sdn_id, order_id, "sdn_create", task
                    )
                return
            elif command == "delete" or command == "deleted":
                self.lcm_tasks.cancel(topic, _sdn_id)
                task = asyncio.ensure_future(self.sdn.delete(params, order_id))
                self.lcm_tasks.register("sdn", _sdn_id, order_id, "sdn_delete", task)
                return
            elif command in ("edit", "edited"):
                task = asyncio.ensure_future(self.sdn.edit(params, order_id))
                self.lcm_tasks.register("sdn", _sdn_id, order_id, "sdn_edit", task)
                return
            elif command == "deleted":
                return  # TODO cleaning of task just in case should be done
        self.logger.critical("unknown topic {} and command '{}'".format(topic, command))

    async def kafka_read(self):
        self.logger.debug(
            "Task kafka_read Enter with worker_id={}".format(self.worker_id)
        )
        # future = asyncio.Future()
        self.consecutive_errors = 0
        self.first_start = True
        while self.consecutive_errors < 10:
            try:
                topics = (
                    "ns",
                    "vim_account",
                    "wim_account",
                    "sdn",
                    "nsi",
                    "k8scluster",
                    "vca",
                    "k8srepo",
                    "pla",
                )
                topics_admin = ("admin",)
                await asyncio.gather(
                    self.msg.aioread(
                        topics, self.loop, self.kafka_read_callback, from_beginning=True
                    ),
                    self.msg_admin.aioread(
                        topics_admin,
                        self.loop,
                        self.kafka_read_callback,
                        group_id=False,
                    ),
                )

            except LcmExceptionExit:
                self.logger.debug("Bye!")
                break
            except Exception as e:
                # if first_start is True this is the first time after starting, so leave more time
                # to allow kafka to start
                if self.consecutive_errors == (8 if not self.first_start else 30):
                    self.logger.error(
                        "Task kafka_read exiting, too many errors. Exception: {}".format(
                            e
                        )
                    )
                    raise
                self.consecutive_errors += 1
                self.logger.error(
                    "Task kafka_read retrying after Exception {}".format(e)
                )
                wait_time = 2 if not self.first_start else 5
                await asyncio.sleep(wait_time, loop=self.loop)

        # self.logger.debug("Task kafka_read terminating")
        self.logger.debug("Task kafka_read exit")

    def start(self):

        # check RO version
        self.loop.run_until_complete(self.check_RO_version())

        self.ns = ns.NsLcm(self.msg, self.lcm_tasks, self.config, self.loop)
        self.netslice = netslice.NetsliceLcm(
            self.msg, self.lcm_tasks, self.config, self.loop, self.ns
        )
        self.vim = vim_sdn.VimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
        self.wim = vim_sdn.WimLcm(self.msg, self.lcm_tasks, self.config, self.loop)
        self.sdn = vim_sdn.SdnLcm(self.msg, self.lcm_tasks, self.config, self.loop)
        self.k8scluster = vim_sdn.K8sClusterLcm(
            self.msg, self.lcm_tasks, self.config, self.loop
        )
        self.vca = vim_sdn.VcaLcm(self.msg, self.lcm_tasks, self.config, self.loop)
        self.k8srepo = vim_sdn.K8sRepoLcm(
            self.msg, self.lcm_tasks, self.config, self.loop
        )

        self.loop.run_until_complete(
            asyncio.gather(self.kafka_read(), self.kafka_ping())
        )

        # TODO
        # self.logger.debug("Terminating cancelling creation tasks")
        # self.lcm_tasks.cancel("ALL", "create")
        # timeout = 200
        # while self.is_pending_tasks():
        #     self.logger.debug("Task kafka_read terminating. Waiting for tasks termination")
        #     await asyncio.sleep(2, loop=self.loop)
        #     timeout -= 2
        #     if not timeout:
        #         self.lcm_tasks.cancel("ALL", "ALL")
        self.loop.close()
        self.loop = None
        if self.db:
            self.db.db_disconnect()
        if self.msg:
            self.msg.disconnect()
        if self.msg_admin:
            self.msg_admin.disconnect()
        if self.fs:
            self.fs.fs_disconnect()

    def read_config_file(self, config_file):
        # TODO make a combined [ini] + yaml parser
        # the configparser library is not suitable because it does not allow comments at the end of a line
        # and does not parse integers or booleans
        try:
            # read file as yaml format
            with open(config_file) as f:
                conf = yaml.load(f, Loader=yaml.Loader)
            # Ensure all sections are not empty
            for k in (
                "global",
                "timeout",
                "RO",
                "VCA",
                "database",
                "storage",
                "message",
            ):
                if not conf.get(k):
                    conf[k] = {}

            # read all environment variables that start with OSMLCM_
            for k, v in environ.items():
                if not k.startswith("OSMLCM_"):
                    continue
                subject, _, item = k[7:].lower().partition("_")
                if not item:
                    continue
                if subject in ("ro", "vca"):
                    # these section names are upper-case in the configuration
                    subject = subject.upper()
                try:
                    if item == "port" or subject == "timeout":
                        conf[subject][item] = int(v)
                    else:
                        conf[subject][item] = v
                except Exception as e:
                    self.logger.warning(
                        "skipping environ '{}' on exception '{}'".format(k, e)
                    )

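            # For instance (hypothetical values), OSMLCM_RO_PORT=9090 becomes conf["RO"]["port"] = 9090 (an int),
            # and OSMLCM_DATABASE_HOST=mongo becomes conf["database"]["host"] = "mongo".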
            # backward compatibility of VCA parameters

            if "pubkey" in conf["VCA"]:
                conf["VCA"]["public_key"] = conf["VCA"].pop("pubkey")
            if "cacert" in conf["VCA"]:
                conf["VCA"]["ca_cert"] = conf["VCA"].pop("cacert")
            if "apiproxy" in conf["VCA"]:
                conf["VCA"]["api_proxy"] = conf["VCA"].pop("apiproxy")

            if "enableosupgrade" in conf["VCA"]:
                conf["VCA"]["enable_os_upgrade"] = conf["VCA"].pop("enableosupgrade")
            if isinstance(conf["VCA"].get("enable_os_upgrade"), str):
                if conf["VCA"]["enable_os_upgrade"].lower() == "false":
                    conf["VCA"]["enable_os_upgrade"] = False
                elif conf["VCA"]["enable_os_upgrade"].lower() == "true":
                    conf["VCA"]["enable_os_upgrade"] = True

            if "aptmirror" in conf["VCA"]:
                conf["VCA"]["apt_mirror"] = conf["VCA"].pop("aptmirror")

            return conf
        except Exception as e:
            self.logger.critical("At config file '{}': {}".format(config_file, e))
            exit(1)

    @staticmethod
    def get_process_id():
        """
        Obtain a unique ID for this process. If running inside a docker container it will be the docker ID;
        otherwise a random one is generated
        :return: Obtained ID
        """
        # Try getting the docker id; if that fails, fall back to a random id
        try:
            with open("/proc/self/cgroup", "r") as f:
                text_id_ = f.readline()
                _, _, text_id = text_id_.rpartition("/")
                text_id = text_id.replace("\n", "")[:12]
                if text_id:
                    return text_id
        except Exception:
            pass
        # Return a random id
        return "".join(random_choice("0123456789abcdef") for _ in range(12))
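        # Illustrative sketch (the value is hypothetical): under docker, /proc/self/cgroup typically starts with a
        # line such as "12:devices:/docker/3f1e2d4c5b6a7e8f..." from which the id "3f1e2d4c5b6a" is extracted above.
        # The exact cgroup layout varies between container runtimes, hence the broad except.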


def usage():
    print(
        """Usage: {} [options]
        -c|--config [configuration_file]: loads the configuration file (default: ./lcm.cfg)
        --health-check: do not run lcm, but inspect kafka bus to determine if lcm is healthy
        -h|--help: shows this help
        """.format(
            sys.argv[0]
        )
    )
    # --log-socket-host HOST: send logs to this host")
    # --log-socket-port PORT: send logs using this port (default: 9022)")
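    # Typical invocations (paths are examples only):
    #     python3 lcm.py -c /etc/osm/lcm.cfg
    #     python3 lcm.py --health-check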


if __name__ == "__main__":

    try:
        # print("SYS.PATH='{}'".format(sys.path))
        # load parameters and configuration
        # -h
        # -c value
        # --config value
        # --help
        # --health-check
        opts, args = getopt.getopt(
            sys.argv[1:], "hc:", ["config=", "help", "health-check"]
        )
        # TODO add "log-socket-host=", "log-socket-port=", "log-file="
        config_file = None
        for o, a in opts:
            if o in ("-h", "--help"):
                usage()
                sys.exit()
            elif o in ("-c", "--config"):
                config_file = a
            elif o == "--health-check":
                from osm_lcm.lcm_hc import health_check

                health_check(health_check_file, Lcm.ping_interval_pace)
            # elif o == "--log-socket-port":
            #     log_socket_port = a
            # elif o == "--log-socket-host":
            #     log_socket_host = a
            # elif o == "--log-file":
            #     log_file = a
            else:
                assert False, "Unhandled option"

        if config_file:
            if not path.isfile(config_file):
                print(
                    "configuration file '{}' does not exist".format(config_file),
                    file=sys.stderr,
                )
                exit(1)
        else:
            for config_file in (
                __file__[: __file__.rfind(".")] + ".cfg",
                "./lcm.cfg",
                "/etc/osm/lcm.cfg",
            ):
                if path.isfile(config_file):
                    break
            else:
                print(
                    "No configuration file 'lcm.cfg' found at the local folder or at /etc/osm/",
                    file=sys.stderr,
                )
                exit(1)
        lcm = Lcm(config_file)
        lcm.start()
    except (LcmException, getopt.GetoptError) as e:
        print(str(e), file=sys.stderr)
        # usage()
        exit(1)