osm_lcm/lcm_helm_conn.py
##
# Copyright 2020 Telefonica Investigacion y Desarrollo, S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
import functools
import yaml
import asyncio
import socket
import uuid
import os

from grpclib.client import Channel

from osm_lcm.frontend_pb2 import PrimitiveRequest
from osm_lcm.frontend_pb2 import SshKeyRequest, SshKeyReply
from osm_lcm.frontend_grpc import FrontendExecutorStub
from osm_lcm.lcm_utils import LcmBase

from osm_lcm.data_utils.database.database import Database
from osm_lcm.data_utils.filesystem.filesystem import Filesystem

from n2vc.n2vc_conn import N2VCConnector
from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.exceptions import N2VCBadArgumentsException, N2VCException, N2VCExecutionException

from osm_lcm.lcm_utils import deep_get

def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"):
    def wrapper(func):
        retry_exceptions = (
            ConnectionRefusedError,
        )

        @functools.wraps(func)
        async def wrapped(*args, **kwargs):
            # default values for wait time and delay time
            delay_time = 10
            max_wait_time = 300

            # obtain arguments from the named instance attributes
            self = args[0]
            if self.__dict__.get(max_wait_time_var):
                max_wait_time = self.__dict__.get(max_wait_time_var)
            if self.__dict__.get(delay_time_var):
                delay_time = self.__dict__.get(delay_time_var)

            wait_time = max_wait_time
            while wait_time > 0:
                try:
                    return await func(*args, **kwargs)
                except retry_exceptions:
                    wait_time = wait_time - delay_time
                    await asyncio.sleep(delay_time)
                    continue
            # retries exhausted without the call succeeding
            raise ConnectionRefusedError("Maximum retry time reached")
        return wrapped
    return wrapper

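# Illustrative usage of the decorator above (a sketch; the real call sites are the
# gRPC helper methods further down in this class):
#
#     @retryer(max_wait_time_var="_max_retry_time", delay_time_var="_retry_delay")
#     async def _some_grpc_call(self, ip_addr):
#         ...
#
# The maximum wait and delay values are read from the named instance attributes when
# present; otherwise the defaults defined inside the decorator are used.
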
class LCMHelmConn(N2VCConnector, LcmBase):
    _KUBECTL_OSM_NAMESPACE = "osm"
    _KUBECTL_OSM_CLUSTER_NAME = "_system-osm-k8s"
    _EE_SERVICE_PORT = 50050

    # Initial max retry time
    _MAX_INITIAL_RETRY_TIME = 600
    # Max retry time for normal operations
    _MAX_RETRY_TIME = 30
    # Time between retries, retry time after a connection error is raised
    _EE_RETRY_DELAY = 10

    def __init__(self,
                 log: object = None,
                 loop: object = None,
                 vca_config: dict = None,
                 on_update_db=None, ):
        """
        Initialize EE helm connector.
        """

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs

        # parent class constructor
        N2VCConnector.__init__(
            self,
            log=log,
            loop=loop,
            on_update_db=on_update_db,
            db=self.db,
            fs=self.fs
        )

        self.vca_config = vca_config
        self.log.debug("Initialize helm N2VC connector")
        self.log.debug("initial vca_config: {}".format(vca_config))

        # TODO - Obtain data from configuration
        self._ee_service_port = self._EE_SERVICE_PORT

        self._retry_delay = self._EE_RETRY_DELAY

        if self.vca_config and self.vca_config.get("eegrpcinittimeout"):
            self._initial_retry_time = self.vca_config.get("eegrpcinittimeout")
            self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
        else:
            self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
            self.log.debug("Applied default initial retry time: {}".format(self._initial_retry_time))

        if self.vca_config and self.vca_config.get("eegrpctimeout"):
            self._max_retry_time = self.vca_config.get("eegrpctimeout")
            self.log.debug("Retry time: {}".format(self._max_retry_time))
        else:
            self._max_retry_time = self._MAX_RETRY_TIME
            self.log.debug("Applied default retry time: {}".format(self._max_retry_time))

        # initialize helm connectors for helm v2 and helm v3
        self._k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            fs=self.fs,
            db=self.db,
            log=self.log,
            on_update_db=None,
        )

        self._k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.log,
            db=self.db,
            on_update_db=None,
        )

        self._system_cluster_id = None
        self.log.info("Helm N2VC connector initialized")

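    # Example vca_config excerpt consumed by the constructor above (values are
    # illustrative assumptions, only the keys are taken from this file):
    #
    #     vca_config = {
    #         "kubectlpath": "/usr/bin/kubectl",
    #         "helmpath": "/usr/bin/helm",
    #         "helm3path": "/usr/bin/helm3",
    #         "eegrpcinittimeout": 600,  # seconds to wait for the EE gRPC server at startup
    #         "eegrpctimeout": 30,       # seconds to wait for normal EE operations
    #     }
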
    # TODO - reuse_ee_id?
    async def create_execution_environment(self,
                                           namespace: str,
                                           db_dict: dict,
                                           reuse_ee_id: str = None,
                                           progress_timeout: float = None,
                                           total_timeout: float = None,
                                           config: dict = None,
                                           artifact_path: str = None,
                                           vca_type: str = None,
                                           *kargs, **kwargs) -> (str, dict):
        """
        Creates a new helm execution environment deploying the helm chart indicated in the
        artifact_path
        :param str namespace: This param is not used, all helm charts are deployed in the osm
            system namespace
        :param dict db_dict: where to write to database when the status changes.
            It contains a dictionary with {collection: str, filter: {}, path: str},
            e.g. {collection: "nsrs", filter: {_id: <nsd-id>}, path:
            "_admin.deployed.VCA.3"}
        :param str reuse_ee_id: ee id from an older execution. TODO - right now this param is not used
        :param float progress_timeout:
        :param float total_timeout:
        :param dict config: General variables to instantiate KDU
        :param str artifact_path: path of package content
        :param str vca_type: Type of vca, must be type helm or helm-v3
        :returns str, dict: id of the new execution environment in the form
            <vca_type>:<namespace>.<helm_id>, and a credentials object set to None, as all
            credentials are taken from the osm kubernetes .kubeconfig
        """

        self.log.info(
            "create_execution_environment: namespace: {}, artifact_path: {}, db_dict: {}, "
            "reuse_ee_id: {}".format(
                namespace, artifact_path, db_dict, reuse_ee_id)
        )

        # Validate artifact-path is provided
        if artifact_path is None or len(artifact_path) == 0:
            raise N2VCBadArgumentsException(
                message="artifact_path is mandatory", bad_args=["artifact_path"]
            )

        # Validate artifact-path exists and sync path
        from_path = os.path.split(artifact_path)[0]
        self.fs.sync(from_path)

        # remove duplicate slashes in charm path
        while artifact_path.find("//") >= 0:
            artifact_path = artifact_path.replace("//", "/")

        # check charm path
        if self.fs.file_exists(artifact_path):
            helm_chart_path = artifact_path
        else:
            msg = "artifact path does not exist: {}".format(artifact_path)
            raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])

        if artifact_path.startswith("/"):
            full_path = self.fs.path + helm_chart_path
        else:
            full_path = self.fs.path + "/" + helm_chart_path

        while full_path.find("//") >= 0:
            full_path = full_path.replace("//", "/")

        try:
            # Call helm conn install
            # Obtain system cluster id from database
            system_cluster_uuid = await self._get_system_cluster_id()
            # Add osm parameters, if present, to the global section
            if config and config.get("osm"):
                if not config.get("global"):
                    config["global"] = {}
                config["global"]["osm"] = config.get("osm")

            self.log.debug("install helm chart: {}".format(full_path))
            if vca_type == "helm":
                helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
                    db_dict=db_dict,
                    kdu_model=full_path,
                )
                await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path,
                                                    kdu_instance=helm_id,
                                                    namespace=self._KUBECTL_OSM_NAMESPACE,
                                                    params=config,
                                                    db_dict=db_dict,
                                                    timeout=progress_timeout)
            else:
                helm_id = self._k8sclusterhelm3.generate_kdu_instance_name(
                    db_dict=db_dict,
                    kdu_model=full_path,
                )
                await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path,
                                                    kdu_instance=helm_id,
                                                    namespace=self._KUBECTL_OSM_NAMESPACE,
                                                    params=config,
                                                    db_dict=db_dict,
                                                    timeout=progress_timeout)

            ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id)
            return ee_id, None
        except N2VCException:
            raise
        except Exception as e:
            self.log.error("Error deploying chart ee: {}".format(e), exc_info=True)
            raise N2VCException("Error deploying chart ee: {}".format(e))

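    # Illustrative result (a sketch): for vca_type "helm-v3" and a generated helm_id such
    # as "mychart-0012345678", the returned ee_id would be "helm-v3:osm.mychart-0012345678"
    # and the credentials object is None.
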
    async def register_execution_environment(self, namespace: str, credentials: dict, db_dict: dict,
                                             progress_timeout: float = None, total_timeout: float = None,
                                             *kargs, **kwargs) -> str:
        # nothing to do
        pass

    async def install_configuration_sw(self, *args, **kwargs):
        # nothing to do
        pass

    async def add_relation(self, *args, **kwargs):
        # nothing to do
        pass

    async def remove_relation(self):
        # nothing to do
        pass

    async def get_status(self, *args, **kwargs):
        # not used for this connector
        pass

    async def get_ee_ssh_public__key(
        self,
        ee_id: str,
        db_dict: dict,
        progress_timeout: float = None,
        total_timeout: float = None,
        **kwargs,
    ) -> str:
        """
        Obtains the ssh public key from the ee executing the GetSshKey method on the ee.

        :param str ee_id: the id of the execution environment returned by
            create_execution_environment or register_execution_environment
        :param dict db_dict:
        :param float progress_timeout:
        :param float total_timeout:
        :returns: public key of the execution environment
        """

        self.log.info(
            "get_ee_ssh_public_key: ee_id: {}, db_dict: {}".format(
                ee_id, db_dict)
        )

        # check arguments
        if ee_id is None or len(ee_id) == 0:
            raise N2VCBadArgumentsException(
                message="ee_id is mandatory", bad_args=["ee_id"]
            )

        try:
            # Obtain ip_addr for the ee service; it is resolved by DNS from the ee name by kubernetes
            version, namespace, helm_id = self._get_ee_id_parts(ee_id)
            ip_addr = socket.gethostbyname(helm_id)

            # Obtain ssh_key from the ee; this method retries to give the ee time to
            # install libraries and start successfully
            ssh_key = await self._get_ssh_key(ip_addr)
            return ssh_key
        except Exception as e:
            self.log.error("Error obtaining ee ssh_key: {}".format(e), exc_info=True)
            raise N2VCException("Error obtaining ee ssh_key: {}".format(e))

    async def exec_primitive(
        self,
        ee_id: str,
        primitive_name: str,
        params_dict: dict,
        db_dict: dict = None,
        progress_timeout: float = None,
        total_timeout: float = None,
        **kwargs,
    ) -> str:
        """
        Execute a primitive in the execution environment

        :param str ee_id: the one returned by create_execution_environment or
            register_execution_environment with the format namespace.helm_id
        :param str primitive_name: must be one defined in the software. There is one
            called 'config', where, for the proxy case, the 'credentials' of VM are
            provided
        :param dict params_dict: parameters of the action
        :param dict db_dict: where to write into database when the status changes.
            It contains a dict with
            {collection: <str>, filter: {}, path: <str>},
            e.g. {collection: "nslcmops", filter:
            {_id: <nslcmop_id>}, path: "_admin.VCA"}
            It will be used to store information about intermediate notifications
        :param float progress_timeout:
        :param float total_timeout:
        :returns str: primitive result, if ok. It raises exceptions in case of fail
        """

        self.log.info("exec primitive for ee_id: {}, primitive_name: {}, params_dict: {}, db_dict: {}".format(
            ee_id, primitive_name, params_dict, db_dict
        ))

        # check arguments
        if ee_id is None or len(ee_id) == 0:
            raise N2VCBadArgumentsException(
                message="ee_id is mandatory", bad_args=["ee_id"]
            )
        if primitive_name is None or len(primitive_name) == 0:
            raise N2VCBadArgumentsException(
                message="action_name is mandatory", bad_args=["action_name"]
            )
        if params_dict is None:
            params_dict = dict()

        try:
            version, namespace, helm_id = self._get_ee_id_parts(ee_id)
            ip_addr = socket.gethostbyname(helm_id)
        except Exception as e:
            self.log.error("Error getting ee ip: {}".format(e))
            raise N2VCException("Error getting ee ip: {}".format(e))

        if primitive_name == "config":
            try:
                # Execute config primitive with a higher timeout to cover the case where the ee is still starting
                status, detailed_message = await self._execute_config_primitive(ip_addr, params_dict, db_dict=db_dict)
                self.log.debug("Executed config primitive ee_id: {}, status: {}, message: {}".format(
                    ee_id, status, detailed_message))
                if status != "OK":
                    self.log.error("Error configuring helm ee, status: {}, message: {}".format(
                        status, detailed_message))
                    raise N2VCExecutionException(
                        message="Error configuring helm ee_id: {}, status: {}, message: {}: ".format(
                            ee_id, status, detailed_message
                        ),
                        primitive_name=primitive_name,
                    )
            except Exception as e:
                self.log.error("Error configuring helm ee: {}".format(e))
                raise N2VCExecutionException(
                    message="Error configuring helm ee_id: {}, {}".format(
                        ee_id, e
                    ),
                    primitive_name=primitive_name,
                )
            return "CONFIG OK"
        else:
            try:
                # Execute primitive
                status, detailed_message = await self._execute_primitive(ip_addr, primitive_name,
                                                                         params_dict, db_dict=db_dict)
                self.log.debug("Executed primitive {} ee_id: {}, status: {}, message: {}".format(
                    primitive_name, ee_id, status, detailed_message))
                if status != "OK" and status != "PROCESSING":
                    self.log.error(
                        "Execute primitive {} returned not ok status: {}, message: {}".format(
                            primitive_name, status, detailed_message)
                    )
                    raise N2VCExecutionException(
                        message="Execute primitive {} returned not ok status: {}, message: {}".format(
                            primitive_name, status, detailed_message
                        ),
                        primitive_name=primitive_name,
                    )
            except Exception as e:
                self.log.error(
                    "Error executing primitive {}: {}".format(primitive_name, e)
                )
                raise N2VCExecutionException(
                    message="Error executing primitive {} into ee={} : {}".format(
                        primitive_name, ee_id, e
                    ),
                    primitive_name=primitive_name,
                )
            return detailed_message

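    # Illustrative call (a sketch; the primitive name and parameter keys depend on the
    # descriptor of the deployed execution environment and are not defined in this file):
    #
    #     result = await conn.exec_primitive(
    #         ee_id="helm-v3:osm.mychart-0012345678",
    #         primitive_name="touch",
    #         params_dict={"file-path": "/tmp/osm-touched"},
    #     )
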
    async def deregister_execution_environments(self):
        # nothing to be done
        pass

    async def delete_execution_environment(
        self,
        ee_id: str,
        db_dict: dict = None,
        total_timeout: float = None,
        **kwargs,
    ):
        """
        Delete an execution environment
        :param str ee_id: id of the execution environment to delete, including namespace.helm_id
        :param dict db_dict: where to write into database when the status changes.
            It contains a dict with
            {collection: <str>, filter: {}, path: <str>},
            e.g. {collection: "nsrs", filter:
            {_id: <nsd-id>}, path: "_admin.deployed.VCA.3"}
        :param float total_timeout:
        """

        self.log.info("ee_id: {}".format(ee_id))

        # check arguments
        if ee_id is None:
            raise N2VCBadArgumentsException(
                message="ee_id is mandatory", bad_args=["ee_id"]
            )

        try:

            # Obtain cluster_uuid
            system_cluster_uuid = await self._get_system_cluster_id()

            # Get helm_id
            version, namespace, helm_id = self._get_ee_id_parts(ee_id)

            # Uninstall chart; for backward compatibility we must assume that if there is no
            # version it is helm-v2
            if version == "helm-v3":
                await self._k8sclusterhelm3.uninstall(system_cluster_uuid, helm_id)
            else:
                await self._k8sclusterhelm2.uninstall(system_cluster_uuid, helm_id)
            self.log.info("ee_id: {} deleted".format(ee_id))
        except N2VCException:
            raise
        except Exception as e:
            self.log.error("Error deleting ee id: {}: {}".format(ee_id, e), exc_info=True)
            raise N2VCException("Error deleting ee id {}: {}".format(ee_id, e))

    async def delete_namespace(self, namespace: str, db_dict: dict = None, total_timeout: float = None):
        # method not implemented for this connector; execution environments must be deleted individually
        pass

    async def install_k8s_proxy_charm(
        self,
        charm_name: str,
        namespace: str,
        artifact_path: str,
        db_dict: dict,
        progress_timeout: float = None,
        total_timeout: float = None,
        config: dict = None,
        *kargs, **kwargs
    ) -> str:
        pass

    @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
    async def _get_ssh_key(self, ip_addr):
        channel = Channel(ip_addr, self._ee_service_port)
        try:
            stub = FrontendExecutorStub(channel)
            self.log.debug("get ssh key, ip_addr: {}".format(ip_addr))
            reply: SshKeyReply = await stub.GetSshKey(SshKeyRequest())
            return reply.message
        finally:
            channel.close()

    @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
    async def _execute_config_primitive(self, ip_addr, params, db_dict=None):
        return await self._execute_primitive_internal(ip_addr, "config", params, db_dict=db_dict)

    @retryer(max_wait_time_var="_max_retry_time", delay_time_var="_retry_delay")
    async def _execute_primitive(self, ip_addr, primitive_name, params, db_dict=None):
        return await self._execute_primitive_internal(ip_addr, primitive_name, params, db_dict=db_dict)

    async def _execute_primitive_internal(self, ip_addr, primitive_name, params, db_dict=None):

        channel = Channel(ip_addr, self._ee_service_port)
        try:
            stub = FrontendExecutorStub(channel)
            async with stub.RunPrimitive.open() as stream:
                primitive_id = str(uuid.uuid1())
                result = None
                self.log.debug("Execute primitive internal: id:{}, name:{}, params: {}".
                               format(primitive_id, primitive_name, params))
                await stream.send_message(
                    PrimitiveRequest(id=primitive_id, name=primitive_name, params=yaml.dump(params)), end=True)
                async for reply in stream:
                    self.log.debug("Received reply: {}".format(reply))
                    result = reply
                    # If db_dict is provided, write notifications to the database
                    if db_dict:
                        self._write_op_detailed_status(db_dict, reply.status, reply.detailed_message)
                if result:
                    return result.status, result.detailed_message
                else:
                    return "ERROR", "No result received"
        finally:
            channel.close()

    def _write_op_detailed_status(self, db_dict, status, detailed_message):

        # write detailed-status to database, e.g. at _admin.deployed.VCA.x
        try:
            the_table = db_dict["collection"]
            the_filter = db_dict["filter"]
            update_dict = {"detailed-status": "{}: {}".format(status, detailed_message)}
            # self.log.debug('Writing detailed-status to database: {}'.format(update_dict))
            self.db.set_one(
                table=the_table,
                q_filter=the_filter,
                update_dict=update_dict,
                fail_on_empty=True,
            )
        except asyncio.CancelledError:
            raise
        except Exception as e:
            self.log.error("Error writing detailedStatus to database: {}".format(e))

    async def _get_system_cluster_id(self):
        if not self._system_cluster_id:
            db_k8cluster = self.db.get_one("k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME})
            k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id"))
            if not k8s_hc_id:
                try:
                    # backward compatibility for existing clusters that have not been initialized for helm v3
                    cluster_id = db_k8cluster.get("_id")
                    k8s_credentials = yaml.safe_dump(db_k8cluster.get("credentials"))
                    k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env(k8s_credentials,
                                                                                   reuse_cluster_uuid=cluster_id)
                    db_k8scluster_update = {"_admin.helm-chart-v3.error_msg": None,
                                            "_admin.helm-chart-v3.id": k8s_hc_id,
                                            "_admin.helm-chart-v3.created": uninstall_sw,
                                            "_admin.helm-chart-v3.operationalState": "ENABLED"}
                    self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
                except Exception as e:
                    self.log.error("error initializing helm-v3 cluster: {}".format(str(e)))
                    raise N2VCException("K8s system cluster '{}' has not been initialized for helm-chart-v3".format(
                        cluster_id))
            self._system_cluster_id = k8s_hc_id
        return self._system_cluster_id

    def _get_ee_id_parts(self, ee_id):
        """
        Parses an ee_id stored in the database, which can be either 'version:namespace.helm_id'
        or only 'namespace.helm_id' for backward compatibility.
        If present, the helm version can be helm-v3 or helm (the old helm-v2 version).
        """
        version, _, part_id = ee_id.rpartition(':')
        namespace, _, helm_id = part_id.rpartition('.')
        return version, namespace, helm_id
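
    # Illustrative parsing (a sketch, derived from the rpartition calls above):
    #     "helm-v3:osm.mychart-0012345678" -> ("helm-v3", "osm", "mychart-0012345678")
    #     "osm.mychart-0012345678"         -> ("", "osm", "mychart-0012345678")  # legacy helm-v2 format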