fix 1242: increase retries while adding prometheus jobs
[osm/LCM.git] osm_lcm/prometheus.py
# -*- coding: utf-8 -*-

##
# Copyright 2020 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##

import asyncio
from time import time
import logging
import aiohttp
import yaml
import os
from osm_lcm.lcm_utils import LcmException
from osm_common.dbbase import DbException
from jinja2 import Template, TemplateError, TemplateNotFound, TemplateSyntaxError

__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"

initial_prometheus_data = {
    "_id": "prometheus",
    "_admin": {
        "locked_at": 0,
        "locked_by": None,
        "modified": 1593445184,  # 2020-06-29
        "created": 1593445184,
        "version": "1.0"  # to allow future version updates
    },
    'scrape_configs': {  # Dictionary at database. Converted to list before sending to prometheus
        'mon_exporter': {'static_configs': [{'targets': ['mon:8000']}], 'job_name': 'mon_exporter'},
    },
    'global': {'evaluation_interval': '15s', 'scrape_interval': '15s'},
    'rule_files': None,
    'alerting': {'alertmanagers': [{'static_configs': [{'targets': None}]}]}
}
class Prometheus:
    """
    Keeps the Prometheus scrape configuration stored in the common database and pushes it to the Prometheus server
    """

    # a database lock older than this (seconds) is considered stale and can be taken over by another worker
    PROMETHEUS_LOCKED_TIME = 120

    def __init__(self, config, worker_id, db, loop, logger=None):
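        """
        :param config: configuration dictionary; "uri" is the Prometheus server base URL used for the HTTP API and
            "path" is the directory where prometheus.yml is written
        :param worker_id: identifier of this LCM worker, stored as the owner of the database lock
        :param db: osm_common database object used to read and store the configuration
        :param loop: asyncio event loop
        :param logger: optional logger; "lcm.prometheus" is used when not provided
        """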
        self.worker_id = worker_id
        self.db = db
        self.loop = loop
        self.logger = logger or logging.getLogger("lcm.prometheus")
        self.server = config["uri"]
        self.path = config["path"]
        if not self.path.endswith("/"):
            self.path += "/"
        self.cfg_file = self.path + "prometheus.yml"
        self.cfg_file_backup = self.path + "prometheus.yml-backup"

    @staticmethod
    def parse_job(job_data: str, variables: dict) -> dict:
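        """
        Render a Jinja2 job template and return it as a python dictionary (prometheus scrape-config format).

        Illustrative (hypothetical) job_data, where the target address would be provided through 'variables':

            job_name: vnf_exporter
            static_configs:
                - targets: ['{{ EXPORTER_POD_IP }}:9100']

        :param job_data: Jinja2 template of a prometheus job in yaml format
        :param variables: dictionary with the values used to render the template
        :return: the rendered job parsed with yaml.safe_load
        """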
        try:
            template = Template(job_data)
            job_parsed = template.render(variables or {})
            return yaml.safe_load(job_parsed)
        except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
            # TODO yaml exceptions
            raise LcmException("Error parsing Jinja2 to prometheus job. job_data={}, variables={}. Error={}".format(
                job_data, variables, e))

    async def start(self):
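        """
        Read the prometheus entry from the common database, creating it with the initial content when missing, and
        push the resulting configuration to the prometheus server. Database errors are retried a few times before
        giving up.
        """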
        for retry in range(4):
            try:
                # self.logger("Starting prometheus ")
                # read from database
                prometheus_data = self.db.get_one("admin", {"_id": "prometheus"}, fail_on_empty=False)
                if not prometheus_data:
                    self.logger.info("Init db.admin.prometheus content")
                    self.db.create("admin", initial_prometheus_data)
                # send the database config file to prometheus. Loading errors are ignored, as prometheus may still
                # be starting, but at least an initial configuration file is set
                await self.update()
                return
            except DbException as e:
                if retry == 3:
                    raise LcmException("Max retries trying to init prometheus configuration: {}".format(e))
                await asyncio.sleep(5, loop=self.loop)

    async def update(self, add_jobs: dict = None, remove_jobs: list = None) -> bool:
        """
        Modify the prometheus configuration stored in the database, adding or removing jobs, and push it to the
        prometheus server

        :param add_jobs: dictionary with {job_id_1: job_content, job_id_2: job_content}
        :param remove_jobs: list with jobs to remove [job_id_1, job_id_2]
        :return: True on success, False if prometheus rejects the new configuration. Raises an exception on error
        """
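        # Illustrative (hypothetical) add_jobs value, with the job content already rendered by parse_job():
        #   {"vnf_exporter_1234": {"job_name": "vnf_exporter_1234",
        #                          "static_configs": [{"targets": ["10.0.0.1:9100"]}]}}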
        for retry in range(20):
            result = True
            if retry:  # first time do not wait
                await asyncio.sleep(4 + retry, loop=self.loop)

            # lock the database entry: the lock is only taken if it is free or if the previous lock has expired
            now = time()
            if not self.db.set_one(
                    "admin",
                    q_filter={"_id": "prometheus", "_admin.locked_at.lt": now - self.PROMETHEUS_LOCKED_TIME},
                    update_dict={"_admin.locked_at": now, "_admin.locked_by": self.worker_id},
                    fail_on_empty=False):
                continue
            # read database
            prometheus_data = self.db.get_one("admin", {"_id": "prometheus"})
            update_dict = {"_admin.locked_at": 0,
                           "_admin.locked_by": None}

            # apply the requested incremental changes
            push_dict = pull_dict = None
            if add_jobs or remove_jobs:
                log_text_list = []
                if add_jobs:
                    log_text_list.append("adding jobs: {}".format(list(add_jobs.keys())))
                    prometheus_data["scrape_configs"].update(add_jobs)
                    push_dict = {"scrape_configs." + job_id: job_data for job_id, job_data in add_jobs.items()}
                elif remove_jobs:
                    log_text_list.append("removing jobs: {}".format(list(remove_jobs)))
                    for job_id in remove_jobs:
                        prometheus_data["scrape_configs"].pop(job_id, None)
                    pull_dict = {"scrape_configs." + job_id: None for job_id in remove_jobs}
                self.logger.debug("Updating. " + ". ".join(log_text_list))

            if not await self.send_data(prometheus_data):
                self.logger.error("Cannot update add_jobs: {}. remove_jobs: {}".format(add_jobs, remove_jobs))
                push_dict = pull_dict = None
                result = False

            # unlock the database entry and persist the changes
            if push_dict:
                update_dict.update(push_dict)
            if push_dict or pull_dict:
                update_dict["_admin.modified_at"] = now
            if not self.db.set_one(
                    "admin", {"_id": "prometheus", "_admin.locked_at": now, "_admin.locked_by": self.worker_id},
                    update_dict=update_dict, unset=pull_dict, fail_on_empty=False):
                continue
            return result
        raise LcmException("Cannot update prometheus database. Reached max retries")

    async def send_data(self, new_config):
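        """
        Write the new configuration to prometheus.yml (keeping a backup of the current file), ask prometheus to
        reload it and verify through the prometheus API that the new jobs have been loaded; the backup is restored
        when the configuration is not accepted.

        For reference, the scrape_configs dictionary from the database is flattened into the list format expected
        by prometheus, e.g. (illustrative content only):

            scrape_configs:
                - job_name: mon_exporter
                  static_configs:
                      - targets: ['mon:8000']

        :param new_config: full prometheus configuration as stored in the database
        :return: True if prometheus has loaded the new configuration, False otherwise
        """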
        restore_backup = False
        del new_config["_id"]
        del new_config["_admin"]
        new_scrape_configs = []

        # generate a list with the values of scrape_configs
        for scrape_config in new_config["scrape_configs"].values():
            scrape_config = scrape_config.copy()
            # remove nsr_id metadata from scrape_configs
            scrape_config.pop("nsr_id", None)
            new_scrape_configs.append(scrape_config)
        new_config["scrape_configs"] = new_scrape_configs

        try:
            if os.path.exists(self.cfg_file):
                os.rename(self.cfg_file, self.cfg_file_backup)
                restore_backup = True
            with open(self.cfg_file, "w+") as f:
                yaml.safe_dump(new_config, f, indent=4, default_flow_style=False)
            # self.logger.debug("new configuration: {}".format(yaml.safe_dump(new_config, indent=4,
            #                                                                 default_flow_style=False)))
            async with aiohttp.ClientSession() as session:
                async with session.post(self.server + "-/reload") as resp:
                    if resp.status > 204:
                        raise LcmException(await resp.text())
                await asyncio.sleep(5, loop=self.loop)
                # If prometheus does not accept this configuration, it keeps the old one. To check whether the new
                # configuration has been accepted, get the running configuration from prometheus and compare it
                # with the one just sent
                async with session.get(self.server + "api/v1/status/config") as resp:
                    if resp.status > 204:
                        raise LcmException(await resp.text())
                    current_config = await resp.json()
                    if not self._check_configuration_equal(current_config, new_config):
                        return False
                    else:
                        restore_backup = False
            return True
        except Exception as e:
            self.logger.error("Error updating configuration url={}: {}".format(self.server, e))
            return False
        finally:
            if restore_backup:
                try:
                    os.rename(self.cfg_file_backup, self.cfg_file)
                except Exception as e:
                    self.logger.critical("Exception while rolling back: {}".format(e))

    def _check_configuration_equal(self, current_config, expected_config):
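        """
        Compare the configuration reported by the prometheus API with the one that was sent, by checking that the
        job_name lists of their scrape_configs match.

        :param current_config: answer of the prometheus api/v1/status/config endpoint
        :param expected_config: configuration that was sent to prometheus
        :return: True when the job lists match or the answer cannot be parsed, False otherwise
        """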
        try:
            # self.logger.debug("Comparing current_config='{}' with expected_config='{}'".format(current_config,
            #                                                                                    expected_config))
            current_config_yaml = yaml.safe_load(current_config['data']['yaml'])
            current_jobs = [j["job_name"] for j in current_config_yaml["scrape_configs"]]
            expected_jobs = [j["job_name"] for j in expected_config["scrape_configs"]]
            if current_jobs == expected_jobs:
                return True
            else:
                self.logger.error("Not all jobs have been loaded. Target jobs: {} Loaded jobs: {}".format(
                    expected_jobs, current_jobs))
                return False
        except Exception as e:
            self.logger.error("Invalid obtained status from server. Error: '{}'. Obtained data: '{}'".format(
                e, current_config))
            # if the format is not understood, it cannot be compared; assume it is ok
            return True
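# Illustrative (hypothetical) usage from another LCM module; the configuration key and job names are examples only:
#   prometheus = Prometheus(config["prometheus"], worker_id, db, loop)
#   await prometheus.start()
#   job = Prometheus.parse_job(job_template, {"EXPORTER_POD_IP": "10.0.0.1"})
#   await prometheus.update(add_jobs={"vnf_exporter_1234": job})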