Bug 1609 fix — source: osm/LCM.git, file osm_lcm/prometheus.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2020 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 from time import time
21 import logging
22 import aiohttp
23 import yaml
24 import os
25 from osm_lcm.lcm_utils import LcmException
26 from osm_common.dbbase import DbException
27 from osm_lcm.data_utils.database.database import Database
28 from jinja2 import Template, TemplateError, TemplateNotFound, TemplateSyntaxError
29
30 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
31
# Seed document for the "admin" database collection (_id="prometheus").
# It is inserted by Prometheus.start() when no prometheus entry exists yet.
_admin_defaults = {
    "locked_at": 0,
    "locked_by": None,
    "modified": 1593445184,  # 2020-06-29
    "created": 1593445184,
    "version": "1.0",  # to allow future version updates
}

# Jobs are stored as a dictionary at the database and converted to a list
# before being sent to prometheus
_default_scrape_configs = {
    "mon_exporter": {
        "static_configs": [{"targets": ["mon:8000"]}],
        "job_name": "mon_exporter",
    },
}

initial_prometheus_data = {
    "_id": "prometheus",
    "_admin": _admin_defaults,
    "scrape_configs": _default_scrape_configs,
    "global": {"evaluation_interval": "15s", "scrape_interval": "15s"},
    "rule_files": None,
    "alerting": {"alertmanagers": [{"static_configs": [{"targets": None}]}]},
}
51
52
class Prometheus:
    """
    Keeps the prometheus scrape configuration in sync with the OSM database.

    The configuration lives in the "admin" collection (document
    _id="prometheus"). Jobs are added/removed in that document, dumped to
    prometheus.yml, and prometheus is asked to hot-reload it through its
    HTTP API. Concurrent LCM workers coordinate through a lock stored in
    the same database document.
    """

    # seconds after which a database lock is considered stale and can be
    # taken over by another worker
    PROMETHEUS_LOCKED_TIME = 120

    def __init__(self, config, worker_id, loop, logger=None):
        """
        :param config: dict with "uri" (prometheus base url) and "path"
            (directory where prometheus.yml is written)
        :param worker_id: identifier written into the database lock
        :param loop: asyncio event loop; kept as an attribute for backward
            compatibility (no longer forwarded to asyncio.sleep, see below)
        :param logger: optional logger; defaults to "lcm.prometheus"
        """
        self.worker_id = worker_id
        self.db = Database().instance.db
        self.loop = loop
        self.logger = logger or logging.getLogger("lcm.prometheus")
        self.server = config["uri"]
        self.path = config["path"]
        if not self.path.endswith("/"):
            self.path += "/"
        self.cfg_file = self.path + "prometheus.yml"
        self.cfg_file_backup = self.path + "prometheus.yml-backup"

    @staticmethod
    def parse_job(job_data: str, variables: dict) -> dict:
        """
        Render a Jinja2 job template and load the result as YAML.

        :param job_data: Jinja2 template text of a prometheus job
        :param variables: values for the template placeholders (can be None)
        :return: the rendered job as a dict
        :raises LcmException: if the template cannot be rendered or the
            rendered text is not valid YAML
        """
        try:
            template = Template(job_data)
            job_parsed = template.render(variables or {})
            return yaml.safe_load(job_parsed)
        except (
            TemplateError,
            TemplateNotFound,
            TemplateSyntaxError,
            yaml.YAMLError,  # resolves the previous "TODO yaml exceptions"
        ) as e:
            raise LcmException(
                "Error parsing Jinja2 to prometheus job. job_data={}, variables={}. Error={}".format(
                    job_data, variables, e
                )
            ) from e

    async def start(self):
        """
        Ensure the database contains the prometheus document (creating the
        initial content if missing) and push the configuration to prometheus.

        :raises LcmException: after 4 failed attempts against the database
        """
        for retry in range(4):
            try:
                # read current configuration from database
                prometheus_data = self.db.get_one(
                    "admin", {"_id": "prometheus"}, fail_on_empty=False
                )
                if not prometheus_data:
                    self.logger.info("Init db.admin.prometheus content")
                    self.db.create("admin", initial_prometheus_data)
                # send database config file to prometheus. Ignore loading errors, as prometheus may be starting
                # but at least an initial configuration file is set
                await self.update()
                return
            except DbException as e:
                if retry == 3:
                    raise LcmException(
                        "Max retries trying to init prometheus configuration: {}".format(
                            e
                        )
                    ) from e
                # "loop" keyword dropped: deprecated since Python 3.8 and
                # removed in 3.10, where passing it raised TypeError
                await asyncio.sleep(5)

    async def update(self, add_jobs: dict = None, remove_jobs: list = None) -> bool:
        """
        Add and/or remove scrape jobs, persist the change in the database and
        push the resulting configuration to prometheus.

        :param add_jobs: dictionary with {job_id_1: job_content, job_id_2: job_content}
        :param remove_jobs: list with jobs to remove [job_id_1, job_id_2]
        :return: result. If false prometheus denies this configuration. Exception on error
        :raises LcmException: if the database lock cannot be obtained after
            20 attempts
        """
        for retry in range(20):
            result = True
            if retry:  # first time do not wait
                await asyncio.sleep(4 + retry)

            # lock database: only taken when free, or when the previous lock
            # is older than PROMETHEUS_LOCKED_TIME (stale)
            now = time()
            if not self.db.set_one(
                "admin",
                q_filter={
                    "_id": "prometheus",
                    "_admin.locked_at.lt": now - self.PROMETHEUS_LOCKED_TIME,
                },
                update_dict={
                    "_admin.locked_at": now,
                    "_admin.locked_by": self.worker_id,
                },
                fail_on_empty=False,
            ):
                continue
            # read database
            prometheus_data = self.db.get_one("admin", {"_id": "prometheus"})
            update_dict = {"_admin.locked_at": 0, "_admin.locked_by": None}

            # Make changes from prometheus_incremental
            push_dict = pull_dict = None
            if add_jobs or remove_jobs:
                log_text_list = []
                # NOTE(review): when both add_jobs and remove_jobs are given,
                # only the additions are applied (elif) — confirm this is the
                # intended behavior before changing it
                if add_jobs:
                    log_text_list.append(
                        "adding jobs: {}".format(list(add_jobs.keys()))
                    )
                    prometheus_data["scrape_configs"].update(add_jobs)
                    push_dict = {
                        "scrape_configs." + job_id: job_data
                        for job_id, job_data in add_jobs.items()
                    }
                elif remove_jobs:
                    log_text_list.append("removing jobs: {}".format(list(remove_jobs)))
                    for job_id in remove_jobs:
                        prometheus_data["scrape_configs"].pop(job_id, None)
                    pull_dict = {
                        "scrape_configs." + job_id: None for job_id in remove_jobs
                    }
                self.logger.debug("Updating. " + ". ".join(log_text_list))

            if not await self.send_data(prometheus_data):
                self.logger.error(
                    "Cannot update add_jobs: {}. remove_jobs: {}".format(
                        add_jobs, remove_jobs
                    )
                )
                # discard the changes: unlock the document without persisting
                push_dict = pull_dict = None
                result = False

            # unlock database, persisting the job changes only when
            # prometheus accepted the new configuration
            if push_dict:
                update_dict.update(push_dict)
            if push_dict or pull_dict:
                update_dict["_admin.modified_at"] = now
            if not self.db.set_one(
                "admin",
                {
                    "_id": "prometheus",
                    "_admin.locked_at": now,
                    "_admin.locked_by": self.worker_id,
                },
                update_dict=update_dict,
                unset=pull_dict,
                fail_on_empty=False,
            ):
                # somebody stole the (stale) lock meanwhile; retry
                continue
            return result
        raise LcmException("Cannot update prometheus database. Reached max retries")

    async def send_data(self, new_config):
        """
        Dump new_config to prometheus.yml (backing up the previous file) and
        ask prometheus to reload it, checking afterwards that the new
        configuration has been accepted.

        :param new_config: full prometheus document as stored in the database
        :return: True if prometheus loaded the configuration; False otherwise
            (the previous configuration file is restored in that case)
        """
        restore_backup = False
        # work on a shallow copy so the caller's dict keeps its "_id",
        # "_admin" and its scrape_configs dictionary untouched
        new_config = dict(new_config)
        del new_config["_id"]
        del new_config["_admin"]
        new_scrape_configs = []

        # generate a list with the values of scrape_configs
        for scrape_config in new_config["scrape_configs"].values():
            scrape_config = scrape_config.copy()
            # remove nsr_id metadata from scrape_configs
            scrape_config.pop("nsr_id", None)
            new_scrape_configs.append(scrape_config)
        new_config["scrape_configs"] = new_scrape_configs

        try:
            if os.path.exists(self.cfg_file):
                # keep the old file so it can be restored on failure
                os.rename(self.cfg_file, self.cfg_file_backup)
                restore_backup = True
            with open(self.cfg_file, "w+") as f:
                yaml.safe_dump(new_config, f, indent=4, default_flow_style=False)
            async with aiohttp.ClientSession() as session:
                async with session.post(self.server + "-/reload") as resp:
                    if resp.status > 204:
                        raise LcmException(await resp.text())
                # give prometheus some time to apply the reload
                await asyncio.sleep(5)
                # If prometheus does not admit this configuration, remains with the old one
                # Then, to check if the configuration has been accepted, get the configuration from prometheus
                # and compares with the inserted one
                async with session.get(self.server + "api/v1/status/config") as resp:
                    if resp.status > 204:
                        raise LcmException(await resp.text())
                    current_config = await resp.json()
                    if not self._check_configuration_equal(current_config, new_config):
                        return False
                    else:
                        restore_backup = False
            return True
        except Exception as e:
            self.logger.error(
                "Error updating configuration url={}: {}".format(self.server, e)
            )
            return False
        finally:
            if restore_backup:
                try:
                    os.rename(self.cfg_file_backup, self.cfg_file)
                except Exception as e:
                    self.logger.critical("Exception while rolling back: {}".format(e))

    def _check_configuration_equal(self, current_config, expected_config):
        """
        Compare the job names reported by prometheus with the expected ones.

        :param current_config: response of prometheus api/v1/status/config
        :param expected_config: configuration that was sent (scrape_configs
            already converted to a list)
        :return: True when the job name lists match, or when the prometheus
            answer cannot be parsed (cannot be compared, assume it is ok)
        """
        try:
            current_config_yaml = yaml.safe_load(current_config["data"]["yaml"])
            current_jobs = [
                j["job_name"] for j in current_config_yaml["scrape_configs"]
            ]
            expected_jobs = [j["job_name"] for j in expected_config["scrape_configs"]]
            # NOTE(review): list comparison is order-sensitive; this relies on
            # prometheus reporting jobs in the order they were sent — confirm
            if current_jobs == expected_jobs:
                return True
            else:
                self.logger.error(
                    "Not all jobs have been loaded. Target jobs: {} Loaded jobs: {}".format(
                        expected_jobs, current_jobs
                    )
                )
                return False
        except Exception as e:
            self.logger.error(
                "Invalid obtained status from server. Error: '{}'. Obtained data: '{}'".format(
                    e, current_config
                )
            )
            # if format is not understood, cannot be compared, assume it is ok
            return True