Adds support for cooldown-time variable in scaling-policy
[osm/POL.git] / osm_policy_module / core / agent.py
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2018 Whitestack, LLC
4 # *************************************************************
5
6 # This file is part of OSM Monitoring module
7 # All Rights Reserved to Whitestack, LLC
8
9 # Licensed under the Apache License, Version 2.0 (the "License"); you may
10 # not use this file except in compliance with the License. You may obtain
11 # a copy of the License at
12
13 # http://www.apache.org/licenses/LICENSE-2.0
14
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
17 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
18 # License for the specific language governing permissions and limitations
19 # under the License.
20
21 # For those usages not covered by the Apache License, Version 2.0 please
22 # contact: bdiaz@whitestack.com or glavado@whitestack.com
23 ##
24 import datetime
25 import json
26 import logging
27 import threading
28 from json import JSONDecodeError
29
30 import yaml
31 from kafka import KafkaConsumer
32
33 from osm_policy_module.common.db_client import DbClient
34 from osm_policy_module.common.lcm_client import LcmClient
35 from osm_policy_module.common.mon_client import MonClient
36 from osm_policy_module.core import database
37 from osm_policy_module.core.config import Config
38 from osm_policy_module.core.database import ScalingGroup, ScalingAlarm, ScalingPolicy, ScalingCriteria
39
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)

# Kafka message keys the agent reacts to; messages with any other key are
# ignored (see PolicyModuleAgent._process_msg).
ALLOWED_KAFKA_KEYS = ['instantiated', 'scaled', 'notify_alarm']
43
44
class PolicyModuleAgent:
    """Agent that automates VNF scaling from MON alarm notifications.

    Consumes the ``ns`` and ``alarm_response`` Kafka topics:

    * ``instantiated`` / ``scaled`` (topic ``ns``): (re)creates the DB
      records (scaling group / policy / criteria) and the MON alarms for
      every automatic scaling policy of the network service.
    * ``notify_alarm`` (topic ``alarm_response``): triggers the scaling
      action bound to the alarm via LCM, honouring the policy's
      cooldown-time.
    """

    def __init__(self):
        cfg = Config.instance()
        self.db_client = DbClient()
        self.mon_client = MonClient()
        self.lcm_client = LcmClient()
        # Kafka bootstrap server, e.g. "kafka:9092", from module config.
        self.kafka_server = '{}:{}'.format(cfg.OSMPOL_MESSAGE_HOST,
                                           cfg.OSMPOL_MESSAGE_PORT)

    def run(self):
        """Consume Kafka messages forever, dispatching each to a thread."""
        consumer = KafkaConsumer(bootstrap_servers=self.kafka_server,
                                 key_deserializer=bytes.decode,
                                 value_deserializer=bytes.decode,
                                 group_id='pol-consumer')
        consumer.subscribe(["ns", "alarm_response"])

        for message in consumer:
            # One thread per message so slow handlers (DB/LCM/MON calls)
            # never stall the consumer loop.
            t = threading.Thread(target=self._process_msg,
                                 args=(message.topic, message.key, message.value,))
            t.start()

    def _process_msg(self, topic, key, msg):
        """Route a raw Kafka message to the matching handler.

        Any exception is logged and swallowed: a malformed message must
        not kill the consumer thread.
        """
        try:
            log.debug("Message arrived with topic: %s, key: %s, msg: %s", topic, key, msg)
            if key in ALLOWED_KAFKA_KEYS:
                # Payloads may be JSON or YAML encoded; try JSON first.
                try:
                    content = json.loads(msg)
                except JSONDecodeError:
                    content = yaml.safe_load(msg)

                if key == 'instantiated' or key == 'scaled':
                    self._handle_instantiated_or_scaled(content)

                if key == 'notify_alarm':
                    self._handle_alarm_notification(content)
            else:
                log.debug("Key %s is not in ALLOWED_KAFKA_KEYS", key)
        except Exception:
            log.exception("Error consuming message: ")

    def _handle_alarm_notification(self, content):
        """Trigger the scaling action configured for a MON alarm.

        Skips the action while the owning policy is inside its cooldown
        window; otherwise asks LCM to scale and stamps the scaling time.
        """
        details = content['notify_details']
        alarm_id = details['alarm_uuid']
        log.info(
            "Received alarm notification for alarm %s, metric %s, operation %s, "
            "threshold %s, vdu_name %s, vnf_member_index %s, ns_id %s",
            alarm_id, details['metric_name'], details['operation'],
            details['threshold_value'], details['vdu_name'],
            details['vnf_member_index'], details['ns_id'])
        try:
            alarm = ScalingAlarm.select().where(ScalingAlarm.alarm_id == alarm_id).get()
            scaling_policy = alarm.scaling_criteria.scaling_policy
            # Enforce cooldown-time: ignore alarms firing too soon after
            # the previous scaling action of this policy.
            delta = datetime.datetime.now() - scaling_policy.last_scale
            if delta.total_seconds() < scaling_policy.cooldown_time:
                log.info("Time between last scale and now is less than cooldown time. Skipping.")
                return
            # Fix: the previous message logged the alarm uuid as the ns id;
            # log both values explicitly.
            log.info("Sending scaling action message for ns %s (alarm %s)",
                     scaling_policy.scaling_group.nsr_id, alarm_id)
            self.lcm_client.scale(scaling_policy.scaling_group.nsr_id,
                                  scaling_policy.scaling_group.name,
                                  alarm.vnf_member_index,
                                  alarm.action)
            scaling_policy.last_scale = datetime.datetime.now()
            scaling_policy.save()
        except ScalingAlarm.DoesNotExist:
            log.info("There is no action configured for alarm %s.", alarm_id)

    def _handle_instantiated_or_scaled(self, content):
        """(Re)configure scaling groups after an instantiate/scale op.

        Only acts when the LCM operation finished successfully
        (COMPLETED or PARTIALLY_COMPLETED); otherwise just logs and skips.
        """
        nslcmop_id = content['nslcmop_id']
        nslcmop = self.db_client.get_nslcmop(nslcmop_id)
        if nslcmop['operationState'] in ('COMPLETED', 'PARTIALLY_COMPLETED'):
            nsr_id = nslcmop['nsInstanceId']
            log.info("Configuring scaling groups for network service with nsr_id: %s", nsr_id)
            self._configure_scaling_groups(nsr_id)
        else:
            log.info(
                "Network service is not in COMPLETED or PARTIALLY_COMPLETED state. "
                "Current state is %s. Skipping...",
                nslcmop['operationState'])

    def _configure_scaling_groups(self, nsr_id: str):
        """Create DB records and MON alarms for every automatic scaling
        policy of every VNF of the network service.

        Idempotent: existing records and already-alarmed VDUs are
        detected and skipped.
        """
        # TODO(diazb): Check for alarm creation on exception and clean resources if needed.
        # TODO: Add support for non-nfvi metrics
        with database.db.atomic():
            vnfrs = self.db_client.get_vnfrs(nsr_id)
            log.info("Checking %s vnfrs...", len(vnfrs))
            for vnfr in vnfrs:
                vnfd = self.db_client.get_vnfd(vnfr['vnfd-id'])
                log.info("Looking for vnfd %s", vnfr['vnfd-id'])
                # Fix: tolerate vnfds without scaling groups or monitoring
                # params instead of raising KeyError.
                scaling_groups = vnfd.get('scaling-group-descriptor', [])
                vnf_monitoring_params = vnfd.get('monitoring-param', [])
                for scaling_group in scaling_groups:
                    scaling_group_record = self._get_or_create_scaling_group(
                        nsr_id, scaling_group)
                    for scaling_policy in scaling_group['scaling-policy']:
                        # Only automatic policies are driven by alarms.
                        if scaling_policy['scaling-type'] != 'automatic':
                            continue
                        scaling_policy_record = self._get_or_create_scaling_policy(
                            nsr_id, scaling_policy, scaling_group_record)

                        for scaling_criteria in scaling_policy['scaling-criteria']:
                            scaling_criteria_record = self._get_or_create_scaling_criteria(
                                nsr_id, scaling_criteria, scaling_policy_record)

                            for vdu_ref in scaling_group['vdu']:
                                # VNF-level monitoring param referenced by this criteria.
                                vnf_monitoring_param = next(
                                    filter(lambda param: param['id'] == scaling_criteria['vnf-monitoring-param-ref'],
                                           vnf_monitoring_params))
                                # Only the vdu the monitoring param points at gets alarms.
                                if not vdu_ref['vdu-id-ref'] == vnf_monitoring_param['vdu-ref']:
                                    continue
                                vdu = next(
                                    filter(lambda v: v['id'] == vdu_ref['vdu-id-ref'], vnfd['vdu'])
                                )
                                # VDU-level param carries the actual NFVI metric name.
                                vdu_monitoring_param = next(
                                    filter(
                                        lambda param: param['id'] == vnf_monitoring_param['vdu-monitoring-param-ref'],
                                        vdu['monitoring-param']))
                                vdurs = [vdur for vdur in vnfr['vdur']
                                         if vdur['vdu-id-ref'] == vnf_monitoring_param['vdu-ref']]
                                for vdur in vdurs:
                                    try:
                                        ScalingAlarm.select().join(ScalingCriteria).where(
                                            ScalingAlarm.vdu_name == vdur['name'],
                                            ScalingCriteria.name == scaling_criteria['name']
                                        ).get()
                                        log.debug("VDU %s already has an alarm configured", vdur['name'])
                                        continue
                                    except ScalingAlarm.DoesNotExist:
                                        pass
                                    # One alarm per scaling direction.
                                    for action in ('scale_in', 'scale_out'):
                                        self._create_scaling_alarm(
                                            action, nsr_id, vdur, vnfr, scaling_criteria,
                                            vnf_monitoring_param, vdu_monitoring_param,
                                            scaling_criteria_record)

    def _get_or_create_scaling_group(self, nsr_id, scaling_group):
        """Return the ScalingGroup DB record, creating it if missing."""
        try:
            return ScalingGroup.select().where(
                ScalingGroup.nsr_id == nsr_id,
                ScalingGroup.name == scaling_group['name']
            ).get()
        except ScalingGroup.DoesNotExist:
            log.info("Creating scaling group record in DB...")
            record = ScalingGroup.create(
                nsr_id=nsr_id,
                name=scaling_group['name'],
                content=json.dumps(scaling_group)
            )
            log.info("Created scaling group record in DB : nsr_id=%s, name=%s, content=%s",
                     record.nsr_id,
                     record.name,
                     record.content)
            return record

    def _get_or_create_scaling_policy(self, nsr_id, scaling_policy, scaling_group_record):
        """Return the ScalingPolicy DB record, creating it if missing."""
        try:
            return ScalingPolicy.select().join(ScalingGroup).where(
                ScalingPolicy.name == scaling_policy['name'],
                ScalingGroup.id == scaling_group_record.id
            ).get()
        except ScalingPolicy.DoesNotExist:
            log.info("Creating scaling policy record in DB...")
            record = ScalingPolicy.create(
                nsr_id=nsr_id,
                name=scaling_policy['name'],
                # cooldown-time: minimum seconds between two scaling actions.
                cooldown_time=scaling_policy['cooldown-time'],
                scaling_group=scaling_group_record
            )
            log.info("Created scaling policy record in DB : name=%s, scaling_group.name=%s",
                     record.name,
                     record.scaling_group.name)
            return record

    def _get_or_create_scaling_criteria(self, nsr_id, scaling_criteria, scaling_policy_record):
        """Return the ScalingCriteria DB record, creating it if missing."""
        try:
            return ScalingCriteria.select().join(ScalingPolicy).where(
                ScalingPolicy.id == scaling_policy_record.id,
                ScalingCriteria.name == scaling_criteria['name']
            ).get()
        except ScalingCriteria.DoesNotExist:
            log.info("Creating scaling criteria record in DB...")
            record = ScalingCriteria.create(
                nsr_id=nsr_id,
                name=scaling_criteria['name'],
                scaling_policy=scaling_policy_record
            )
            log.info(
                "Created scaling criteria record in DB : name=%s, scaling_policy.name=%s",
                record.name,
                record.scaling_policy.name)
            return record

    def _create_scaling_alarm(self, action, nsr_id, vdur, vnfr, scaling_criteria,
                              vnf_monitoring_param, vdu_monitoring_param,
                              scaling_criteria_record):
        """Create one MON alarm for the given direction and persist it.

        :param action: 'scale_in' or 'scale_out'; selects the
            '<scale-in|scale-out>-threshold' / '-relational-operation'
            keys of the scaling criteria descriptor.
        """
        key_prefix = action.replace('_', '-')  # 'scale_in' -> 'scale-in'
        alarm_uuid = self.mon_client.create_alarm(
            metric_name=vdu_monitoring_param['nfvi-metric'],
            ns_id=nsr_id,
            vdu_name=vdur['name'],
            vnf_member_index=vnfr['member-vnf-index-ref'],
            threshold=scaling_criteria[key_prefix + '-threshold'],
            operation=scaling_criteria[key_prefix + '-relational-operation'],
            statistic=vnf_monitoring_param['aggregation-type']
        )
        ScalingAlarm.create(
            alarm_id=alarm_uuid,
            action=action,
            vnf_member_index=int(vnfr['member-vnf-index-ref']),
            vdu_name=vdur['name'],
            scaling_criteria=scaling_criteria_record
        )