osm_policy_module/core/agent.py
# -*- coding: utf-8 -*-

# Copyright 2018 Whitestack, LLC
# *************************************************************

# This file is part of OSM Monitoring module
# All Rights Reserved to Whitestack, LLC

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# For those usages not covered by the Apache License, Version 2.0 please
# contact: bdiaz@whitestack.com or glavado@whitestack.com
##
import json
import logging

import yaml
from kafka import KafkaConsumer

from osm_policy_module.common.alarm_config import AlarmConfig
from osm_policy_module.common.lcm_client import LcmClient
from osm_policy_module.common.mon_client import MonClient
from osm_policy_module.core.config import Config
from osm_policy_module.core.database import ScalingRecord, ScalingAlarm

log = logging.getLogger(__name__)


class PolicyModuleAgent:
    def run(self):
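        """Consume messages from the 'lcm_pm' and 'alarm_response' Kafka topics:
        'configure_scaling' messages create scaling records and their alarms,
        'notify_alarm' messages trigger the corresponding scaling action via the LCM.
        """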
        cfg = Config.instance()
        # Initialize servers
        kafka_server = '{}:{}'.format(cfg.get('policy_module', 'kafka_server_host'),
                                      cfg.get('policy_module', 'kafka_server_port'))

        # Initialize Kafka consumer
        log.info("Connecting to Kafka server at %s", kafka_server)
        # TODO: Add logic to handle deduplication of messages when using group_id.
        # See: https://stackoverflow.com/a/29836412
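        # A rough sketch of what consuming as part of a consumer group could look
        # like once deduplication is handled; the group_id value is illustrative only:
        #
        #     consumer = KafkaConsumer(bootstrap_servers=kafka_server,
        #                              group_id='policy_module_agent',
        #                              key_deserializer=bytes.decode,
        #                              value_deserializer=bytes.decode)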
        consumer = KafkaConsumer(bootstrap_servers=kafka_server,
                                 key_deserializer=bytes.decode,
                                 value_deserializer=bytes.decode)
        consumer.subscribe(['lcm_pm', 'alarm_response'])

        for message in consumer:
            log.info("Message arrived: %s", message)
            try:
                if message.key == 'configure_scaling':
                    # The payload may arrive as JSON or YAML; try JSON first and
                    # fall back to YAML on a parse error.
                    try:
                        content = json.loads(message.value)
                    except ValueError:
                        content = yaml.safe_load(message.value)
                    log.info("Creating scaling record in DB")
                    # TODO: Use transactions: http://docs.peewee-orm.com/en/latest/peewee/transactions.html
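                    # A possible shape for that change, assuming the peewee database
                    # object is exposed as `db` by osm_policy_module.core.database
                    # (an assumption, not verified here):
                    #
                    #     from osm_policy_module.core.database import db
                    #     with db.atomic():
                    #         scaling_record = ScalingRecord.create(...)
                    #         ...  # create the ScalingAlarm rows in the same transaction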
                    scaling_record = ScalingRecord.create(
                        nsr_id=content['ns_id'],
                        name=content['scaling_group_descriptor']['name'],
                        content=json.dumps(content)
                    )
                    log.info("Created scaling record in DB: nsr_id=%s, name=%s, content=%s",
                             scaling_record.nsr_id,
                             scaling_record.name,
                             scaling_record.content)
                    alarm_configs = self._get_alarm_configs(content)
                    for config in alarm_configs:
                        mon_client = MonClient()
                        log.info("Creating alarm record in DB")
                        alarm_uuid = mon_client.create_alarm(
                            metric_name=config.metric_name,
                            resource_uuid=config.resource_uuid,
                            vim_uuid=config.vim_uuid,
                            threshold=config.threshold,
                            operation=config.operation,
                            statistic=config.statistic
                        )
                        ScalingAlarm.create(
                            alarm_id=alarm_uuid,
                            action=config.action,
                            scaling_record=scaling_record
                        )
                if message.key == 'notify_alarm':
                    content = json.loads(message.value)
                    alarm_id = content['notify_details']['alarm_uuid']
                    # Use first() instead of get() so that an unknown alarm_id yields
                    # None and is ignored, rather than raising DoesNotExist.
                    alarm = ScalingAlarm.select().where(ScalingAlarm.alarm_id == alarm_id).first()
                    if alarm:
                        lcm_client = LcmClient()
                        log.info("Sending scaling action message for ns: %s", alarm.scaling_record.nsr_id)
                        lcm_client.scale(alarm.scaling_record.nsr_id, alarm.scaling_record.name, alarm.action)
            except Exception:
                log.exception("Error consuming message: ")

    def _get_alarm_configs(self, message_content):
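        """Build a list of AlarmConfig objects (a scale-in and a scale-out entry per
        scaling criterion) from the scaling_group_descriptor of a 'configure_scaling'
        message."""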
        scaling_criterias = message_content['scaling_group_descriptor']['scaling_policy']['scaling_criteria']
        alarm_configs = []
        for criteria in scaling_criterias:
            metric_name = ''
            scale_out_threshold = criteria['scale_out_threshold']
            scale_in_threshold = criteria['scale_in_threshold']
            scale_out_operation = criteria['scale_out_relational_operation']
            scale_in_operation = criteria['scale_in_relational_operation']
            statistic = criteria['monitoring_param']['aggregation_type']
            vim_uuid = ''
            resource_uuid = ''
            if 'vdu_monitoring_param' in criteria['monitoring_param']:
                vim_uuid = criteria['monitoring_param']['vdu_monitoring_param']['vim_uuid']
                resource_uuid = criteria['monitoring_param']['vdu_monitoring_param']['resource_id']
                metric_name = criteria['monitoring_param']['vdu_monitoring_param']['name']
            if 'vnf_metric' in criteria['monitoring_param']:
                # TODO vnf_metric
                continue
            if 'vdu_metric' in criteria['monitoring_param']:
                # TODO vdu_metric
                continue
            scale_out_alarm_config = AlarmConfig(metric_name,
                                                 resource_uuid,
                                                 vim_uuid,
                                                 scale_out_threshold,
                                                 scale_out_operation,
                                                 statistic,
                                                 'scale_out')
            scale_in_alarm_config = AlarmConfig(metric_name,
                                                resource_uuid,
                                                vim_uuid,
                                                scale_in_threshold,
                                                scale_in_operation,
                                                statistic,
                                                'scale_in')
            alarm_configs.append(scale_in_alarm_config)
            alarm_configs.append(scale_out_alarm_config)
        return alarm_configs
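
# Hypothetical usage sketch (not part of this module): the agent is expected to be
# instantiated and started by the policy module entry point, roughly as:
#
#     from osm_policy_module.core.agent import PolicyModuleAgent
#
#     PolicyModuleAgent().run()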