Adds support for vdu_name and vnf_member_index in configure_scaling
[osm/MON.git] / policy_module / osm_policy_module / core / agent.py
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2018 Whitestack, LLC
4 # *************************************************************
5
6 # This file is part of OSM Monitoring module
7 # All Rights Reserved to Whitestack, LLC
8
9 # Licensed under the Apache License, Version 2.0 (the "License"); you may
10 # not use this file except in compliance with the License. You may obtain
11 # a copy of the License at
12
13 # http://www.apache.org/licenses/LICENSE-2.0
14
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
17 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
18 # License for the specific language governing permissions and limitations
19 # under the License.
20
21 # For those usages not covered by the Apache License, Version 2.0 please
22 # contact: bdiaz@whitestack.com or glavado@whitestack.com
23 ##
24 import json
25 import logging
26 from typing import Dict, List
27
28 import peewee
29 import yaml
30
31 from kafka import KafkaConsumer
32 from osm_policy_module.core.config import Config
33 from osm_policy_module.common.lcm_client import LcmClient
34
35 from osm_policy_module.common.alarm_config import AlarmConfig
36 from osm_policy_module.common.mon_client import MonClient
37 from osm_policy_module.core.database import ScalingRecord, ScalingAlarm
38
39 log = logging.getLogger(__name__)
40
41
class PolicyModuleAgent:
    """Kafka-driven agent that wires OSM scaling policies to MON alarms.

    Consumes two kinds of messages:
      * ``configure_scaling``: persists a ScalingRecord and creates one MON
        alarm per scale-in/scale-out criteria of the scaling policy.
      * ``notify_alarm``: looks up the ScalingAlarm matching the fired alarm
        and asks LCM to execute the configured scaling action.
    """

    def run(self):
        """Connect to Kafka and consume messages forever (blocking)."""
        cfg = Config.instance()
        # Initialize servers
        kafka_server = '{}:{}'.format(cfg.get('policy_module', 'kafka_server_host'),
                                      cfg.get('policy_module', 'kafka_server_port'))

        # Initialize Kafka consumer
        log.info("Connecting to Kafka server at %s", kafka_server)
        # TODO: Add logic to handle deduplication of messages when using group_id.
        # See: https://stackoverflow.com/a/29836412
        consumer = KafkaConsumer(bootstrap_servers=kafka_server,
                                 key_deserializer=bytes.decode,
                                 value_deserializer=bytes.decode)
        consumer.subscribe(['lcm_pm', 'alarm_response'])

        for message in consumer:
            log.info("Message arrived: %s", message)
            try:
                if message.key == 'configure_scaling':
                    self._handle_configure_scaling(message)
                elif message.key == 'notify_alarm':
                    self._handle_notify_alarm(message)
            except Exception:
                # Top-level boundary: log and keep consuming so one bad
                # message does not kill the agent.
                log.exception("Error consuming message: ")

    def _handle_configure_scaling(self, message):
        """Persist the scaling config and create its alarms in MON.

        The message value may be JSON or YAML encoded; JSON is tried first.
        """
        try:
            content = json.loads(message.value)
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError) on non-JSON
            # payloads; fall back to YAML parsing.
            content = yaml.safe_load(message.value)
        log.info("Creating scaling record in DB")
        # TODO: Use transactions: http://docs.peewee-orm.com/en/latest/peewee/transactions.html
        scaling_record = ScalingRecord.create(
            nsr_id=content['ns_id'],
            name=content['scaling_group_descriptor']['name'],
            content=json.dumps(content)
        )
        log.info("Created scaling record in DB : nsr_id=%s, name=%s, content=%s",
                 scaling_record.nsr_id,
                 scaling_record.name,
                 scaling_record.content)
        alarm_configs = self._get_alarm_configs(content)
        # One client serves every alarm creation for this record; no need
        # to rebuild it per iteration.
        mon_client = MonClient()
        for config in alarm_configs:
            log.info("Creating alarm record in DB")
            alarm_uuid = mon_client.create_alarm(
                metric_name=config.metric_name,
                ns_id=scaling_record.nsr_id,
                vdu_name=config.vdu_name,
                vnf_member_index=config.vnf_member_index,
                threshold=config.threshold,
                operation=config.operation,
                statistic=config.statistic
            )
            ScalingAlarm.create(
                alarm_id=alarm_uuid,
                action=config.action,
                scaling_record=scaling_record
            )

    def _handle_notify_alarm(self, message):
        """Trigger the scaling action tied to a fired alarm, if one exists."""
        content = json.loads(message.value)
        alarm_id = content['notify_details']['alarm_uuid']
        log.info("Received alarm notification for alarm %s", alarm_id)
        try:
            alarm = ScalingAlarm.select().where(ScalingAlarm.alarm_id == alarm_id).get()
            lcm_client = LcmClient()
            log.info("Sending scaling action message for ns: %s", alarm_id)
            lcm_client.scale(alarm.scaling_record.nsr_id, alarm.scaling_record.name, alarm.action)
        except ScalingAlarm.DoesNotExist:
            # Was '%.': a broken placeholder that never rendered the id.
            log.info("There is no action configured for alarm %s.", alarm_id)

    def _get_alarm_configs(self, message_content: Dict) -> List[AlarmConfig]:
        """Build the scale-in/scale-out AlarmConfig pairs for a scaling policy.

        :param message_content: parsed 'configure_scaling' message body.
        :return: one scale_in and one scale_out AlarmConfig per criteria;
                 vnf_metric / vdu_metric criteria are skipped (TODO).
        """
        scaling_criterias = message_content['scaling_group_descriptor']['scaling_policy']['scaling_criteria']
        alarm_configs = []
        for criteria in scaling_criterias:
            metric_name = ''
            scale_out_threshold = criteria['scale_out_threshold']
            scale_in_threshold = criteria['scale_in_threshold']
            scale_out_operation = criteria['scale_out_relational_operation']
            scale_in_operation = criteria['scale_in_relational_operation']
            statistic = criteria['monitoring_param']['aggregation_type']
            vdu_name = ''
            vnf_member_index = ''
            if 'vdu_monitoring_param' in criteria['monitoring_param']:
                vdu_name = criteria['monitoring_param']['vdu_monitoring_param']['vdu_name']
                vnf_member_index = criteria['monitoring_param']['vdu_monitoring_param']['vnf_member_index']
                metric_name = criteria['monitoring_param']['vdu_monitoring_param']['name']
            if 'vnf_metric' in criteria['monitoring_param']:
                # TODO vnf_metric
                continue
            if 'vdu_metric' in criteria['monitoring_param']:
                # TODO vdu_metric
                continue
            scale_out_alarm_config = AlarmConfig(metric_name,
                                                 vdu_name,
                                                 vnf_member_index,
                                                 scale_out_threshold,
                                                 scale_out_operation,
                                                 statistic,
                                                 'scale_out')
            scale_in_alarm_config = AlarmConfig(metric_name,
                                                vdu_name,
                                                vnf_member_index,
                                                scale_in_threshold,
                                                scale_in_operation,
                                                statistic,
                                                'scale_in')
            alarm_configs.append(scale_in_alarm_config)
            alarm_configs.append(scale_out_alarm_config)
        return alarm_configs