threshold=alarm_descriptor["value"],
operation=alarm_descriptor["operation"],
action=str(alarm_action),
+ vnfr=vnfr,
+ vnfd=vnfd,
)
alarm = VnfAlarmRepository.create(
alarm_id=alarm_descriptor["alarm-id"],
"scale-in-relational-operation"
],
action="scale_in",
+ vnfr=vnfr,
+ vnfd=vnfd,
)
)
alarm = ScalingAlarmRepository.create(
"scale-out-relational-operation"
],
action="scale_out",
+ vnfr=vnfr,
+ vnfd=vnfd,
)
alarm = ScalingAlarmRepository.create(
alarm_uuid=alarm_uuid,
operation: str,
statistic: str = "AVERAGE",
action: str = "",
+ vnfr: object = None,
+ vnfd: object = None,
):
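+ # vnfr/vnfd are optional; when both are provided, the payload builder can tag
+ # the alarm with the VDU's metrics-exporter endpoint instead of the generic tags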
cor_id = random.randint(1, 10**8)
msg = self._build_create_alarm_payload(
statistic,
operation,
action,
+ vnfr,
+ vnfd,
)
log.debug("Sending create_alarm_request %s", msg)
producer = AIOKafkaProducer(
statistic: str,
operation: str,
action: str,
+ vnfr=None,
+ vnfd=None,
):
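+ # Default alarm tags: identify the monitored VDU by NS id, VDU name and VNF member index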
+ tags = {
+     "ns_id": ns_id,
+     "vdu_name": vdu_name,
+     "vnf_member_index": vnf_member_index,
+ }
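+ # When the VNFR/VNFD are available, replace the generic tags with an "instance"
+ # label (VDU IP + exporter metric port), the label Prometheus uses for scrape targets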
+ if vnfr and vnfd:
+     # TODO: Change for multiple DF support
+     df = vnfd.get("df", [{}])[0]
+     metric_port = 9100
+     if "exporters-endpoints" in df:
+         metric_port = df["exporters-endpoints"].get("metric-port", 9100)
+     if metric_name.startswith("kpi_"):
+         metric_name = metric_name.replace("kpi_", "")
+         metric_name = metric_name.strip()
+     for vdu in vnfr["vdur"]:
+         if vdu["name"] == vdu_name:
+             vdu_ip = vdu["ip-address"]
+             tags = {"instance": vdu_ip + ":" + str(metric_port)}
alarm_create_request = {
"correlation_id": cor_id,
"alarm_name": "osm_alarm_{}_{}_{}_{}".format(
"threshold_value": threshold,
"statistic": statistic,
"action": action,
- "tags": {
- "ns_id": ns_id,
- "vdu_name": vdu_name,
- "vnf_member_index": vnf_member_index,
- },
+ "tags": tags,
}
msg = {
"alarm_create_request": alarm_create_request,
vdu_name="cirros_ns-1-cirros_vnfd-VM-1",
vnf_member_index="1",
action="scale_out",
+ vnfr=vnfr_record_mocks[0],
+ vnfd=vnfd_record_mock,
)
create_alarm.assert_not_called_with(
metric_name="average_memory_utilization",
vdu_name="cirros_ns-1-cirros_vnfd-VM-1",
vnf_member_index="1",
action="scale_out",
+ vnfr=vnfr_record_mocks[0],
+ vnfd=vnfd_record_mock,
)
create_alarm.assert_any_call(
metric_name="average_memory_utilization",
vdu_name="cirros_ns-2-cirros_vnfd-VM-1",
vnf_member_index="2",
action="scale_out",
+ vnfr=vnfr_record_mocks[1],
+ vnfd=vnfd_record_mock,
)
create_alarm.assert_not_called_with(
metric_name="average_memory_utilization",
vdu_name="cirros_ns-2-cirros_vnfd-VM-1",
vnf_member_index="2",
action="scale_out",
+ vnfr=vnfr_record_mocks[1],
+ vnfd=vnfd_record_mock,
)
scaling_record = ScalingGroup.get()
self.assertEqual(scaling_record.name, "scale_cirros_vnfd-VM")
threshold=20.0,
operation="LT",
action="{'webhook': ['localhost:9090', 'localhost:9090', 'localhost:9090']}",
+ vnfr=vnfr_record_mocks[0],
+ vnfd=vnfd_record_mock,
)
create_alarm.assert_any_call(
metric_name="average_memory_utilization",
threshold=20.0,
operation="LT",
action="{'webhook': ['localhost:9090', 'localhost:9090', 'localhost:9090']}",
+ vnfr=vnfr_record_mocks[1],
+ vnfd=vnfd_record_mock,
)
@patch.object(DbMongo, "db_connect", Mock())