Feature 11010: Use of upstream helm charts for Prometheus and Grafana in OSM installation
[osm/devops.git] / installers / helm / osm / values.yaml
index 0cdfe6a..3d0e1dc 100644 (file)
@@ -73,6 +73,96 @@ certauth:
 
 grafana:
   enabled: true
+  fullnameOverride: "grafana"
+  service:
+    type: NodePort
+    nodePort: 3000
+    port: 3000
+  extraVolumes:
+    - emptyDir: {}
+      name: sc-dashboard-volume-k8s
+    - emptyDir: {}
+      name: sc-dashboard-volume-osm
+  extraVolumeMounts:
+    - name: sc-dashboard-volume-k8s
+      mountPath: "/tmp/dashboards/Kubernetes Cluster"
+    - name: sc-dashboard-volume-osm
+      mountPath: "/tmp/dashboards/Open Source MANO"
+  rbac:
+    extraClusterRoleRules:
+    - apiGroups:
+      - ""
+      resources:
+      - configmaps
+      - secrets
+      verbs:
+      - get
+      - watch
+      - list
+  datasources:
+    datasource.yaml:
+      apiVersion: 1
+      datasources:
+      - name: osm_prometheus
+        type: prometheus
+        url: http://prometheus:9090
+        access: proxy
+        allowUiUpdates: true
+        isDefault: true
+      - name: Prometheus
+        type: prometheus
+        url: http://osm-monitoring-kube-promet-prometheus.monitoring:9090
+        access: proxy
+        allowUiUpdates: true
+        isDefault: false
+  dashboardProviders:
+    provider.yaml:
+      apiVersion: 1
+      providers:
+      - name: 'Kubernetes Cluster'
+        orgId: 1
+        folder: 'Kubernetes Cluster'
+        type: file
+        disableDeletion: false
+        options:
+          path: '/tmp/dashboards/Kubernetes Cluster'
+      - name: 'Open Source MANO'
+        orgId: 1
+        folder: 'Open Source MANO'
+        type: file
+        disableDeletion: false
+        options:
+          path: '/tmp/dashboards/Open Source MANO'
+  sidecar:
+    dashboards:
+      enabled: true
+      folder: "/tmp/dashboards/"
+      resource: both
+      searchNamespace: osm
+      extraMounts:
+        - name: sc-dashboard-volume-k8s
+          mountPath: "/tmp/dashboards/Kubernetes Cluster"
+        - name: sc-dashboard-volume-osm
+          mountPath: "/tmp/dashboards/Open Source MANO"
+  extraInitContainers:
+    - env:
+      - name: METHOD
+        value: LIST
+      - name: LABEL
+        value: grafana_datasource
+      - name: FOLDER
+        value: /etc/grafana/provisioning/datasources
+      - name: RESOURCE
+        value: both
+      image: kiwigrid/k8s-sidecar:1.15.6
+      imagePullPolicy: IfNotPresent
+      name: grafana-sc-datasources
+      resources: {}
+      terminationMessagePath: /dev/termination-log
+      terminationMessagePolicy: File
+      volumeMounts:
+      - mountPath: /etc/grafana/provisioning/datasources
+        name: config
 
 kafka:
   enabled: true
@@ -173,13 +263,215 @@ pol:
 
 prometheus:
   enabled: true
-  service:
-    type: NodePort
-    nodePort: 9091
-  # replicaCount: 1
-  sidecarImage: {}
-    # repository: opensourcemano/ro
-    # tag: "testing-daily"
+  configmapReload:
+    prometheus:
+      enabled: true
+  prometheus-node-exporter:
+    enabled: false
+  kube-state-metrics:
+    enabled: false
+  server:
+    extraFlags:
+      - web.enable-lifecycle
+    statefulSet:
+      enabled: true
+    configPath: /etc/prometheus/prometheus.yml
+    fullnameOverride: "prometheus"
+    service:
+      servicePort: 9090
+      type: NodePort
+      nodePort: 9091
+    sidecarContainers:
+      prometheus-config-sidecar:
+        securityContext:
+          # readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          # runAsNonRoot: true
+        image: opensourcemano/prometheus
+        imagePullPolicy: IfNotPresent
+        command: ["/bin/sh", "-c"]
+        args: ["sleep 50; python -u ./app.py"]
+        volumeMounts:
+          - name: prom-config
+            mountPath: /etc/prometheus
+          - name: prom-config-base
+            mountPath: /etc/prometheus_base
+        resources:
+          limits:
+            memory: 1024Mi
+          requests:
+            memory: 128Mi
+        envFrom:
+          - configMapRef:
+              name: osm-prometheus-sidecar-configmap
+    extraInitContainers:
+      - name: prometheus-init-config
+        image: busybox
+        command: ["/bin/sh", "-c"]
+        # args: [' sleep 100000 ']
+        args: ['if [ ! -f "/etc/prometheus/prometheus.yml" ]; then cp /config/prometheus.yml /etc/prometheus; fi; cp /config/osm_metric_rules.yml /config/osm_alert_rules.yml /etc/prometheus']
+        volumeMounts:
+          - name: prom-config
+            mountPath: /etc/prometheus
+          - name: prom-config-base
+            mountPath: /config
+    extraVolumeMounts:
+      - name: prom-config
+        mountPath: /etc/prometheus
+    extraVolumes:
+      - name: prom-config
+        emptyDir: {}
+      - name: prom-config-base
+        projected:
+          sources:
+            - configMap:
+                name: osm-prom-configmap
+                items:
+                  - key: prometheus.yml
+                    path: prometheus.yml
+            - configMap:
+                name: osm-prometheus-recordingrules-configmap
+                items:
+                  - key: osm_metric_rules.yml
+                    path: osm_metric_rules.yml
+            - configMap:
+                name: osm-prometheus-alertingrules-configmap
+                items:
+                  - key: osm_alert_rules.yml
+                    path: osm_alert_rules.yml
+  extraManifests:
+    - |
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: osm-prometheus-alertingrules-configmap
+      data:
+        osm_alert_rules.yml: |
+          groups:
+            - name: osm_alert_rules
+              rules:
+              - alert: vdu_down
+                expr: vm_status_extended != 1
+                for: 3m
+                annotations:
+                  summary: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} is down"
+                  description: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} has been down for more than 3 minutes. NS instance id is {{ "{{" }} $labels.ns_id }}"
+    - |
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: osm-prometheus-recordingrules-configmap
+      data:
+        osm_metric_rules.yml: |
+          groups:
+            - name: osm_metric_rules
+              rules:
+              - record: vm_status_extended
+                expr: (last_over_time(vm_status[1m]) * on (vm_id, vim_id) group_left(ns_id, vnf_id, vdu_id, project_id, job, vdu_name, vnf_member_index) last_over_time(ns_topology[1m])) or (last_over_time(ns_topology[1m]) * -1)
+                labels:
+                  job: osm_prometheus
+              - record: vnf_status
+                expr: (0 * (count (vm_status_extended==0) by (ns_id, vnf_id)>=0)) or (min by (ns_id, vnf_id) (vm_status_extended))
+                labels:
+                  job: osm_prometheus
+              - record: ns_status
+                expr: (0 * (count (vm_status_extended==0) by (ns_id)>=0)) or (min by (ns_id) (vm_status_extended))
+                labels:
+                  job: osm_prometheus
+    - |
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: osm-prometheus-sidecar-configmap
+      data:
+        MONGODB_URL: "mongodb://mongodb-k8s:27017/?replicaSet=rs0"
+        PROMETHEUS_CONFIG_FILE: "/etc/prometheus/prometheus.yml"
+        PROMETHEUS_BASE_CONFIG_FILE: "/etc/prometheus_base/prometheus.yml"
+        TARGET_DATABASE: "osm"
+        PROMETHEUS_URL: "http://prometheus:9090"
+    - |
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: osm-prom-configmap
+      data:
+        prometheus.yml: |
+          global:
+            scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+            evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+            # scrape_timeout is set to the global default (10s).
+          # Alertmanager configuration
+          alerting:
+            alertmanagers:
+            - static_configs:
+              - targets:
+                - alertmanager:9093
+          rule_files:
+            - "osm_metric_rules.yml"
+            - "osm_alert_rules.yml"
+          scrape_configs:
+            - job_name: 'mon_exporter'
+              static_configs:
+              - targets: ['mon:8000']
+            - job_name: pushgateway
+              honor_labels: true
+              scrape_interval: 30s
+              static_configs:
+              - targets:
+                - pushgateway-prometheus-pushgateway:9091
+            - job_name: prometheus
+              static_configs:
+                - targets:
+                  - localhost:9090
+            - job_name: node
+              static_configs:
+                - targets: ['prometheus-node-exporter:9100']
+  alertmanager:
+    fullnameOverride: "alertmanager"
+    extraArgs:
+      log.level: debug
+    service:
+      type: NodePort
+      nodePort: 9093
+      port: 9093
+    config:
+      receivers:
+        - name: default-receiver
+        - name: vdu-webhook
+          webhook_configs:
+           - url: http://webhook-translator:9998/vdu_down
+        - name: scaleout-webhook
+          webhook_configs:
+           - url: http://webhook-translator:9998/scaleout_vdu
+        - name: scalein-webhook
+          webhook_configs:
+           - url: http://webhook-translator:9998/scalein_vdu
+        - name: alarm-webhook
+          webhook_configs:
+           - url: http://webhook-translator:9998/vdu_alarm
+      route:
+        group_wait: 10s
+        group_interval: 2m
+        receiver: default-receiver
+        routes:
+        - receiver: vdu-webhook
+          repeat_interval: 15m
+          matchers:
+          - alertname = "vdu_down"
+        - receiver: 'scaleout-webhook'
+          repeat_interval: 5m
+          matchers:
+          - alertname =~ "^scaleout_.*"
+        - receiver: 'scalein-webhook'
+          repeat_interval: 5m
+          matchers:
+          - alertname =~ "^scalein_.*"
+        - receiver: 'alarm-webhook'
+          repeat_interval: 5m
+          matchers:
+          - alertname =~ "^vdu_alarm_.*"
+  prometheus-pushgateway:
+    fullnameOverride: "pushgateway-prometheus-pushgateway"
 
 ro:
   enabled: true