Revert "Feature 11010: Use of upstream helm charts for Prometheus and Grafana in...
[osm/devops.git] / installers / helm / osm / values.yaml
index 3d0e1dc..0391a4f 100644 (file)
@@ -21,7 +21,8 @@
 global:
   replicaCount: 1
   logLevel: INFO
-
+  # hostname to be used for the ingress objects
+  hostname: ~
   image:
     repositoryBase: docker.io/opensourcemano
     pullPolicy: IfNotPresent
@@ -68,16 +69,26 @@ global:
 airflow:
   enabled: true
 
+alertmanager:
+  enabled: true
+  ingress: {}
+  #   host: alertmanager.<IP_ADDRESS>.nip.io
+
 certauth:
   enabled: true
 
 grafana:
   enabled: true
   fullnameOverride: "grafana"
-  service:
-    type: NodePort
-    nodePort: 3000
-    port: 3000
+  # service:
+  #   type: NodePort
+  #   nodePort: 3000
+  #   port: 3000
+  ingress:
+    enabled: true
+    ingressClassName: nginx
+    # hosts:
+    #   - grafana.<IP_ADDRESS>.nip.io
   extraVolumes:
     - emptyDir: {}
       name: sc-dashboard-volume-k8s
@@ -139,7 +150,7 @@ grafana:
       folder: "/tmp/dashboards/"
       resource: both
       searchNamespace: osm
-      extraMounts: 
+      extraMounts:
         - name: sc-dashboard-volume-k8s
           mountPath: "/tmp/dashboards/Kubernetes Cluster"
         - name: sc-dashboard-volume-osm
@@ -218,11 +229,13 @@ mysql:
 
 nbi:
   enabled: true
+  ingress: {}
+  #   host: nbi.<IP_ADDRESS>.nip.io
   logLevel: DEBUG
-  service:
-    type: NodePort
-    port: 9999
-    nodePort: 9999
+  service: {}
+  #   type: NodePort
+  #   port: 9999
+  #   nodePort: 9999
   image: {}
     # repository: opensourcemano/nbi
     # tag: "testing-daily"
@@ -232,10 +245,12 @@ nbi:
 
 ngui:
   enabled: true
-  service:
-    type: NodePort
-    port: 80
-    nodePort: 80
+  service: {}
+  #   type: NodePort
+  #   port: 80
+  #   nodePort: 80
+  ingress: {}
+  #   host: <IP_ADDRESS>.nip.io
   image: {}
     # repository: opensourcemano/ng-ui
     # tag: "testing-daily"
@@ -263,216 +278,13 @@ pol:
 
 prometheus:
   enabled: true
-  configmapReload:
-    prometheus:
-      enabled: true
-  prometheus-node-exporter:
-    enabled: false
-  kube-state-metrics:
-    enabled: false
-  server:
-    extraFlags:
-      - web.enable-lifecycle
-    statefulSet:
-      enabled: true
-    configPath: /etc/prometheus/prometheus.yml
-    fullnameOverride: "prometheus"
-    service:
-      servicePort: 9090
-      type: NodePort
-      nodePort: 9091
-    sidecarContainers:
-      prometheus-config-sidecar:
-        securityContext:
-          # readOnlyRootFilesystem: true
-          allowPrivilegeEscalation: false
-          # runAsNonRoot: true
-        image: opensourcemano/prometheus
-        imagePullPolicy: IfNotPresent
-        command: ["/bin/sh", "-c"]
-        args: ["sleep 50; python -u ./app.py"]
-        volumeMounts:
-          - name: prom-config
-            mountPath: /etc/prometheus
-          - name: prom-config-base
-            mountPath: /etc/prometheus_base
-        resources:
-          limits:
-            memory: 1024Mi
-          requests:
-            memory: 128Mi
-        envFrom:
-          - configMapRef:
-              name: osm-prometheus-sidecar-configmap
-    extraInitContainers:
-      - name: prometheus-init-config
-        image: busybox
-        command: ["/bin/sh", "-c"]
-          #args: [' sleep 100000 ']
-        args: ['if [ ! -f "/etc/prometheus/prometheus.yml" ]; then cp /config/prometheus.yml /etc/prometheus; fi; cp /config/osm_metric_rules.yml /config/osm_alert_rules.yml /etc/prometheus']
-        volumeMounts:
-          - name: prom-config
-            mountPath: /etc/prometheus
-          - name: prom-config-base
-            mountPath: /config
-    extraVolumeMounts:
-      - name: prom-config
-        mountPath: /etc/prometheus
-    extraVolumes:
-      - name: prom-config
-        emptyDir: {}
-      - name: prom-config-base
-        projected:
-          sources:
-            - configMap:
-                name: osm-prom-configmap
-                items:
-                  - key: prometheus.yml
-                    path: prometheus.yml
-            - configMap:
-                name: osm-prometheus-recordingrules-configmap
-                items:
-                  - key: osm_metric_rules.yml
-                    path: osm_metric_rules.yml
-            - configMap:
-                name: osm-prometheus-alertingrules-configmap
-                items:
-                  - key: osm_alert_rules.yml
-                    path: osm_alert_rules.yml
-  extraManifests:
-    - |
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: osm-prometheus-alertingrules-configmap
-      data:
-        osm_alert_rules.yml: |
-          groups:
-            - name: osm_alert_rules
-              rules:
-              - alert: vdu_down
-                expr: vm_status_extended != 1
-                for: 3m
-                annotations:
-                  summary: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} is down"
-                  description: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} has been down for more than 3 minutes. NS instance id is {{ "{{" }} $labels.ns_id }}"
-    - |
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: osm-prometheus-recordingrules-configmap
-      data:
-        osm_metric_rules.yml: |
-          groups:
-            - name: osm_metric_rules
-              rules:
-              - record: vm_status_extended
-                expr: (last_over_time(vm_status[1m]) * on (vm_id, vim_id) group_left(ns_id, vnf_id, vdu_id, project_id, job, vdu_name, vnf_member_index) last_over_time(ns_topology[1m])) or (last_over_time(ns_topology[1m]) * -1)
-                labels:
-                  job: osm_prometheus
-              - record: vnf_status
-                expr: (0 * (count (vm_status_extended==0) by (ns_id, vnf_id)>=0)) or (min by (ns_id, vnf_id) (vm_status_extended))
-                labels:
-                  job: osm_prometheus
-              - record: ns_status
-                expr: (0 * (count (vm_status_extended==0) by (ns_id)>=0)) or (min by (ns_id) (vm_status_extended))
-                labels:
-                  job: osm_prometheus
-    - |
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: osm-prometheus-sidecar-configmap
-      data:
-        MONGODB_URL: "mongodb://mongodb-k8s:27017/?replicaSet=rs0"
-        PROMETHEUS_CONFIG_FILE: "/etc/prometheus/prometheus.yml"
-        PROMETHEUS_BASE_CONFIG_FILE: "/etc/prometheus_base/prometheus.yml"
-        TARGET_DATABASE: "osm"
-        PROMETHEUS_URL: "http://prometheus:9090"
-    - |
-      apiVersion: v1
-      kind: ConfigMap
-      metadata:
-        name: osm-prom-configmap
-      data:
-        prometheus.yml: |
-          global:
-            scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-            evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
-            # scrape_timeout is set to the global default (10s).
-          # Alertmanager configuration
-          alerting:
-            alertmanagers:
-            - static_configs:
-              - targets:
-                - alertmanager:9093
-          rule_files:
-            - "osm_metric_rules.yml"
-            - "osm_alert_rules.yml"
-          scrape_configs:
-            - job_name: 'mon_exporter'
-              static_configs:
-              - targets: ['mon:8000']
-            - job_name: pushgateway
-              honor_labels: true
-              scrape_interval: 30s
-              static_configs:
-              - targets:
-                - pushgateway-prometheus-pushgateway:9091
-            - job_name: prometheus
-              static_configs:
-                - targets:
-                  - localhost:9090
-            - job_name: node
-              static_configs:
-                - targets: ['prometheus-node-exporter:9100']
-  alertmanager:
-    fullnameOverride: "alertmanager"
-    extraArgs:
-      log.level: debug
-    service:
-      type: NodePort
-      nodePort: 9093
-      port: 9093
-    config:
-      receivers:
-        - name: default-receiver
-        - name: vdu-webhook
-          webhook_configs:
-           - url: http://webhook-translator:9998/vdu_down
-        - name: scaleout-webhook
-          webhook_configs:
-           - url: http://webhook-translator:9998/scaleout_vdu
-        - name: scalein-webhook
-          webhook_configs:
-           - url: http://webhook-translator:9998/scalein_vdu
-        - name: alarm-webhook
-          webhook_configs:
-           - url: http://webhook-translator:9998/vdu_alarm
-      route:
-        group_wait: 10s
-        group_interval: 2m
-        receiver: default-receiver
-        routes:
-        - receiver: vdu-webhook
-          repeat_interval: 15m
-          matchers:
-          - alertname = "vdu_down"
-        - receiver: 'scaleout-webhook'
-          repeat_interval: 5m
-          matchers:
-          - alertname =~ "^scaleout_.*"
-        - receiver: 'scalein-webhook'
-          repeat_interval: 5m
-          matchers:
-          - alertname =~ "^scalein_.*"
-        - receiver: 'alarm-webhook'
-          repeat_interval: 5m
-          matchers:
-          - alertname =~ "^vdu_alarm_.*"
-  prometheus-pushgateway:
-    fullnameOverride: "pushgateway-prometheus-pushgateway"
-
+  service:
+    type: NodePort
+    nodePort: 9091
+  # replicaCount: 1
+  sidecarImage: {}
+    # repository: opensourcemano/ro
+    # tag: "testing-daily"
 ro:
   enabled: true
   # logLevel: DEBUG
@@ -494,11 +306,14 @@ vca:
 
 webhookTranslator:
   enabled: true
-  service:
-    type: NodePort
-    nodePort: 9998
-    port: 9998
+  # replicaCount: 1
   image: {}
     # repository: opensourcemano/webhook
     # tag: "testing-daily"
+  ingress: {}
+  #   host: webhook.<IP_ADDRESS>.nip.io
   # replicaCount: 1
+  service: {}
+  #   type: NodePort
+  #   nodePort: 9998
+  #   port: 9998