Feature 11032: Integration of OSM Helm Chart with different databases
installers/helm/osm/values.yaml (osm/devops.git)
#######################################################################################
# Copyright ETSI Contributors and Others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# Default values for osm.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

global:
  replicaCount: 1
  logLevel: INFO
  # hostname to be used for the ingress objects
  hostname: ~
  image:
    repositoryBase: docker.io/opensourcemano
    pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    tag: "testing-daily"

  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  db:
    mysql:
      mysqlSecretName: mysql
      mysqlSecretKey: mysql-root-password
      mysqlService: mysql
    mongo:
      mongoService: mongodb-k8s
      auth:
        enabled: false
        # secretName: mongodb-k8s
        # secretKeyRootPassword: mongodb-root-password
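    # Illustrative example (not part of the defaults) of pointing OSM at
    # externally managed databases; the service and secret names below are
    # placeholders that must match your own MySQL/MongoDB deployments:
    # mysql:
    #   mysqlSecretName: external-mysql
    #   mysqlSecretKey: mysql-root-password
    #   mysqlService: external-mysql.databases.svc.cluster.local
    # mongo:
    #   mongoService: external-mongodb.databases.svc.cluster.local
    #   auth:
    #     enabled: true
    #     secretName: external-mongodb
    #     secretKeyRootPassword: mongodb-root-password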

  podAnnotations: {}

  podSecurityContext:
    # runAsUser: 1000
    # runAsGroup: 1000
    fsGroup: 1000

  securityContext:
    runAsUser: 1000

  nodeSelector: {}

  tolerations: []

  affinity: {}

  behindHttpProxy: false
  httpProxy: {}
    # HTTP_PROXY: <HTTP_PROXY>
    # HTTPS_PROXY: <HTTPS_PROXY>
    # NO_PROXY: <NO_PROXY>
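    # Illustrative example with hypothetical proxy endpoints (remember to set
    # behindHttpProxy to true as well):
    # HTTP_PROXY: http://proxy.example.com:3128
    # HTTPS_PROXY: http://proxy.example.com:3128
    # NO_PROXY: 127.0.0.1,localhost,.svc,.cluster.local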

  oldServiceAssurance: false

airflow:
  enabled: true

alertmanager:
  enabled: true
  ingress: {}
  #   host: alertmanager.<IP_ADDRESS>.nip.io

certauth:
  enabled: true

grafana:
  enabled: true
  fullnameOverride: "grafana"
  # service:
  #   type: NodePort
  #   nodePort: 3000
  #   port: 3000
  ingress:
    enabled: true
    ingressClassName: nginx
    # hosts:
    #   - grafana.<IP_ADDRESS>.nip.io
  extraVolumes:
    - emptyDir: {}
      name: sc-dashboard-volume-k8s
    - emptyDir: {}
      name: sc-dashboard-volume-osm
  extraVolumeMounts:
    - name: sc-dashboard-volume-k8s
      mountPath: "/tmp/dashboards/Kubernetes Cluster"
    - name: sc-dashboard-volume-osm
      mountPath: "/tmp/dashboards/Open Source MANO"
  rbac:
    extraClusterRoleRules:
    - apiGroups:
      - ""
      resources:
      - configmaps
      - secrets
      verbs:
      - get
      - watch
      - list
  datasources:
    datasource.yaml:
      apiVersion: 1
      datasources:
      - name: osm_prometheus
        type: prometheus
        url: http://prometheus:9090
        access: proxy
        allowUiUpdates: true
        isDefault: true
      - name: Prometheus
        type: prometheus
        url: http://osm-monitoring-kube-promet-prometheus.monitoring:9090
        access: proxy
        allowUiUpdates: true
        isDefault: false
  dashboardProviders:
    provider.yaml:
     apiVersion: 1
     providers:
     - name: 'Kubernetes Cluster'
       orgId: 1
       folder: 'Kubernetes Cluster'
       type: file
       disableDeletion: false
       options:
         path: '/tmp/dashboards/Kubernetes Cluster'
     - name: 'Open Source MANO'
       orgId: 1
       folder: 'Open Source MANO'
       type: file
       disableDeletion: false
       options:
         path: '/tmp/dashboards/Open Source MANO'
  sidecar:
    dashboards:
      enabled: true
      folder: "/tmp/dashboards/"
      resource: both
      searchNamespace: osm
      extraMounts:
        - name: sc-dashboard-volume-k8s
          mountPath: "/tmp/dashboards/Kubernetes Cluster"
        - name: sc-dashboard-volume-osm
          mountPath: "/tmp/dashboards/Open Source MANO"
  extraInitContainers:
    - env:
      - name: METHOD
        value: LIST
      - name: LABEL
        value: grafana_datasource
      - name: FOLDER
        value: /etc/grafana/provisioning/datasources
      - name: RESOURCE
        value: both
      image: kiwigrid/k8s-sidecar:1.15.6
      imagePullPolicy: IfNotPresent
      name: grafana-sc-datasources
      resources: {}
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: File
      volumeMounts:
      - mountPath: /etc/grafana/provisioning/datasources
        name: config

kafka:
  enabled: true
  listeners:
    client:
      protocol: "PLAINTEXT"
  fullnameOverride: "kafka"
  # replicaCount: 1

keystone:
  enabled: true
  service:
    port: 5000
  image: {}
    # repository: opensourcemano/keystone
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "keystone-secret"

lcm:
  enabled: true
  logLevel: DEBUG
  image: {}
    # repository: opensourcemano/lcm
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "lcm-secret"
  mainPostRenderer:
    scriptName: mainPostRenderer
    path: /app/N2VC/n2vc/post-renderer-scripts/mainPostRenderer/
  podLabelsPostRenderer:
    scriptName: podLabels
    path: /app/N2VC/n2vc/post-renderer-scripts/podLabels/
  config:
    OSMLCM_VCA_CLOUD: "lxd-cloud"
    OSMLCM_VCA_K8S_CLOUD: "k8scloud"
    # OSMLCM_VCA_APIPROXY: "<VCA API proxy>"
    # OSMLCM_VCA_ENABLEOSUPGRADE: true
    # OSMLCM_VCA_APTMIRROR: "http://archive.ubuntu.com/ubuntu/"

mon:
  enabled: true
  # logLevel: DEBUG
  image: {}
    # repository: opensourcemano/mon
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "mon-secret"
  config: {}

mysql:
  enabled: true
  image:
    tag: "8.1-debian-11"
  fullnameOverride: "mysql"
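  # Illustrative example for using an externally managed MySQL instead of the
  # bundled subchart: disable it here and point global.db.mysql (mysqlService,
  # mysqlSecretName, mysqlSecretKey) at the external instance.
  # enabled: false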

nbi:
  enabled: true
  ingress: {}
  #   host: nbi.<IP_ADDRESS>.nip.io
  logLevel: DEBUG
  service: {}
  #   type: NodePort
  #   port: 9999
  #   nodePort: 9999
  image: {}
    # repository: opensourcemano/nbi
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "nbi-secret"

ngui:
  enabled: true
  service: {}
  #   type: NodePort
  #   port: 80
  #   nodePort: 80
  ingress: {}
  #   host: <IP_ADDRESS>.nip.io
  image: {}
    # repository: opensourcemano/ng-ui
    # tag: "testing-daily"
  # replicaCount: 1

# pla module is disabled by default unless global.oldServiceAssurance and pla.enabled are set to true
pla:
  enabled: false
  # logLevel: DEBUG
  image: {}
    # repository: opensourcemano/pla
    # tag: "testing-daily"
  # replicaCount: 1

# pol module is disabled by default unless global.oldServiceAssurance and pol.enabled are set to true
pol:
  enabled: true
  # logLevel: DEBUG
  image: {}
    # repository: opensourcemano/pol
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "pol-secret"
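
# Illustrative example (override values) for deploying the legacy Service
# Assurance components, per the conditions noted in the comments above:
# global:
#   oldServiceAssurance: true
# pla:
#   enabled: true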

prometheus:
  enabled: true
  configmapReload:
    prometheus:
      enabled: true
  prometheus-node-exporter:
    enabled: false
  kube-state-metrics:
    enabled: false
  server:
    extraFlags:
      - web.enable-lifecycle
    statefulSet:
      enabled: true
    configPath: /etc/prometheus/prometheus.yml
    fullnameOverride: "prometheus"
    service:
      servicePort: 9090
    #   type: NodePort
    #   nodePort: 9091
    ingress:
      enabled: true
      ingressClassName: nginx
      # hosts:
      #   - prometheus.<IP_ADDRESS>.nip.io
    sidecarContainers:
      prometheus-config-sidecar:
        securityContext:
          # readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
          # runAsNonRoot: true
        image: opensourcemano/prometheus:testing-daily
        imagePullPolicy: IfNotPresent
        command: ["/bin/sh", "-c"]
        args: ["sleep 50; python -u ./app.py"]
        volumeMounts:
          - name: prom-config
            mountPath: /etc/prometheus
          - name: prom-config-base
            mountPath: /etc/prometheus_base
        resources:
          limits:
            memory: 1024Mi
          requests:
            memory: 128Mi
        envFrom:
          - configMapRef:
              name: osm-prometheus-sidecar-configmap
    extraInitContainers:
      - name: prometheus-init-config
        image: busybox
        command: ["/bin/sh", "-c"]
        # args: [' sleep 100000 ']
        args: ['if [ ! -f "/etc/prometheus/prometheus.yml" ]; then cp /config/prometheus.yml /etc/prometheus; fi; cp /config/osm_metric_rules.yml /config/osm_alert_rules.yml /etc/prometheus']
        volumeMounts:
          - name: prom-config
            mountPath: /etc/prometheus
          - name: prom-config-base
            mountPath: /config
    extraVolumeMounts:
      - name: prom-config
        mountPath: /etc/prometheus
    extraVolumes:
      - name: prom-config
        emptyDir: {}
      - name: prom-config-base
        projected:
          sources:
            - configMap:
                name: osm-prom-configmap
                items:
                  - key: prometheus.yml
                    path: prometheus.yml
            - configMap:
                name: osm-prometheus-recordingrules-configmap
                items:
                  - key: osm_metric_rules.yml
                    path: osm_metric_rules.yml
            - configMap:
                name: osm-prometheus-alertingrules-configmap
                items:
                  - key: osm_alert_rules.yml
                    path: osm_alert_rules.yml
  extraManifests:
    - |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: osm-prometheus-alertingrules-configmap
      data:
        osm_alert_rules.yml: |
          groups:
            - name: osm_alert_rules
              rules:
              - alert: vdu_down
                expr: vm_status_extended != 1
                for: 3m
                annotations:
                  summary: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} is down"
                  description: "VDU {{ "{{" }} $labels.vm_id }} in VIM {{ "{{" }} $labels.vim_id }} has been down for more than 3 minutes. NS instance id is {{ "{{" }} $labels.ns_id }}"
    - |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: osm-prometheus-recordingrules-configmap
      data:
        osm_metric_rules.yml: |
          groups:
            - name: osm_metric_rules
              rules:
              - record: vm_status_extended
                expr: (last_over_time(vm_status[1m]) * on (vm_id, vim_id) group_left(ns_id, vnf_id, vdu_id, project_id, job, vdu_name, vnf_member_index) last_over_time(ns_topology[1m])) or (last_over_time(ns_topology[1m]) * -1)
                labels:
                  job: osm_prometheus
              - record: vnf_status
                expr: (0 * (count (vm_status_extended==0) by (ns_id, vnf_id)>=0)) or (min by (ns_id, vnf_id) (vm_status_extended))
                labels:
                  job: osm_prometheus
              - record: ns_status
                expr: (0 * (count (vm_status_extended==0) by (ns_id)>=0)) or (min by (ns_id) (vm_status_extended))
                labels:
                  job: osm_prometheus
    - |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: osm-prometheus-sidecar-configmap
      data:
        MONGODB_URL: "mongodb://mongodb-k8s:27017/?replicaSet=rs0"
        PROMETHEUS_CONFIG_FILE: "/etc/prometheus/prometheus.yml"
        PROMETHEUS_BASE_CONFIG_FILE: "/etc/prometheus_base/prometheus.yml"
        TARGET_DATABASE: "osm"
        PROMETHEUS_URL: "http://prometheus:9090"
    - |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: osm-prom-configmap
      data:
        prometheus.yml: |
          global:
            scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
            evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
            # scrape_timeout is set to the global default (10s).
          # Alertmanager configuration
          alerting:
            alertmanagers:
            - static_configs:
              - targets:
                - alertmanager:9093
          rule_files:
            - "osm_metric_rules.yml"
            - "osm_alert_rules.yml"
          scrape_configs:
            - job_name: 'mon_exporter'
              static_configs:
              - targets: ['mon:8000']
            - job_name: pushgateway
              honor_labels: true
              scrape_interval: 30s
              static_configs:
              - targets:
                - pushgateway-prometheus-pushgateway:9091
            - job_name: prometheus
              static_configs:
                - targets:
                  - localhost:9090
            - job_name: node
              static_configs:
                - targets: ['prometheus-node-exporter:9100']
  alertmanager:
    enabled: true
    fullnameOverride: "alertmanager"
    extraArgs:
      log.level: debug
    # service:
    #   type: NodePort
    #   nodePort: 9093
    #   port: 9093
    # ingress:
    #   enabled: True
    #   className: nginx
    #   hosts:
    #     - host: localhost
    #       paths:
    #         - path: /
    #           pathType: ImplementationSpecific
    #   tls: []
    config:
      receivers:
        - name: default-receiver
        - name: vdu-webhook
          webhook_configs:
           - url: http://webhook-translator:9998/vdu_down
        - name: scaleout-webhook
          webhook_configs:
           - url: http://webhook-translator:9998/scaleout_vdu
        - name: scalein-webhook
          webhook_configs:
           - url: http://webhook-translator:9998/scalein_vdu
        - name: alarm-webhook
          webhook_configs:
           - url: http://webhook-translator:9998/vdu_alarm
      route:
        group_wait: 10s
        group_interval: 2m
        receiver: default-receiver
        routes:
        - receiver: vdu-webhook
          repeat_interval: 15m
          matchers:
          - alertname = "vdu_down"
        - receiver: 'scaleout-webhook'
          repeat_interval: 5m
          matchers:
          - alertname =~ "^scaleout_.*"
        - receiver: 'scalein-webhook'
          repeat_interval: 5m
          matchers:
          - alertname =~ "^scalein_.*"
        - receiver: 'alarm-webhook'
          repeat_interval: 5m
          matchers:
          - alertname =~ "^vdu_alarm_.*"
  prometheus-pushgateway:
    fullnameOverride: "pushgateway-prometheus-pushgateway"

ro:
  enabled: true
  # logLevel: DEBUG
  service:
    port: 9090
  image: {}
    # repository: opensourcemano/ro
    # tag: "testing-daily"
  # replicaCount: 1
  useOsmSecret: true
  # secretName: "ro-secret"

vca:
  enabled: false
  # host: ""
  # secret: ""
  # cacert: ""
  # pubkey: ""
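  # Illustrative example for attaching an external VCA (Juju controller);
  # every value below is a placeholder:
  # enabled: true
  # host: "<controller host or IP>"
  # secret: "<controller user password>"
  # cacert: "<base64-encoded controller CA certificate>"
  # pubkey: "<SSH public key>"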

webhookTranslator:
  enabled: true
  # replicaCount: 1
  image: {}
    # repository: opensourcemano/webhook
    # tag: "testing-daily"
  ingress: {}
  #   host: webhook.<IP_ADDRESS>.nip.io
  service: {}
  #   type: NodePort
  #   nodePort: 9998
  #   port: 9998