Commit 599d5e22 authored by romeromonser

Added Grafana chart to K8s-based OSM. Base for merging with k8s_monitor


Change-Id: I286edb8a424af2f5cc78684fcb11ed841f47900d
Signed-off-by: romeromonser <garomero@indra.es>
parent 40e73bfe
# Copyright 2020 Minsait - Indra S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Jose Manuel Palacios (jmpalacios@minsait.com)
# Author: Alberto Limon (alimonj@minsait.com)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: grafana
  name: grafana-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  verbs:
  - get
  - watch
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: grafana
  name: grafana-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: grafana-clusterrole
subjects:
- kind: ServiceAccount
  name: grafana
  namespace: osm
---
apiVersion: v1
data:
  admin-password: YWRtaW4=
  admin-user: YWRtaW4=
kind: Secret
metadata:
  labels:
    app: grafana
  name: grafana
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: grafana
  name: grafana
---
apiVersion: v1
data:
  provider.yaml: |-
    apiVersion: 1
    providers:
    - name: 'Kubernetes Cluster'
      orgId: 1
      folder: 'Kubernetes Cluster'
      type: file
      disableDeletion: false
      options:
        path: '/tmp/dashboards/Kubernetes Cluster'
    - name: 'Open Source MANO'
      orgId: 1
      folder: 'Open Source MANO'
      type: file
      disableDeletion: false
      options:
        path: '/tmp/dashboards/Open Source MANO'
kind: ConfigMap
metadata:
  labels:
    app: grafana
  name: grafana-dashboard-provider
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    grafana_datasource: "1"
  name: grafana-datasource
data:
  datasource.yaml: |-
    apiVersion: 1
    datasources:
    - name: osm_prometheus
      type: prometheus
      url: http://prometheus:9090
      access: proxy
      allowUiUpdates: true
      isDefault: true
    - name: Prometheus
      type: prometheus
      url: http://osm-monitoring-prometheus-prometheus.monitoring:9090
      access: proxy
      allowUiUpdates: true
      isDefault: false
---
apiVersion: v1
data:
  grafana.ini: |
    [log]
    mode = console
    [paths]
    data = /var/lib/grafana/data
    logs = /var/log/grafana
    plugins = /var/lib/grafana/plugins
    provisioning = /etc/grafana/provisioning
kind: ConfigMap
metadata:
  labels:
    app: grafana
  name: grafana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: grafana
  name: grafana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
      - env:
        - name: LABEL
          value: grafana_dashboard
        - name: FOLDER
          value: "/tmp/dashboards/Kubernetes Cluster"
        - name: RESOURCE
          value: both
        - name: NAMESPACE
          value: monitoring
        image: kiwigrid/k8s-sidecar:0.1.20
        imagePullPolicy: IfNotPresent
        name: grafana-sc-dashboard
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: "/tmp/dashboards/Kubernetes Cluster"
          name: sc-dashboard-volume-k8s
        - mountPath: "/tmp/dashboards/Open Source MANO"
          name: sc-dashboard-volume-osm
      - env:
        - name: GF_SECURITY_ADMIN_USER
          valueFrom:
            secretKeyRef:
              key: admin-user
              name: grafana
        - name: GF_SECURITY_ADMIN_PASSWORD
          valueFrom:
            secretKeyRef:
              key: admin-password
              name: grafana
        image: grafana/grafana:6.3.5
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 10
          httpGet:
            path: /api/health
            port: 3000
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 30
        name: grafana
        ports:
        - containerPort: 80
          name: service
          protocol: TCP
        - containerPort: 3000
          name: grafana
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /api/health
            port: 3000
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /etc/grafana/grafana.ini
          name: config
          subPath: grafana.ini
        - mountPath: /var/lib/grafana
          name: storage
        - mountPath: "/tmp/dashboards/Kubernetes Cluster"
          name: sc-dashboard-volume-k8s
        - mountPath: "/tmp/dashboards/Open Source MANO"
          name: sc-dashboard-volume-osm
        - mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
          name: sc-dashboard-provider
          subPath: provider.yaml
        - mountPath: /etc/grafana/provisioning/datasources
          name: sc-datasources-volume
      dnsPolicy: ClusterFirst
      initContainers:
      - env:
        - name: METHOD
          value: LIST
        - name: LABEL
          value: grafana_datasource
        - name: FOLDER
          value: /etc/grafana/provisioning/datasources
        - name: RESOURCE
          value: both
        image: kiwigrid/k8s-sidecar:0.1.20
        imagePullPolicy: IfNotPresent
        name: grafana-sc-datasources
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /etc/grafana/provisioning/datasources
          name: sc-datasources-volume
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 472
        runAsUser: 472
      serviceAccount: grafana
      serviceAccountName: grafana
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 420
          name: grafana
        name: config
      - emptyDir: {}
        name: storage
      - emptyDir: {}
        name: sc-dashboard-volume-k8s
      - emptyDir: {}
        name: sc-dashboard-volume-osm
      - configMap:
          defaultMode: 420
          name: grafana-dashboard-provider
        name: sc-dashboard-provider
      - emptyDir: {}
        name: sc-datasources-volume
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
spec:
  ports:
  - name: service
    nodePort: 3000
    port: 3000
    protocol: TCP
    targetPort: 3000
  selector:
    app: grafana
  sessionAffinity: None
  type: NodePort
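
The manifest above can also be applied and checked by hand, independently of the installer. A minimal sketch, assuming the file is saved as grafana.yaml and that OSM runs in the osm namespace (the namespace referenced by the ClusterRoleBinding); <node-ip> is a placeholder for one of your cluster nodes, and the nodePort of 3000 assumes the cluster permits NodePorts below the default 30000-32767 range:

# Apply the Grafana resources into the OSM namespace
kubectl apply -n osm -f grafana.yaml
# Wait for the Deployment to become available
kubectl -n osm rollout status deployment/grafana
# Check the Grafana health endpoint exposed through the NodePort Service
curl -s http://<node-ip>:3000/api/health
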
@@ -37,7 +37,7 @@ function usage(){
echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
echo -e " --pla: install the PLA module for placement support"
echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
echo -e " -D <devops path> use local devops installation path"
echo -e " -w <work dir> Location to store runtime installation"
echo -e " -t <docker tag> specify osm docker tag (default is latest)"
@@ -54,7 +54,7 @@ function usage(){
echo -e " --source: install OSM from source code using the latest stable tag"
echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
echo -e " --k8s_monitor: install the OSM kubernetes moitoring with prometheus and grafana"
echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
# echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
# echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
echo -e " --showopts: print chosen options and exit (only for debugging)"
@@ -167,6 +167,17 @@ function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
@@ -761,6 +772,39 @@ function deploy_osm_pla_service() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

# Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Check whether tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # Create the tiller service account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # Helm initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If the timeout expires, continue installing anyway
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ "$tiller_status" == "1/1" ] ) && break
            counter=$((counter + 2))
            sleep 2
        done
    fi
}

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
@@ -1506,3 +1550,4 @@ wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/
track end
echo -e "\nDONE"