# Chart versions for kube-prometheus-stack and each exporter.
# They are injected by the caller through PROMETHEUS_* environment variables
# and default to empty when unset. (Stray diff '+' markers removed — they
# made two of the assignments invalid shell commands; the redundant
# initialise-to-empty-then-overwrite pairs are collapsed.)
V_OPERATOR="${PROMETHEUS_OPERATOR:-}"
V_MONGODB_EXPORTER="${PROMETHEUS_MONGODB_EXPORTER:-}"
V_MYSQL_EXPORTER="${PROMETHEUS_MYSQL_EXPORTER:-}"
V_KAFKA_EXPORTER="${PROMETHEUS_KAFKA_EXPORTER:-}"
# NOTE(review): the lines below appear to be sparse unified-diff context from
# several different, non-adjacent parts of the script — the usage() header,
# the option-variable defaults, the start of the getopts loop, and the tail
# of a dump_vars-style function. The complete definitions are not visible in
# this chunk, so the code is left byte-identical; confirm against the full
# script before touching it.
function usage(){
HELM=""
DEBUG=""
DUMP_VARS=""
-SERVICE_TYPE=""
+SERVICE_TYPE=""
while getopts ":h-:n:s:" o; do
case "${o}" in
h)
# Presumably part of a variable-dump helper: prints the resolved chart
# versions — TODO confirm which function these echoes belong to.
echo "V_OPERATOR=$V_OPERATOR"
echo "V_MONGODB_EXPORTER=$V_MONGODB_EXPORTER"
echo "V_MYSQL_EXPORTER=$V_MYSQL_EXPORTER"
+ echo "V_KAFKA_EXPORTER=$V_KAFKA_EXPORTER"
}
# Check that the cluster meets kube-prometheus-stack's minimum K8s version.
kubernetes_version=$(kubectl version --short | grep Server | awk '{print $3}')
min_kubernetes_version="v1.16.0"
# NB: a plain string "<" compares lexicographically, so e.g. v1.9.0 would
# wrongly pass against v1.16.0. Use a version-aware sort instead: the check
# passes only when the minimum version sorts first (i.e. current >= minimum).
# An empty/undetectable current version also fails, which is the safe side.
if [ "$(printf '%s\n' "$min_kubernetes_version" "$kubernetes_version" | sort -V | head -n1)" != "$min_kubernetes_version" ]
then
    echo "K8s monitoring could not be installed: Kube-prometheus-stack requires a Kubernetes 1.16+ (current version: $kubernetes_version)"
    exit 1
fi
+
if [ -n "$SERVICE_TYPE" ] ; then
# Validate the requested Kubernetes service type.
# The original test `[ [ $x != A ] || [ $x != B ] || [ $x != C ] ]` was both
# a syntax error (`[ [` / `] ]` are not valid test brackets) and a logic
# error: joined with `||` the condition is true for EVERY value. The value
# is invalid only when it differs from ALL accepted types, hence `&&`.
if [ "$SERVICE_TYPE" != "ClusterIP" ] && [ "$SERVICE_TYPE" != "NodePort" ] && [ "$SERVICE_TYPE" != "LoadBalancer" ] ; then
    echo "Wrong service type..."
    dump_vars
fi
# NOTE(review): a legacy Helm v2 bootstrap block was marked as deleted here
# (downloading helm v2.15.2, creating the Tiller service account and
# cluster-admin role binding, `helm init --service-account tiller`, and a
# polling loop waiting for the tiller-deploy deployment to become 1/1).
# The `helm install <release> <chart>` invocations later in this script use
# Helm v3 syntax, and Helm v3 has no Tiller, so the block is dropped rather
# than kept as broken '-'-prefixed diff residue. The helm CLI is expected to
# be pre-installed.
# Create the namespace that every monitoring component is installed into.
# (kubectl prints an error if it already exists; the script continues.)
echo "Creating namespace $NAMESPACE"
kubectl create namespace "$NAMESPACE"
# Adjustments needed so kube-prometheus-stack can scrape the control-plane
# components of a kubeadm cluster (they bind their metrics endpoints to
# 127.0.0.1 by default).
# Kube-Controller-Manager: listen on all interfaces.
sudo sed -e "s/- --bind-address=127.0.0.1/- --bind-address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-controller-manager.yaml
# Kube-Scheduler: listen on all interfaces.
sudo sed -e "s/- --bind-address=127.0.0.1/- --bind-address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-scheduler.yaml
# Kube-Proxy: expose metrics on 0.0.0.0:10249 by rewriting its ConfigMap,
# then delete the pods so the DaemonSet recreates them with the new config.
kubectl -n kube-system get cm/kube-proxy -o yaml > "$HERE/kube-proxy-cm.yaml"
sed -e "s/metricsBindAddress: \"\"/metricsBindAddress: 0.0.0.0:10249/" -i "$HERE/kube-proxy-cm.yaml"
kubectl -n kube-system delete cm kube-proxy
kubectl -n kube-system apply -f "$HERE/kube-proxy-cm.yaml"
rm "$HERE/kube-proxy-cm.yaml"
kubectl delete pod -l k8s-app=kube-proxy -n kube-system
# Etcd: publish the CA plus healthcheck client cert/key as a secret so
# Prometheus can scrape etcd over TLS, and enable extensive etcd metrics by
# inserting the flag right after the --trusted-ca-file line of the manifest.
sudo cp /etc/kubernetes/pki/etcd/healthcheck-client.key "$HERE/healthcheck-client.key"
sudo chmod a+r "$HERE/healthcheck-client.key"
kubectl -n "$NAMESPACE" create secret generic etcd-client-cert --from-file=/etc/kubernetes/pki/etcd/ca.crt --from-file=/etc/kubernetes/pki/etcd/healthcheck-client.crt --from-file="$HERE/healthcheck-client.key"
sudo awk '/--trusted-ca-file=\/etc\/kubernetes\/pki\/etcd\/ca.crt/ { print; print "    - --metrics=extensive"; next }1' /etc/kubernetes/manifests/etcd.yaml > "$HERE/tmp" && sudo mv "$HERE/tmp" /etc/kubernetes/manifests/etcd.yaml
sudo chown root:root /etc/kubernetes/manifests/etcd.yaml
sudo chmod 600 /etc/kubernetes/manifests/etcd.yaml
+
# Register the prometheus-community chart repository and refresh the index.
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
+
# kube-prometheus-stack installation (previously called prometheus-operator).
# change-charts-prometheus-operator.sh pre-processes the local chart copy.
"$HERE/change-charts-prometheus-operator.sh"
echo "Creating stable/kube-prometheus-stack"
# Generated values file: scrape the control-plane endpoints configured in
# the kubeadm-adjustment section above over HTTPS, mount the etcd client
# cert secret into Prometheus, and expose Prometheus/Alertmanager with the
# requested service type. Grafana itself is disabled — only its dashboards
# are deployed (forceDeployDashboards).
cat > "$HERE/kube-prometheus-stack-values.yaml" <<EOF
kubeControllerManager:
  service:
    enabled: true
    port: 10257
    targetPort: 10257
  serviceMonitor:
    https: true
    insecureSkipVerify: true
kubeScheduler:
  service:
    enabled: true
    port: 10259
    targetPort: 10259
  serviceMonitor:
    https: true
    insecureSkipVerify: true
kubelet:
  serviceMonitor:
    https: true
kubeEtcd:
  serviceMonitor:
    scheme: https
    insecureSkipVerify: false
    serverName: localhost
    caFile: /etc/prometheus/secrets/etcd-client-cert/ca.crt
    certFile: /etc/prometheus/secrets/etcd-client-cert/healthcheck-client.crt
    keyFile: /etc/prometheus/secrets/etcd-client-cert/healthcheck-client.key
alertmanager:
  service:
    type: $SERVICE_TYPE
grafana:
  enabled: false
  forceDeployDashboards: true
prometheus:
  service:
    type: $SERVICE_TYPE
  prometheusSpec:
    serviceMonitorSelectorNilUsesHelmValues: false
    secrets: ['etcd-client-cert']
EOF
helm install osm-monitoring --namespace "$NAMESPACE" --version="$V_OPERATOR" -f "$HERE/kube-prometheus-stack-values.yaml" "$HERE/helm_charts/kube-prometheus-stack"
# Exporters installation
# MongoDB exporter + dashboard. The serviceMonitor release label is set to
# osm-monitoring so it matches the kube-prometheus-stack release above.
echo "Creating prometheus-community/prometheus-mongodb-exporter"
helm install osm-mongodb-exporter --namespace "$NAMESPACE" --version="$V_MONGODB_EXPORTER" --set serviceMonitor.additionalLabels.release=osm-monitoring,mongodb.uri='mongodb://mongodb-k8s.osm:27017' prometheus-community/prometheus-mongodb-exporter
# dashboard:
kubectl -n "$NAMESPACE" apply -f "$HERE/mongodb-exporter-dashboard.yaml"
# MySQL exporter + dashboard. The root password is pulled out of the
# ro-db-secret Secret in the osm namespace and base64-decoded inline.
# NOTE(review): the `grep -A1 '^data:$' | grep MYSQL_ROOT_PASSWORD` pipeline
# only finds the key when it is the first entry under `data:` — fragile if
# the secret gains more keys; `kubectl get secret -o jsonpath` would be
# sturdier. Kept as-is to preserve behavior.
echo "Creating prometheus-community/prometheus-mysql-exporter"
helm install osm-mysql-exporter --namespace "$NAMESPACE" --version="$V_MYSQL_EXPORTER" --set serviceMonitor.enabled=true,serviceMonitor.additionalLabels.release=osm-monitoring,mysql.user="root",mysql.pass=$(kubectl -n osm get secret ro-db-secret -o yaml | grep -i -A1 '^data:$' | grep MYSQL_ROOT_PASSWORD | awk '{print $2}' | base64 -d),mysql.host="mysql.osm",mysql.port="3306",'collectors.info_schema\.tables=true' prometheus-community/prometheus-mysql-exporter
# dashboard:
kubectl -n "$NAMESPACE" apply -f "$HERE/mysql-exporter-dashboard.yaml"
# Kafka exporter + dashboard. The braces in kafkaServer={...} are Helm's
# --set list syntax (bash leaves a comma-free brace group unexpanded).
echo "Creating prometheus-community/prometheus-kafka-exporter"
helm install osm-kafka-exporter --namespace "$NAMESPACE" --version="$V_KAFKA_EXPORTER" --set prometheus.serviceMonitor.enabled=true,prometheus.serviceMonitor.additionalLabels.release=osm-monitoring,kafkaServer={kafka.osm.svc.cluster.local:9092},service.port=9092 prometheus-community/prometheus-kafka-exporter
# dashboard:
kubectl -n "$NAMESPACE" apply -f "$HERE/kafka-exporter-dashboard.yaml"
# Deploy summary dashboard
kubectl -n "$NAMESPACE" apply -f "$HERE/summary-dashboard.yaml"
# NOTE(review): the old post-install block that patched the grafana,
# alertmanager and prometheus services to NodePort/LoadBalancer and then
# restarted the grafana pod was marked as deleted here — the service type is
# now set declaratively through the chart values file above, so the
# '-'-prefixed residue is dropped.
# Deploy nodes dashboards
kubectl -n "$NAMESPACE" apply -f "$HERE/nodes-dashboard.yaml"