Feature 9912: Kubernetes VCA in K8s installation 13/9913/5
author David Garcia <david.garcia@canonical.com>
Fri, 23 Oct 2020 08:40:20 +0000 (10:40 +0200)
committer David Garcia <david.garcia@canonical.com>
Tue, 3 Nov 2020 17:33:56 +0000 (18:33 +0100)
Changes:
- bootstrap Juju on Kubernetes
- add LXD as an external cloud
- iptables rules are no longer needed
- expose the VCA externally using MetalLB

Change-Id: I15b33cbf621af00c53c4f12836500aecafcfcb9a
Signed-off-by: David Garcia <david.garcia@canonical.com>
installers/full_install_osm.sh
installers/k8s/metallb/config.yaml [new file with mode: 0644]
installers/k8s/metallb/metallb.yaml [new file with mode: 0644]

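For reference, the new Kubernetes path boils down to the flow sketched below (a minimal sketch assembled from the functions added in this diff; the cloud name and stack name are illustrative, while the agent version and options are the ones used by the installer):

    # Register the local cluster as a Juju cloud and bootstrap the controller on it,
    # exposing it through a LoadBalancer service (backed by MetalLB)
    cat $HOME/.kube/config | juju add-k8s k8scloud --client
    juju bootstrap k8scloud osm \
        --config controller-service-type=loadbalancer \
        --agent-version=2.8.1
    # MetalLB provides the external IP for that service
    kubectl apply -f installers/k8s/metallb/metallb.yaml

The LXD host is then registered on this controller as an additional, external cloud via juju add-cloud / juju add-credential, so proxy charms can still be deployed to LXD containers.
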
index 39d2850..9ae5da2 100755 (executable)
@@ -237,11 +237,11 @@ EONG
         else
             remove_volumes $OSM_STACK_NAME
             remove_network $OSM_STACK_NAME
+            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
         fi
-        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
         echo "Removing $OSM_DOCKER_WORK_DIR"
         $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
-        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
+        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
     fi
     remove_crontab_job
     [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
@@ -427,7 +427,7 @@ function juju_createcontroller() {
     if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
         # Controller not found, so create it
         sudo usermod -a -G lxd ${USER}
-        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=2.8.1 $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
+        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
     fi
     [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
     juju controller-config features=[k8s-operators]
@@ -437,6 +437,55 @@ function juju_addk8s() {
     cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
 }
 
+function juju_createcontroller_k8s(){
+    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
+    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME  \
+            --config controller-service-type=loadbalancer \
+            --agent-version=$JUJU_AGENT_VERSION
+}
+
+
+function juju_addlxd_cloud(){
+    mkdir -p /tmp/.osm
+    OSM_VCA_CLOUDNAME="lxd-cloud"
+    LXDENDPOINT=$DEFAULT_IP
+    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
+    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
+
+    cat << EOF > $LXD_CLOUD
+clouds:
+  $OSM_VCA_CLOUDNAME:
+    type: lxd
+    auth-types: [certificate]
+    endpoint: "https://$LXDENDPOINT:8443"
+    config:
+      ssl-hostname-verification: false
+EOF
+    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
+    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
+    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
+    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`
+
+    cat << EOF > $LXD_CREDENTIALS
+credentials:
+  $OSM_VCA_CLOUDNAME:
+    lxd-cloud:
+      auth-type: certificate
+      server-cert: |
+$server_cert
+      client-cert: |
+$client_cert
+      client-key: |
+$client_key
+EOF
+    lxc config trust add local: /tmp/.osm/client.crt
+    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
+    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
+    sg lxd -c "lxd waitready"
+    juju controller-config features=[k8s-operators]
+}
+
+
 function juju_createproxy() {
     check_install_iptables_persistent
 
@@ -664,10 +713,12 @@ function generate_docker_env_files() {
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
     fi
 
-    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
-        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
-    else
-        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
+    if [ -n "$OSM_VCA_APIPROXY" ]; then
+        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
+            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
+        else
+            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
+        fi
     fi
 
     if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
@@ -811,6 +862,22 @@ function install_k8s_storageclass() {
     kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
 }
 
+function install_k8s_metallb() {
+    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
+    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
+    echo "apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: metallb-system
+  name: config
+data:
+  config: |
+    address-pools:
+    - name: default
+      protocol: layer2
+      addresses:
+      - $METALLB_IP_RANGE" | kubectl apply -f -
+}
 #deploys flannel as daemonsets
 function deploy_cni_provider() {
     CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
@@ -1095,18 +1162,58 @@ function install_lightweight() {
 
     track prereqok
 
+    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
+
+    echo "Creating folders for installation"
+    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
+    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
+    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
+
+    #Installs Kubernetes
+    if [ -n "$KUBERNETES" ]; then
+        install_kube
+        track install_k8s
+        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
+        kube_config_dir
+        track init_k8s
+        if [ -n "$INSTALL_K8S_MONITOR" ]; then
+            # uninstall OSM MONITORING
+            uninstall_k8s_monitoring
+            track uninstall_k8s_monitoring
+        fi
+        #remove old namespace
+        remove_k8s_namespace $OSM_STACK_NAME
+        deploy_cni_provider
+        taint_master_node
+        install_k8s_storageclass
+        track k8s_storageclass
+        install_k8s_metallb
+        track k8s_metallb
+    else
+        #install_docker_compose
+        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
+        track docker_swarm
+    fi
+
     [ -z "$INSTALL_NOJUJU" ] && install_juju
     track juju_install
 
     if [ -z "$OSM_VCA_HOST" ]; then
         if [ -z "$CONTROLLER_NAME" ]; then
-            if [ -n "$LXD_CLOUD_FILE" ]; then
-                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
-                OSM_VCA_CLOUDNAME="lxd-cloud"
-                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
-                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
+
+            if [ -n "$KUBERNETES" ]; then
+                juju_createcontroller_k8s
+                juju_addlxd_cloud
+            else
+                if [ -n "$LXD_CLOUD_FILE" ]; then
+                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
+                    OSM_VCA_CLOUDNAME="lxd-cloud"
+                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
+                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
+                fi
+                juju_createcontroller
+                juju_createproxy
             fi
-            juju_createcontroller
         else
             OSM_VCA_CLOUDNAME="lxd-cloud"
             if [ -n "$LXD_CLOUD_FILE" ]; then
@@ -1165,11 +1272,15 @@ EOF
         [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
     fi
-    if [ -z "$OSM_VCA_APIPROXY" ]; then
-        OSM_VCA_APIPROXY=$DEFAULT_IP
-        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
+
+    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
+    if [ -z "$KUBERNETES" ]; then
+        if [ -z "$OSM_VCA_APIPROXY" ]; then
+            OSM_VCA_APIPROXY=$DEFAULT_IP
+            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
+        fi
+        juju_createproxy
     fi
-    juju_createproxy
     track juju
 
     if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
@@ -1177,27 +1288,7 @@ EOF
         [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
     fi
 
-    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
-    track docker_ce
-
-    echo "Creating folders for installation"
-    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
-    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
-    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
-
-    #Installs Kubernetes and deploys osm services
-    if [ -n "$KUBERNETES" ]; then
-        install_kube
-        track install_k8s
-        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
-        kube_config_dir
-        track init_k8s
-    else
-        #install_docker_compose
-        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
-        track docker_swarm
-    fi
-
+    # Deploy OSM services
     [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
     track docker_build
 
@@ -1212,18 +1303,9 @@ EOF
     track env_files
 
     if [ -n "$KUBERNETES" ]; then
-        if [ -n "$INSTALL_K8S_MONITOR" ]; then
-            # uninstall OSM MONITORING
-            uninstall_k8s_monitoring
-            track uninstall_k8s_monitoring
-        fi
-        #remove old namespace
-        remove_k8s_namespace $OSM_STACK_NAME
-        deploy_cni_provider
         kube_secrets
         [ ! $OSM_DOCKER_TAG == "8" ] && parse_yaml $OSM_DOCKER_TAG
         namespace_vol
-        taint_master_node
         deploy_osm_services
         if [ -n "$INSTALL_PLA"]; then
             # optional PLA install
@@ -1231,10 +1313,6 @@ EOF
             track deploy_osm_pla
         fi
         track deploy_osm_services_k8s
-        install_k8s_storageclass
-        track k8s_storageclass
-        juju_addk8s
-        track juju_addk8s
         install_helm
         track install_helm
         if [ -n "$INSTALL_K8S_MONITOR" ]; then
@@ -1413,6 +1491,7 @@ function track(){
     wget -q -O /dev/null $url
 }
 
+JUJU_AGENT_VERSION=2.8.1
 UNINSTALL=""
 DEVELOP=""
 UPDATE=""
diff --git a/installers/k8s/metallb/config.yaml b/installers/k8s/metallb/config.yaml
new file mode 100644 (file)
index 0000000..d57cc68
--- /dev/null
@@ -0,0 +1,25 @@
+#   Copyright 2020 Canonical Ltd.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: metallb-system
+  name: config
+data:
+  config: |
+    address-pools:
+    - name: default
+      protocol: layer2
+      addresses:
+      - $METALLB_IP_RANGE
diff --git a/installers/k8s/metallb/metallb.yaml b/installers/k8s/metallb/metallb.yaml
new file mode 100644 (file)
index 0000000..6829648
--- /dev/null
@@ -0,0 +1,260 @@
+#   Copyright 2020 Canonical Ltd.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: metallb-system
+  labels:
+    app: metallb
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+rules:
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["services/status"]
+    verbs: ["update"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+rules:
+  - apiGroups: [""]
+    resources: ["services", "endpoints", "nodes"]
+    verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: metallb-system
+  name: leader-election
+  labels:
+    app: metallb
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    resourceNames: ["metallb-speaker"]
+    verbs: ["get", "update"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create"]
+---
+## Role bindings
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+subjects:
+  - kind: ServiceAccount
+    name: controller
+    namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+subjects:
+  - kind: ServiceAccount
+    name: speaker
+    namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:speaker
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+subjects:
+  - kind: ServiceAccount
+    name: controller
+  - kind: ServiceAccount
+    name: speaker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: config-watcher
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  namespace: metallb-system
+  name: leader-election
+  labels:
+    app: metallb
+subjects:
+  - kind: ServiceAccount
+    name: speaker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: leader-election
+---
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+    component: speaker
+spec:
+  selector:
+    matchLabels:
+      app: metallb
+      component: speaker
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: speaker
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "7472"
+    spec:
+      serviceAccountName: speaker
+      terminationGracePeriodSeconds: 0
+      hostNetwork: true
+      containers:
+        - name: speaker
+          image: metallb/speaker:v0.6.1
+          imagePullPolicy: IfNotPresent
+          args:
+            - --port=7472
+            - --config=config
+          env:
+            - name: METALLB_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          ports:
+            - name: monitoring
+              containerPort: 7472
+          resources:
+            limits:
+              cpu: 100m
+              memory: 100Mi
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop:
+                - all
+              add:
+                - net_raw
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+    component: controller
+spec:
+  revisionHistoryLimit: 3
+  selector:
+    matchLabels:
+      app: metallb
+      component: controller
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: controller
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "7472"
+    spec:
+      serviceAccountName: controller
+      terminationGracePeriodSeconds: 0
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534 # nobody
+      containers:
+        - name: controller
+          image: metallb/controller:v0.6.1
+          imagePullPolicy: IfNotPresent
+          args:
+            - --port=7472
+            - --config=config
+          ports:
+            - name: monitoring
+              containerPort: 7472
+          resources:
+            limits:
+              cpu: 100m
+              memory: 100Mi
+
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+            readOnlyRootFilesystem: true
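
After installation, the exposure can be sanity-checked roughly as follows (a sketch; the controller namespace assumes Juju's controller-<name> convention and the default stack name "osm"):

    kubectl get pods -n metallb-system   # MetalLB speaker DaemonSet and controller Deployment should be running
    kubectl get svc -n controller-osm    # the controller service should show an EXTERNAL-IP from the MetalLB pool
    juju controllers                     # the osm controller should be reachable at that address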