# Fixes bug 1254 and bug 1247
# Source: osm/devops.git — installers/charmed_install.sh
1 #! /bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 # set -eux
17
18 K8S_CLOUD_NAME="k8s-cloud"
19 KUBECTL="microk8s.kubectl"
20 IMAGES_OVERLAY_FILE=~/.osm/images-overlay.yaml
function check_arguments(){
    # Parse installer command-line options into global variables.
    # Options that take a value read it from "$2"; the value token itself is
    # consumed on the next loop iteration by the single `shift` (it matches
    # no case arm). Unknown tokens are silently ignored.
    while [ $# -gt 0 ] ; do
        # Quote "$1": an empty or glob-like argument must not be expanded.
        case "$1" in
            --bundle) BUNDLE="$2" ;;            # charm bundle to deploy
            --k8s) KUBECFG="$2" ;;              # external cluster kubeconfig
            --vca) CONTROLLER="$2" ;;           # reuse an existing controller
            --lxd) LXD_CLOUD="$2" ;;            # external LXD cloud yaml
            --lxd-cred) LXD_CREDENTIALS="$2" ;; # external LXD credential yaml
            --microstack) MICROSTACK=y ;;       # also install MicroStack VIM
            --ha) BUNDLE="cs:osm-ha" ;;         # shorthand for the HA bundle
            --tag) TAG="$2" ;;                  # pin container image tag
        esac
        shift
    done
}
38
function install_snaps(){
    # Install the Kubernetes tooling and the Juju client.
    #   Without --k8s: install microk8s and use it as the target cluster.
    #   With    --k8s: install kubectl and point KUBECONFIG at the file.
    # Sets KUBEGRP (group used with `sg` so fresh group membership works)
    # and may override the global KUBECTL.
    if [ ! -v KUBECFG ]; then
        sudo snap install microk8s --classic
        # $(...) instead of backticks; quoted so an odd username cannot split.
        sudo usermod -a -G microk8s "$(whoami)"
        mkdir -p ~/.kube
        sudo chown -f -R "$(whoami)" ~/.kube
        KUBEGRP="microk8s"
        microk8s status --wait-ready
    else
        KUBECTL="kubectl"
        sudo snap install kubectl --classic
        export KUBECONFIG=${KUBECFG}
        KUBEGRP=$(id -g -n)
    fi
    # Pin Juju to the 2.8 track expected by the OSM charms.
    sudo snap install juju --classic --channel=2.8/stable
}
55
function bootstrap_k8s_lxd(){
    # Ensure a Juju controller (the OSM VCA) is available:
    #   --vca given  -> reuse the named, pre-existing controller
    #   otherwise    -> bootstrap a new controller named "osm-vca"
    # Then register an LXD cloud + credential on it for proxy charms.
    # Fixes: "Threre" typo in both user-facing messages; backticks -> $( ).
    [ -v CONTROLLER ] && ADD_K8S_OPTS="--controller ${CONTROLLER}" && CONTROLLER_NAME=$CONTROLLER
    [ ! -v CONTROLLER ] && ADD_K8S_OPTS="--client" && BOOTSTRAP_NEEDED="yes" && CONTROLLER_NAME="osm-vca"

    if [ -v BOOTSTRAP_NEEDED ]; then
        # Refuse to clobber a controller already using our reserved name.
        CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
        if [ $CONTROLLER_PRESENT -ge 1 ]; then
            cat << EOF
There is already a VCA present with the installer reserved name of "${CONTROLLER_NAME}".
You may either explicitly use this VCA with the "--vca ${CONTROLLER_NAME}" option, or remove it
using this command:

   juju destroy-controller --release-storage --destroy-all-models -y ${CONTROLLER_NAME}

Please retry the installation once this conflict has been resolved.
EOF
            exit 1
        fi
    else
        # A user-supplied VCA must actually exist.
        CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
        if [ $CONTROLLER_PRESENT -le 0 ]; then
            cat << EOF
There is no VCA present with the name "${CONTROLLER_NAME}". Please specify a VCA
that exists, or remove the --vca ${CONTROLLER_NAME} option.

Please retry the installation with one of the solutions applied.
EOF
            exit 1
        fi
    fi

    if [ -v KUBECFG ]; then
        # External cluster: register it as a named cloud; bootstrap if needed.
        cat $KUBECFG | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
        [ -v BOOTSTRAP_NEEDED ] && juju bootstrap $K8S_CLOUD_NAME $CONTROLLER_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=2.8.1
    else
        # Local microk8s: enable the addons OSM needs. Run through `sg`
        # because microk8s group membership may not be live in this shell yet.
        sg ${KUBEGRP} -c "echo ${DEFAULT_IP}-${DEFAULT_IP} | microk8s.enable metallb"
        sg ${KUBEGRP} -c "microk8s.enable ingress"
        sg ${KUBEGRP} -c "microk8s.enable storage dns"
        TIME_TO_WAIT=30
        start_time="$(date -u +%s)"
        while true
        do
            now="$(date -u +%s)"
            if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
                echo "Microk8s storage failed to enable"
                sg ${KUBEGRP} -c "microk8s.status"
                exit 1
            fi
            storage_status=$(sg ${KUBEGRP} -c "microk8s.status -a storage")
            if [[ $storage_status == "enabled" ]]; then
                break
            fi
            sleep 1
        done

        [ ! -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c "microk8s.config" | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
        [ -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c \
            "juju bootstrap microk8s $CONTROLLER_NAME --config controller-service-type=loadbalancer --agent-version=2.8.1" \
            && K8S_CLOUD_NAME=microk8s
    fi

    if [ -v LXD_CLOUD ]; then
        if [ ! -v LXD_CREDENTIALS ]; then
            echo "The installer needs the LXD server certificate if the LXD is external"
            exit 1
        fi
    else
        # No external LXD supplied: install and configure a local LXD snap.
        LXDENDPOINT=$DEFAULT_IP
        LXD_CLOUD=~/.osm/lxd-cloud.yaml
        LXD_CREDENTIALS=~/.osm/lxd-credentials.yaml
        # Apply sysctl production values for optimal performance
        sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
        sudo sysctl --system
        # Install LXD snap
        sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
        sudo snap install lxd
        # Configure LXD
        sudo usermod -a -G lxd "$(whoami)"
        cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$LXDENDPOINT':8443/' | sg lxd -c "lxd init --preseed"
        sg lxd -c "lxd waitready"
        # Match LXD's bridge/profile MTU to the default interface's MTU.
        DEFAULT_MTU=$(ip addr show $DEFAULT_IF | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
        sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
        sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"

        cat << EOF > $LXD_CLOUD
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
        # Self-signed client certificate so Juju can authenticate to LXD.
        openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
        # sed indents each certificate line so it nests under the "|" block
        # scalars in the credentials YAML below.
        local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
        local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
        local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')

        cat << EOF > $LXD_CREDENTIALS
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
        # Trust our client certificate on the local LXD daemon.
        lxc config trust add local: ~/.osm/client.crt
    fi

    juju add-cloud -c $CONTROLLER_NAME lxd-cloud $LXD_CLOUD --force
    juju add-credential -c $CONTROLLER_NAME lxd-cloud -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    # Enable the (2.8-era) k8s-operators controller feature flag.
    juju controller-config features=[k8s-operators]
}
176
function wait_for_port(){
    # Wait up to TIME_TO_WAIT seconds for the ingress of service $1 to show
    # up at index $2 of the ingress list in the "osm" namespace; exit 1 on
    # timeout.
    SERVICE=$1
    INDEX=$2
    TIME_TO_WAIT=30
    start_time="$(date -u +%s)"
    while true
    do
        now="$(date -u +%s)"
        if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
            echo "Failed to expose external ${SERVICE} interface port"
            exit 1
        fi

        # Fix: quote the command substitution — an empty jq result used to
        # collapse the left operand and make `[` fail with a syntax error
        # instead of simply not matching on this iteration.
        if [ "$(sg ${KUBEGRP} -c "${KUBECTL} get ingresses.networking -n osm -o json | jq -r '.items[$INDEX].metadata.name'")" == "${SERVICE}" ] ; then
            break
        fi
        sleep 1
    done
}
196
function deploy_charmed_osm(){
    # Create the "osm" Juju model, deploy the OSM charm bundle with the
    # generated overlays, wait for it to settle, then expose the web
    # endpoints through the cluster ingress using xip.io wildcard DNS.
    create_overlay
    echo "Creating OSM model"
    if [ -v KUBECFG ]; then
        juju add-model osm $K8S_CLOUD_NAME
    else
        # sg: microk8s group membership may not be active in this shell yet.
        sg ${KUBEGRP} -c "juju add-model osm $K8S_CLOUD_NAME"
    fi
    echo "Deploying OSM with charms"
    images_overlay=""
    # --tag pins every OSM container image to one release tag via an overlay.
    [ -v TAG ] && generate_images_overlay && images_overlay="--overlay $IMAGES_OVERLAY_FILE"
    if [ -v BUNDLE ]; then
        juju deploy $BUNDLE --overlay ~/.osm/vca-overlay.yaml $images_overlay
    else
        juju deploy cs:osm --overlay ~/.osm/vca-overlay.yaml $images_overlay
    fi

    echo "Waiting for deployment to finish..."
    check_osm_deployed
    echo "OSM with charms deployed"
    if [ ! -v KUBECFG ]; then
        API_SERVER=${DEFAULT_IP}
    else
        # Extract the bare host from the kubeconfig server URL by stripping,
        # in turn: the scheme, any user@ prefix, any path, and the :port.
        API_SERVER=$(kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " ")
        proto="$(echo $API_SERVER | grep :// | sed -e's,^\(.*://\).*,\1,g')"
        url="$(echo ${API_SERVER/$proto/})"
        user="$(echo $url | grep @ | cut -d@ -f1)"
        hostport="$(echo ${url/$user@/} | cut -d/ -f1)"
        API_SERVER="$(echo $hostport | sed -e 's,:.*,,g')"
    fi

    # Expose OSM services
    # NOTE(review): the numeric argument to wait_for_port is the expected
    # position of each ingress in the namespace listing — presumably the
    # alphabetical order of the services exposed below; confirm if services
    # are added or renamed.
    # Expose Grafana
    juju config grafana-k8s juju-external-hostname=grafana.${API_SERVER}.xip.io
    juju expose grafana-k8s
    wait_for_port grafana-k8s 0

    # Expose NBI
    juju config nbi-k8s juju-external-hostname=nbi.${API_SERVER}.xip.io
    juju expose nbi-k8s
    wait_for_port nbi-k8s 1

    # Expose NG UI
    juju config ng-ui juju-external-hostname=ui.${API_SERVER}.xip.io
    juju expose ng-ui
    wait_for_port ng-ui 2

    # Expose Prometheus
    juju config prometheus-k8s juju-external-hostname=prometheus.${API_SERVER}.xip.io
    juju expose prometheus-k8s
    wait_for_port prometheus-k8s 3

    # Expose UI
    juju config ui-k8s juju-external-hostname=osm.${API_SERVER}.xip.io
    juju expose ui-k8s
    wait_for_port ui-k8s 4

    # Apply annotations: NBI speaks HTTPS behind the ingress, and uploads
    # (packages/images) need the nginx body-size limit removed.
    sg ${KUBEGRP} -c "${KUBECTL} annotate ingresses.networking nginx.ingress.kubernetes.io/backend-protocol=HTTPS -n osm -l juju-app=nbi-k8s"
    sg ${KUBEGRP} -c "${KUBECTL} annotate ingresses.networking nginx.ingress.kubernetes.io/proxy-body-size=0 -n osm -l juju-app=nbi-k8s"
    sg ${KUBEGRP} -c "${KUBECTL} annotate ingresses.networking nginx.ingress.kubernetes.io/proxy-body-size=0 -n osm -l juju-app=ng-ui"
    sg ${KUBEGRP} -c "${KUBECTL} annotate ingresses.networking nginx.ingress.kubernetes.io/proxy-body-size=0 -n osm -l juju-app=ui-k8s"
}
260
function check_osm_deployed() {
    # Poll `juju status` until all expected OSM services report "active",
    # printing progress; give up (exit 1) after TIME_TO_WAIT seconds.
    TIME_TO_WAIT=600
    start_time="$(date -u +%s)"
    total_service_count=14
    while :
    do
        service_count=$(juju status | grep kubernetes | grep -c active)
        echo "$service_count / $total_service_count services active"
        if [ "$service_count" -eq "$total_service_count" ]; then
            break
        fi
        now="$(date -u +%s)"
        if (( now - start_time > TIME_TO_WAIT )); then
            echo "Timed out waiting for OSM services to become ready"
            exit 1
        fi
        sleep 10
    done
}
280
function create_overlay() {
    # Build ~/.osm/vca-overlay.yaml with the VCA (Juju controller) access
    # details the LCM and MON charms need, extracted from the local Juju
    # client's accounts.yaml / controllers.yaml with yq.
    # Side effects: installs jq/pip/yq, sets global OSM_VCA_HOST.
    # Fix: dropped an unused `local DEFAULT_IF=...` computation — the default
    # interface is resolved at script scope and never read in this function.
    sudo snap install jq
    sudo apt install python3-pip -y
    python3 -m pip install yq
    PATH=$PATH:$HOME/.local/bin # make yq command available
    # NOTE(review): assumes the home directory is /home/$USER (needed when
    # running under sudo) — confirm on systems with non-standard homes.
    local HOME=/home/$USER
    local vca_user=$(cat $HOME/.local/share/juju/accounts.yaml | yq --arg CONTROLLER_NAME $CONTROLLER_NAME '.controllers[$CONTROLLER_NAME].user')
    local vca_password=$(cat $HOME/.local/share/juju/accounts.yaml | yq --arg CONTROLLER_NAME $CONTROLLER_NAME '.controllers[$CONTROLLER_NAME].password')
    local vca_host=$(cat $HOME/.local/share/juju/controllers.yaml | yq --arg CONTROLLER_NAME $CONTROLLER_NAME '.controllers[$CONTROLLER_NAME]["api-endpoints"][0]' --raw-output | cut -d ":" -f 1)
    local vca_port=$(cat $HOME/.local/share/juju/controllers.yaml | yq --arg CONTROLLER_NAME $CONTROLLER_NAME '.controllers[$CONTROLLER_NAME]["api-endpoints"][0]' --raw-output | cut -d ":" -f 2)
    local vca_pubkey=\"$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)\"
    local vca_cloud="lxd-cloud"
    # Get the VCA Certificate as a single base64 line for the overlay.
    local vca_cacert=$(cat $HOME/.local/share/juju/controllers.yaml | yq --arg CONTROLLER_NAME $CONTROLLER_NAME '.controllers[$CONTROLLER_NAME]["ca-cert"]' --raw-output | base64 | tr -d \\n)

    # Generate a new overlay.yaml, overriding any existing one
    cat << EOF > /tmp/vca-overlay.yaml
applications:
  lcm-k8s:
    options:
      vca_user: $vca_user
      vca_password: $vca_password
      vca_host: $vca_host
      vca_port: $vca_port
      vca_pubkey: $vca_pubkey
      vca_cacert: $vca_cacert
      vca_cloud: $vca_cloud
      vca_k8s_cloud: $K8S_CLOUD_NAME
  mon-k8s:
    options:
      vca_user: $vca_user
      vca_password: $vca_password
      vca_host: $vca_host
      vca_cacert: $vca_cacert
EOF
    mv /tmp/vca-overlay.yaml ~/.osm/
    OSM_VCA_HOST=$vca_host
}
322
function generate_images_overlay(){
    # Write $IMAGES_OVERLAY_FILE pinning every OSM charm's container image
    # to the opensourcemano/<image>:$TAG release. Built in /tmp first and
    # moved into place, matching the original behavior.
    local overlay_tmp=/tmp/images-overlay.yaml
    local entry charm image
    echo "applications:" > $overlay_tmp
    # charm-application -> docker-image pairs (image name differs for ui-k8s).
    for entry in lcm-k8s:lcm mon-k8s:mon ro-k8s:ro nbi-k8s:nbi pol-k8s:pol \
                 ui-k8s:light-ui pla:pla ng-ui:ng-ui keystone:keystone; do
        charm=${entry%%:*}
        image=${entry##*:}
        cat << EOF >> $overlay_tmp
  $charm:
    options:
      image: opensourcemano/$image:$TAG
EOF
    done
    # Trailing blank line, as in the original overlay.
    echo "" >> $overlay_tmp
    mv $overlay_tmp $IMAGES_OVERLAY_FILE
}
357
function install_osmclient() {
    # Install the OSM CLI snap and alias "osmclient.osm" to plain `osm`.
    sudo snap install osmclient
    sudo snap alias osmclient.osm osm
}
362
363
function install_microstack() {
    # Install a local MicroStack (OpenStack-in-snaps), upload an Ubuntu 16.04
    # cloud image and an SSH keypair, and register it in OSM as the VIM
    # "microstack-site". Requires OSM (NBI) and osmclient to be up already.
    sudo snap install microstack --classic --beta
    sudo microstack.init --auto
    wget https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ~/.osm/
    microstack.openstack image create \
        --public \
        --disk-format qcow2 \
        --container-format bare \
        --file ~/.osm/ubuntu-16.04-server-cloudimg-amd64-disk1.img \
        ubuntu1604
    # Dedicated passphrase-less keypair for instances started via this VIM.
    ssh-keygen -t rsa -N "" -f ~/.ssh/microstack
    microstack.openstack keypair create --public-key ~/.ssh/microstack.pub microstack
    # Resolve the NBI address from juju so the osm client can reach it.
    export OSM_HOSTNAME=`juju status --format json | jq -rc '.applications."nbi-k8s".address'`
    # NOTE(review): 10.20.20.1 and the admin/keystone credentials are
    # presumably MicroStack's defaults — confirm if MicroStack changes them.
    osm vim-create --name microstack-site \
        --user admin \
        --password keystone \
        --auth_url http://10.20.20.1:5000/v3 \
        --tenant admin \
        --account_type openstack \
        --config='{security_groups: default,
            keypair: microstack,
            project_name: admin,
            user_domain_name: default,
            region_name: microstack,
            insecure: True,
            availability_zone: nova,
            version: 3}'
}
392
# Resolve the default-route interface and its primary IPv4 address; these
# drive the metallb range, the LXD endpoint and the xip.io hostnames.
DEFAULT_IF=$(ip route list match 0.0.0.0 | awk '{print $5}')
DEFAULT_IP=$(ip -o -4 a | grep "${DEFAULT_IF}" | awk '{split($4,a,"/"); print a[1]}')

# Fix: quote "$@" so option values containing spaces (e.g. a kubeconfig
# path) reach check_arguments unsplit.
check_arguments "$@"
mkdir -p ~/.osm
install_snaps
bootstrap_k8s_lxd
deploy_charmed_osm
install_osmclient
if [ -v MICROSTACK ]; then
    install_microstack
fi

# API_SERVER is a global set by deploy_charmed_osm.
echo "Your installation is now complete, follow these steps for configuring the osmclient:"
echo
echo "1. Create the OSM_HOSTNAME environment variable with the NBI IP"
echo
echo "export OSM_HOSTNAME=nbi.$API_SERVER.xip.io:443"
echo
echo "2. Add the previous command to your .bashrc for other Shell sessions"
echo
echo "echo \"export OSM_HOSTNAME=nbi.$API_SERVER.xip.io:443\" >> ~/.bashrc"
echo
echo "DONE"