# Major improvement in OSM charms
# [osm/devops.git] / installers / charmed_install.sh
1 #! /bin/bash
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 # set -eux
17
18 JUJU_AGENT_VERSION=2.8.9
19 K8S_CLOUD_NAME="k8s-cloud"
20 KUBECTL="microk8s.kubectl"
21 MICROK8S_VERSION=1.19
22 OSMCLIENT_VERSION=9.0
23 IMAGES_OVERLAY_FILE=~/.osm/images-overlay.yaml
24 PATH=/snap/bin:${PATH}
25
26 MODEL_NAME=osm
27
28 OSM_BUNDLE=cs:osm-58
29 OSM_HA_BUNDLE=cs:osm-ha-43
30 TAG=testing-daily
31
# Parse the installer's command-line flags into global variables.
#   --bundle/--k8s/--vca/--lxd/--lxd-cred/--tag/--registry take a value;
#   --microstack and --ha are boolean switches.
# Fix: value-taking options now consume their argument with an extra shift,
# so a value is never re-parsed as if it were an option name.
function check_arguments(){
    while [ $# -gt 0 ] ; do
        case $1 in
            --bundle) BUNDLE="$2"; shift ;;
            --k8s) KUBECFG="$2"; shift ;;
            --vca) CONTROLLER="$2"; shift ;;
            --lxd) LXD_CLOUD="$2"; shift ;;
            --lxd-cred) LXD_CREDENTIALS="$2"; shift ;;
            --microstack) MICROSTACK=y ;;
            --ha) BUNDLE=$OSM_HA_BUNDLE ;;
            --tag) TAG="$2"; shift ;;
            --registry) REGISTRY_INFO="$2"; shift ;;
        esac
        shift
    done
}
50
# Install the kubernetes layer and the juju client as snaps.
# Without --k8s: installs a local microk8s and writes its kubeconfig to
# ~/.osm/microk8s-config.yaml. With --k8s: installs plain kubectl and
# points KUBECONFIG at the supplied file.
# Sets the globals KUBEGRP, KUBECONFIG and (in the --k8s case) KUBECTL.
function install_snaps(){
    if [ ! -v KUBECFG ]; then
        sudo snap install microk8s --classic --channel=${MICROK8S_VERSION}/stable
        # Make the API server advertise the host's default IP; restart
        # microk8s only when the flag had to be appended.
        sudo cat /var/snap/microk8s/current/args/kube-apiserver | grep advertise-address || (
            echo "--advertise-address $DEFAULT_IP" | sudo tee -a /var/snap/microk8s/current/args/kube-apiserver
            microk8s.stop
            microk8s.start
        )
        # $(...) instead of backticks, quoted to be safe against IFS surprises.
        sudo usermod -a -G microk8s "$(whoami)"
        mkdir -p ~/.kube
        sudo chown -f -R "$(whoami)" ~/.kube
        KUBEGRP="microk8s"
        sg ${KUBEGRP} -c "microk8s status --wait-ready"
        KUBECONFIG=~/.osm/microk8s-config.yaml
        sg ${KUBEGRP} -c "microk8s config" | tee ${KUBECONFIG}
    else
        # External cluster: use the stock kubectl against the given config.
        KUBECTL="kubectl"
        sudo snap install kubectl --classic
        export KUBECONFIG=${KUBECFG}
        KUBEGRP=$(id -g -n)
    fi
    sudo snap install juju --classic --channel=2.8/stable
}
74
# Bootstrap (or reuse) the VCA juju controller on kubernetes, and register
# an LXD cloud with it for proxy/native charms.
#   --vca NAME : reuse an existing controller NAME (no bootstrap).
#   otherwise  : bootstrap a new controller called "osm-vca".
#   --lxd/--lxd-cred : use an external LXD; otherwise install and configure
#   a local LXD and generate the cloud/credential YAML for juju.
# Fix: corrected "Threre" -> "There" in both user-facing error messages.
function bootstrap_k8s_lxd(){
    [ -v CONTROLLER ] && ADD_K8S_OPTS="--controller ${CONTROLLER}" && CONTROLLER_NAME=$CONTROLLER
    [ ! -v CONTROLLER ] && ADD_K8S_OPTS="--client" && BOOTSTRAP_NEEDED="yes" && CONTROLLER_NAME="osm-vca"

    if [ -v BOOTSTRAP_NEEDED ]; then
        # Refuse to bootstrap over a controller that already uses our name.
        CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
        if [ $CONTROLLER_PRESENT -ge 1 ]; then
            cat << EOF
There is already a VCA present with the installer reserved name of "${CONTROLLER_NAME}".
You may either explicitly use this VCA with the "--vca ${CONTROLLER_NAME}" option, or remove it
using this command:

   juju destroy-controller --release-storage --destroy-all-models -y ${CONTROLLER_NAME}

Please retry the installation once this conflict has been resolved.
EOF
            exit 1
        fi
    else
        # A named VCA was requested: it must already exist.
        CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
        if [ $CONTROLLER_PRESENT -le 0 ]; then
            cat << EOF
There is no VCA present with the name "${CONTROLLER_NAME}". Please specify a VCA
that exists, or remove the --vca ${CONTROLLER_NAME} option.

Please retry the installation with one of the solutions applied.
EOF
            exit 1
        fi
    fi

    if [ -v KUBECFG ]; then
        # External k8s: register it with juju and bootstrap on it if needed.
        cat $KUBECFG | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
        [ -v BOOTSTRAP_NEEDED ] && juju bootstrap $K8S_CLOUD_NAME $CONTROLLER_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=$JUJU_AGENT_VERSION
    else
        # Local microk8s: enable the addons the bundle needs.
        sg ${KUBEGRP} -c "echo ${DEFAULT_IP}-${DEFAULT_IP} | microk8s.enable metallb"
        sg ${KUBEGRP} -c "microk8s.enable ingress"
        sg ${KUBEGRP} -c "microk8s.enable storage dns"
        # Wait up to TIME_TO_WAIT seconds for the storage addon to settle.
        TIME_TO_WAIT=30
        start_time="$(date -u +%s)"
        while true
        do
            now="$(date -u +%s)"
            if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
                echo "Microk8s storage failed to enable"
                sg ${KUBEGRP} -c "microk8s.status"
                exit 1
            fi
            storage_status=$(sg ${KUBEGRP} -c "microk8s.status -a storage")
            if [[ $storage_status == "enabled" ]]; then
                break
            fi
            sleep 1
        done

        [ ! -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c "microk8s.config" | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
        [ -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c \
            "juju bootstrap microk8s $CONTROLLER_NAME --config controller-service-type=loadbalancer --agent-version=$JUJU_AGENT_VERSION" \
            && K8S_CLOUD_NAME=microk8s
    fi

    if [ -v LXD_CLOUD ]; then
        if [ ! -v LXD_CREDENTIALS ]; then
            echo "The installer needs the LXD server certificate if the LXD is external"
            exit 1
        fi
    else
        LXDENDPOINT=$DEFAULT_IP
        LXD_CLOUD=~/.osm/lxd-cloud.yaml
        LXD_CREDENTIALS=~/.osm/lxd-credentials.yaml
        # Apply sysctl production values for optimal performance
        sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
        sudo sysctl --system
        # Install LXD snap
        sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
        sudo snap install lxd
        # Configure LXD
        sudo usermod -a -G lxd "$(whoami)"
        cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$LXDENDPOINT':8443/' | sg lxd -c "lxd init --preseed"
        sg lxd -c "lxd waitready"
        # Propagate the host interface MTU to the default profile and bridge.
        DEFAULT_MTU=$(ip addr show $DEFAULT_IF | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
        sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
        sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"

        cat << EOF > $LXD_CLOUD
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
        # Self-signed client certificate for juju to authenticate against LXD.
        openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
        # Indent the PEM blocks so they nest under the YAML keys below.
        local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
        local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
        local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')

        cat << EOF > $LXD_CREDENTIALS
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
        lxc config trust add local: ~/.osm/client.crt
    fi

    # Register the LXD cloud/credentials with the controller and turn on
    # the k8s-operators feature flag.
    juju add-cloud -c $CONTROLLER_NAME lxd-cloud $LXD_CLOUD --force
    juju add-credential -c $CONTROLLER_NAME lxd-cloud -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
195
# Wait (up to 30s) until the ingress at position $2 (INDEX) in namespace
# "osm" is named $1 (SERVICE); exits the whole script on timeout.
# Fix: the command substitution in the comparison is now quoted — an empty
# kubectl/jq result previously made `[` fail with a syntax error instead of
# simply retrying.
function wait_for_port(){
    SERVICE=$1
    INDEX=$2
    TIME_TO_WAIT=30
    start_time="$(date -u +%s)"
    while true
    do
        now="$(date -u +%s)"
        if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
            echo "Failed to expose external ${SERVICE} interface port"
            exit 1
        fi

        # $INDEX expands here (outer double quotes) before jq sees the filter.
        if [ "$(sg ${KUBEGRP} -c "${KUBECTL} get ingresses.networking -n osm -o json | jq -r '.items[$INDEX].metadata.name'")" == "${SERVICE}" ] ; then
            break
        fi
        sleep 1
    done
}
215
# Deploy the OSM charm bundle into a fresh "osm" model and expose the UIs.
# Reads REGISTRY_INFO ("[user:pass@]url"), BUNDLE/KUBECFG/TAG as set by
# check_arguments, and writes REGISTRY_USERNAME/PASSWORD/URL for
# generate_images_overlay.
function deploy_charmed_osm(){
    if [ -v REGISTRY_INFO ] ; then
        # Split on "@": one part means url only, two mean creds@url.
        registry_parts=(${REGISTRY_INFO//@/ })
        if [ ${#registry_parts[@]} -eq 1 ] ; then
            # No credentials supplied
            REGISTRY_USERNAME=""
            REGISTRY_PASSWORD=""
            REGISTRY_URL=${registry_parts[0]}
        else
            credentials=${registry_parts[0]}
            credential_parts=(${credentials//:/ })
            REGISTRY_USERNAME=${credential_parts[0]}
            REGISTRY_PASSWORD=${credential_parts[1]}
            REGISTRY_URL=${registry_parts[1]}
        fi
        # Ensure the URL ends with a /
        case $REGISTRY_URL in
            */) ;;
            *) REGISTRY_URL=${REGISTRY_URL}/
        esac
    fi

    create_overlay
    echo "Creating OSM model"
    if [ -v KUBECFG ]; then
        juju add-model $MODEL_NAME $K8S_CLOUD_NAME
    else
        # microk8s commands must run under the microk8s group.
        sg ${KUBEGRP} -c "juju add-model $MODEL_NAME $K8S_CLOUD_NAME"
    fi
    echo "Deploying OSM with charms"
    images_overlay=""
    if [ -v REGISTRY_URL ]; then
        # NOTE(review): TAG has a file-level default, so this fallback looks
        # unreachable — confirm before relying on it.
        [ ! -v TAG ] && TAG='latest'
    fi
    [ -v TAG ] && generate_images_overlay && images_overlay="--overlay $IMAGES_OVERLAY_FILE"

    if [ -v BUNDLE ]; then
        juju deploy -m $MODEL_NAME $BUNDLE --overlay ~/.osm/vca-overlay.yaml $images_overlay
    else
        juju deploy -m $MODEL_NAME $OSM_BUNDLE --overlay ~/.osm/vca-overlay.yaml $images_overlay
    fi

    # Work out the address used in the xip.io ingress hostnames: the host's
    # default IP for local microk8s, or the host part of the API server URL
    # for an external cluster.
    if [ ! -v KUBECFG ]; then
        API_SERVER=${DEFAULT_IP}
    else
        API_SERVER=$(kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " ")
        # Strip scheme, optional user@ and :port from the server URL.
        proto="$(echo $API_SERVER | grep :// | sed -e's,^\(.*://\).*,\1,g')"
        url="$(echo ${API_SERVER/$proto/})"
        user="$(echo $url | grep @ | cut -d@ -f1)"
        hostport="$(echo ${url/$user@/} | cut -d/ -f1)"
        API_SERVER="$(echo $hostport | sed -e 's,:.*,,g')"
    fi
    # Expose OSM services
    juju config -m $MODEL_NAME nbi site_url=https://nbi.${API_SERVER}.xip.io
    juju config -m $MODEL_NAME ng-ui site_url=https://ui.${API_SERVER}.xip.io
    juju config -m $MODEL_NAME grafana site_url=https://grafana.${API_SERVER}.xip.io
    juju config -m $MODEL_NAME prometheus site_url=https://prometheus.${API_SERVER}.xip.io

    echo "Waiting for deployment to finish..."
    check_osm_deployed
    echo "OSM with charms deployed"
}
278
# Poll `juju status` until all 14 OSM services report active. The timeout
# window restarts whenever the active count changes, so the installer only
# gives up after TIME_TO_WAIT seconds with no visible progress.
function check_osm_deployed() {
    TIME_TO_WAIT=600
    start_time="$(date -u +%s)"
    total_service_count=14
    previous_count=0
    while true
    do
        service_count=$(juju status -m $MODEL_NAME | grep kubernetes | grep active | wc -l)
        echo "$service_count / $total_service_count services active"
        [ $service_count -eq $total_service_count ] && break
        if [ $service_count -ne $previous_count ]; then
            # Progress was made: remember the new count and reset the timer.
            previous_count=$service_count
            start_time="$(date -u +%s)"
        fi
        now="$(date -u +%s)"
        if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
            echo "Timed out waiting for OSM services to become ready"
            exit 1
        fi
        sleep 10
    done
}
303
# Build ~/.osm/vca-overlay.yaml with the VCA (juju controller) contact
# details and credentials that the lcm and mon charms need. Reads the juju
# client state under ~/.local/share/juju and sets the global OSM_VCA_HOST.
function create_overlay() {
    # jq/yq are needed to pull fields out of the juju YAML files.
    sudo snap install jq
    sudo snap install yq
    # NOTE(review): assumes the home directory is /home/$USER — verify for
    # root or non-standard home layouts.
    local HOME=/home/$USER
    local vca_user=$(cat $HOME/.local/share/juju/accounts.yaml | yq e .controllers.$CONTROLLER_NAME.user - )
    local vca_password=$(cat $HOME/.local/share/juju/accounts.yaml | yq e .controllers.$CONTROLLER_NAME.password - )
    # Host and port come from the first API endpoint ("host:port").
    local vca_host=$(cat $HOME/.local/share/juju/controllers.yaml | yq e .controllers.$CONTROLLER_NAME.api-endpoints[0] - | cut -d ":" -f 1)
    local vca_port=$(cat $HOME/.local/share/juju/controllers.yaml | yq e .controllers.$CONTROLLER_NAME.api-endpoints[0] - | cut -d ":" -f 2)
    local vca_pubkey=\"$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)\"
    local vca_cloud="lxd-cloud"
    # Get the VCA Certificate
    local vca_cacert=$(cat $HOME/.local/share/juju/controllers.yaml | yq e .controllers.$CONTROLLER_NAME.ca-cert - | base64 | tr -d \\n)

    # Calculate the default route of this machine
    local DEFAULT_IF=`ip route list match 0.0.0.0 | awk '{print $5}'`

    # Generate a new overlay.yaml, overriding any existing one
    cat << EOF > /tmp/vca-overlay.yaml
applications:
  lcm:
    options:
      vca_user: $vca_user
      vca_password: $vca_password
      vca_host: $vca_host
      vca_port: $vca_port
      vca_pubkey: $vca_pubkey
      vca_cacert: $vca_cacert
      vca_cloud: $vca_cloud
      vca_k8s_cloud: $K8S_CLOUD_NAME
  mon:
    options:
      vca_user: $vca_user
      vca_password: $vca_password
      vca_host: $vca_host
      vca_cacert: $vca_cacert
EOF
    mv /tmp/vca-overlay.yaml ~/.osm/
    OSM_VCA_HOST=$vca_host
}
343
# Produce $IMAGES_OVERLAY_FILE, a juju overlay pointing every charm's image
# resource at ${REGISTRY_URL}opensourcemano/<charm>:$TAG, plus one
# /tmp/<charm>_registry.yaml resource file per charm (with registry
# credentials when REGISTRY_USERNAME is non-empty).
function generate_images_overlay(){
    local overlay_tmp=/tmp/images-overlay.yaml
    echo "applications:" > ${overlay_tmp}

    charms_with_resources="nbi lcm mon pol ng-ui ro pla keystone"
    for charm in $charms_with_resources; do
        # Per-charm resource file consumed by juju via the overlay below.
        {
            echo "registrypath: ${REGISTRY_URL}opensourcemano/${charm}:$TAG"
            if [ ! -z "$REGISTRY_USERNAME" ] ; then
                echo "username: $REGISTRY_USERNAME"
                echo "password: $REGISTRY_PASSWORD"
            fi
        } > /tmp/${charm}_registry.yaml

        # Append this charm's entry to the overlay being assembled.
        cat << EOF >> ${overlay_tmp}
  ${charm}:
    resources:
      image: /tmp/${charm}_registry.yaml

EOF
    done

    mv ${overlay_tmp} $IMAGES_OVERLAY_FILE
}
367
# Refresh an already-installed osmclient snap to the preferred channel.
function refresh_osmclient_snap() {
    osmclient_snap_install_refresh refresh
}
371
# Install the osmclient snap for the first time.
function install_osm_client_snap() {
    osmclient_snap_install_refresh install
}
375
# Run `snap $1 osmclient` ($1 is "install" or "refresh"), trying the most
# stable channel of $OSMCLIENT_VERSION first and falling back through
# candidate/beta/edge; stops at the first channel that succeeds.
function osmclient_snap_install_refresh() {
    channel_preference="stable candidate beta edge"
    for channel in $channel_preference; do
        echo "Trying to install osmclient from channel $OSMCLIENT_VERSION/$channel"
        # "$1" quoted so the action cannot word-split or glob.
        sudo snap "$1" osmclient --channel $OSMCLIENT_VERSION/$channel 2> /dev/null && echo osmclient snap installed && break
    done
}
# Install the osmclient snap, or refresh it when it is already present.
function install_osmclient() {
    # Explicit if/else: the previous `a && b || c` chain would also run the
    # install path whenever a refresh attempt failed.
    if snap info osmclient | grep -E ^installed: ; then
        refresh_osmclient_snap
    else
        install_osm_client_snap
    fi
}
386
# Register a dummy VIM and the local kubernetes cluster with OSM so that
# k8s-based workloads can be deployed out of the box.
function add_local_k8scluster() {
    osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    # osm reads the kubeconfig from a file path; hand it a private temp copy.
    tmpfile=$(mktemp --tmpdir=${HOME})
    cp ${KUBECONFIG} ${tmpfile}
    osm --all-projects k8scluster-add \
        --creds ${tmpfile} \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.19' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
    rm -f ${tmpfile}
}
406
# Install MicroStack and register it with OSM as an OpenStack VIM,
# uploading an Ubuntu 16.04 cloud image and a dedicated SSH keypair.
function install_microstack() {
    sudo snap install microstack --classic --beta
    sudo microstack.init --auto
    wget https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ~/.osm/
    microstack.openstack image create \
        --public \
        --disk-format qcow2 \
        --container-format bare \
        --file ~/.osm/ubuntu-16.04-server-cloudimg-amd64-disk1.img \
        ubuntu1604
    ssh-keygen -t rsa -N "" -f ~/.ssh/microstack
    microstack.openstack keypair create --public-key ~/.ssh/microstack.pub microstack
    # Point the osm client at the NBI address for the vim-create call below.
    export OSM_HOSTNAME=`juju status -m $MODEL_NAME --format json | jq -rc '.applications."nbi".address'`
    osm vim-create --name microstack-site \
        --user admin \
        --password keystone \
        --auth_url http://10.20.20.1:5000/v3 \
        --tenant admin \
        --account_type openstack \
        --config='{security_groups: default,
        keypair: microstack,
        project_name: admin,
        user_domain_name: default,
        region_name: microstack,
        insecure: True,
        availability_zone: nova,
        version: 3}'
}
435
# ---- main ----------------------------------------------------------------
# Determine the default-route interface and its primary IPv4 address; these
# drive microk8s metallb, LXD endpoints and the ingress hostnames.
DEFAULT_IF=$(ip route list match 0.0.0.0 | awk '{print $5}')
DEFAULT_IP=$(ip -o -4 a | grep "${DEFAULT_IF}" | awk '{split($4,a,"/"); print a[1]}')

# "$@" (quoted) so arguments containing spaces survive word-splitting.
check_arguments "$@"
mkdir -p ~/.osm
install_snaps
bootstrap_k8s_lxd
deploy_charmed_osm
install_osmclient
# Derive the NBI hostname from the configured site_url (strip the scheme).
export OSM_HOSTNAME=$(juju config -m $MODEL_NAME nbi site_url | sed "s/http.*\?:\/\///"):443
sleep 10
add_local_k8scluster

if [ -v MICROSTACK ]; then
    install_microstack
fi


echo "Your installation is now complete, follow these steps for configuring the osmclient:"
echo
echo "1. Create the OSM_HOSTNAME environment variable with the NBI IP"
echo
echo "export OSM_HOSTNAME=$OSM_HOSTNAME"
echo
echo "2. Add the previous command to your .bashrc for other Shell sessions"
echo
echo "echo \"export OSM_HOSTNAME=$OSM_HOSTNAME\" >> ~/.bashrc"
echo
echo "DONE"