Add HA option and minor fixes
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print the installer's command-line help.
# FIX: corrected user-facing typos: "confifured" -> "configured" and
# "do not juju" -> "do not install juju".
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "                     ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes already installed"
    echo -e "     --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
#    echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help:    print this help"
    echo -e "     --charmed:                       Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]:        Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]:       Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]:                  Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]:             Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]:        Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]:                  Installs microstack as a vim. (--charmed option)"
    echo -e "     [--ha]:                          Installs High Availability bundle. (--charmed option)"
    echo -e "     [--tag]:                         Docker image tag"

}
74
75 # takes a juju/accounts.yaml file and returns the password specific
76 # for a controller. I wrote this using only bash tools to minimize
77 # additions of other packages
78 function parse_juju_password {
79 password_file="${HOME}/.local/share/juju/accounts.yaml"
80 local controller_name=$1
81 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
82 sed -ne "s|^\($s\):|\1|" \
83 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
84 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
85 awk -F$fs -v controller=$controller_name '{
86 indent = length($1)/2;
87 vname[indent] = $2;
88 for (i in vname) {if (i > indent) {delete vname[i]}}
89 if (length($3) > 0) {
90 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
91 if (match(vn,controller) && match($2,"password")) {
92 printf("%s",$3);
93 }
94 }
95 }'
96 }
97
function generate_secret() {
    # Produce a 32-character random alphanumeric token on stdout
    # (no trailing newline), sourced from the kernel entropy pool.
    head /dev/urandom | tr -cd '0-9A-Za-z' | head -c 32
}
101
# Delete the persistent storage of an OSM deployment.
# Kubernetes mode ($KUBERNETES set): $1 is a host directory, removed recursively.
# Swarm mode: $1 is the stack name; each known docker volume of the stack is removed.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        # FIX: quote the target of rm -rf so paths with spaces/glob chars
        # cannot word-split into unintended deletions.
        # $WORKDIR_SUDO is intentionally unquoted: it is empty or "sudo".
        $WORKDIR_SUDO rm -rf "${k8_volume}"
    else
        stack=$1
        # Known OSM data volumes; names are "<stack>_<volume>".
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
115
function remove_network() {
    # Remove the docker network that was created for the given stack.
    # The network follows the naming convention "net<stack>".
    stack=$1
    local network_name="net${stack}"
    sg docker -c "docker network rm ${network_name}"
}
120
function remove_iptables() {
    # Remove the DNAT rule that forwarded the juju API port (17070) from this
    # host's default IP to the VCA/juju controller, then persist the change.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from 'juju show-controller' output:
        # the api-endpoints line, first address, with the port stripped.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Find the interface carrying the default route ('ip route', with a
        # legacy 'route -n' fallback), then its IPv4 address.
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # 'iptables -C' only checks for the rule; delete (-D) and save only if it
    # actually exists, so the function is idempotent.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
141
# Remove a docker swarm stack and wait (up to ~30s) until all of its
# containers are gone. Aborts via FATAL if containers remain after the wait.
function remove_stack() {
    stack=$1
    # Only act if the stack is known to docker.
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # Number of remaining stack tasks; 0 means fully removed.
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            # FIX: idiomatic shell arithmetic instead of the legacy 'let'.
            COUNTER=$((COUNTER+1))
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Give docker a moment to release networks/volumes of the stack.
        sleep 5
    fi
}
165
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace cascades to every resource deployed inside it.
    # FIX: quote "$1" so a namespace argument cannot be word-split.
    kubectl delete ns "$1"
}
170
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Tear down the tiller-based helm installation, but only when no helm
    # releases remain, so an installation in active use is left untouched.
    if [ -z "$(helm ls -q)" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
181
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 flavours of the OSM client package.
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$pkg"
    done
}
187
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        # Addon-only mode ('-o'): remove just the requested addon, not OSM.
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            # Deleting the namespace removes all OSM k8s resources at once.
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Unquoted heredoc: image names are expanded by THIS shell before the
        # commands run in a 'docker'-group shell via newgrp.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # k8s volumes live under a per-namespace host directory.
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # When an external controller (-K) was used, this host owns neither
        # the NAT rule nor the juju controller, so skip both removals.
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
239
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # BUGFIX: the check was inverted. 'dpkg -l <pkg>' exits 0 when the package
    # IS present, so the original code only ran the install when the package
    # was already there. Install only when the dpkg query fails.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Pre-seed debconf so the install never prompts interactively.
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
250
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Make sure iptables-persistent is present before touching NAT rules.
    check_install_iptables_persistent

    printf '\nConfiguring NAT rules\n   Required root privileges\n'
    sudo $OSM_DEVOPS/installers/nat_osm
}
259
function FATAL(){
    # Report the failure reason ($1) and abort the installer immediately.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
264
function install_lxd() {
    # Install and configure LXD (snap flavour) on this host:
    # sysctl tuning, removal of the deb-based LXC/LXD stack, snap install,
    # preseeded 'lxd init' and MTU alignment with the default interface.
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Patch the preseed so the LXD API listens on the default IP, then feed it
    # to 'lxd init' in a shell with the freshly granted lxd group.
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Mirror the MTU of the interface with the default route onto the default
    # container profile so container networking matches the host.
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
288
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask;  $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                # Empty reply: apply the default when one was given; any
                # other default forces an explicit answer below.
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
302
function install_osmclient(){
    # Install the OSM client and information model from the configured apt
    # repository, then print the environment hints the user may want.
    # Strip the option prefixes in case the variables still carry them.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    # Trust the OSM release key and register the apt repository.
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight (legacy LXD container) install: point the client at the
    # SO-ub / RO container IPs taken from 'lxc list'.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
333
function install_prometheus_nodeexporter(){
    # Install and enable the Prometheus node_exporter as a systemd service,
    # running under a dedicated system user. No-op if already active.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Create the service user only if it does not exist yet.
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download the release tarball pinned by PROMETHEUS_NODE_EXPORTER_TAG,
        # install the binary and clean up the temporary files.
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register, start and enable the systemd unit shipped with devops.
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
359
function uninstall_prometheus_nodeexporter(){
    # Fully remove the Prometheus node_exporter: stop and disable the service,
    # delete its unit file, its dedicated user and its binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    # Reload so systemd forgets the removed unit.
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
369
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce and grants the
    # current user access through the 'docker' group.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    # -f: do not fail if the group already exists.
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' runs with the new group without requiring a re-login.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
389
function install_docker_compose() {
    # installs and configures docker-compose
    # Downloads the pinned 1.18.0 release binary matching this host's
    # kernel/architecture and makes it executable.
    echo "Installing Docker Compose ..."
    local compose_url="https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)"
    sudo curl -L "$compose_url" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
397
function install_juju() {
    # Install juju from the 2.7/stable snap channel and make sure the snap
    # binary directory is reachable in the current shell session.
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    if [[ ":$PATH": != *":/snap/bin:"* ]]; then
        PATH="/snap/bin:${PATH}"
    fi
    echo "Finished installation of juju"
    return 0
}
405
function juju_createcontroller() {
    # Bootstrap a juju controller named after the OSM stack on the configured
    # cloud, unless a controller with that name already exists.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # Sanity check: exactly one controller line must match the stack name.
    # NOTE(review): inside this double-quoted awk program, "$1" is expanded by
    # the SHELL (the function's first positional parameter, normally empty),
    # not by awk; the line count used by the check is unaffected, but the
    # original intent was probably awk's \$1 — confirm before changing.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
414
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding the juju API port (17070) on the
    # host's default IP to the VCA/juju controller, and persist it.
    check_install_iptables_persistent

    # 'iptables -C' checks for the rule without changing anything; append (-A)
    # and save only when the rule is not present yet.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
423
function generate_docker_images() {
    # Obtain all docker images OSM needs. For each third-party image the pull
    # is unconditional (subject to TO_REBUILD filtering); for each OSM module
    # the image is either pulled from the registry (PULL_IMAGES set) or built
    # from a fresh gerrit checkout of COMMIT_ID under LWTEMPDIR.
    # TO_REBUILD empty means "all modules"; otherwise only the named ones.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Common --build-arg set passed to local docker builds.
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # --- third-party infrastructure images ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # --- OSM module images: pull or build from source ---
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set (or explicitly
    # requested through TO_REBUILD).
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    # NBI and keystone share the NBI repository.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this cadvisor pull duplicates the PROMETHEUS-CADVISOR pull
    # above but is gated on PROMETHEUS — looks redundant; confirm before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
535
# Copy $1 over $2 unless both files already have identical content.
# If $2 exists with different content the user is asked before overwriting;
# 'cp -b' keeps a backup of the previous destination file.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # FIX: run cmp directly instead of wrapping it in $(...) — the old form
    # asked the shell to execute cmp's (empty) stdout, relying on an obscure
    # bash rule that the status of an empty command is the last substitution's.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
547
548 function generate_docker_env_files() {
549 echo "Doing a backup of existing env files"
550 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
551 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
552 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
553 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
554 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
555 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
556 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
557 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
558 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
559
560 echo "Generating docker env files"
561 if [ -n "$KUBERNETES" ]; then
562 #Kubernetes resources
563 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
564 else
565 # Docker-compose
566 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
567 if [ -n "$INSTALL_PLA" ]; then
568 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
569 fi
570
571 # Prometheus files
572 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
573 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
574
575 # Grafana files
576 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
577 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
578 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
579 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
580 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
581
582 # Prometheus Exporters files
583 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
584 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
585 fi
586
587 # LCM
588 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
589 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
590 fi
591
592 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
593 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
594 else
595 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
596 fi
597
598 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
599 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
600 else
601 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
602 fi
603
604 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
605 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
606 else
607 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
608 fi
609
610 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
611 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
612 else
613 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
614 fi
615
616 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
617 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
618 else
619 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
620 fi
621
622 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
623 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
624 fi
625
626 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
627 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
628 fi
629
630 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
631 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
632 else
633 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
634 fi
635
636 # RO
637 MYSQL_ROOT_PASSWORD=$(generate_secret)
638 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
639 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
640 fi
641 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
642 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
643 fi
644
645 # Keystone
646 KEYSTONE_DB_PASSWORD=$(generate_secret)
647 SERVICE_PASSWORD=$(generate_secret)
648 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
649 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
650 fi
651 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
652 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
653 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
654 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
655 fi
656
657 # NBI
658 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
659 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
660 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
661 fi
662
663 # MON
664 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
665 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
666 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
667 fi
668
669 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
670 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
671 else
672 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
673 fi
674
675 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
676 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
677 else
678 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
679 fi
680
681 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
682 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
683 else
684 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
685 fi
686
687 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
688 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
689 else
690 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
691 fi
692
693
694 # POL
695 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
696 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
697 fi
698
699 # LW-UI
700 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
701 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
702 fi
703
704 echo "Finished generation of docker env files"
705 }
706
# Write a one-line wrapper script that starts the osmclient sidecar
# container attached to the stack network, mark it executable, and tell
# the user where it was placed.
function generate_osmclient_script () {
    local script_path="$OSM_DOCKER_WORK_DIR/osm"
    local run_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    printf '%s\n' "$run_cmd" | $WORKDIR_SUDO tee "$script_path"
    $WORKDIR_SUDO chmod +x "$script_path"
    echo "osmclient sidecar container can be found at: $script_path"
}
712
#installs kubernetes packages (kubelet/kubeadm/kubectl) from the upstream
#Google apt repository, pinned to 1.15.0 so they match the manifests this
#installer ships.
function install_kube() {
    # HTTPS transport is needed before adding the external apt repo.
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Trust Google's package-signing key and register the k8s repository.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Versions are pinned deliberately; do not bump without retesting the pods.
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
722
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster-config yaml (cluster-config.yaml).
function init_kubeadm() {
    # kubelet refuses to start with swap enabled, so turn it off first.
    sudo swapoff -a
    sudo kubeadm init --config $1
    # Small grace period before the caller starts using the cluster.
    sleep 5
}
729
# Copy the admin kubeconfig produced by kubeadm into the invoking user's
# ~/.kube/config so subsequent kubectl calls work without sudo.
function kube_config_dir() {
    # The manifests dir only exists if kubeadm init actually ran.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # Hand ownership to the (non-root) user running the installer.
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
736
#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this replaces any EXIT trap previously set by the caller
    # (e.g. install_lightweight's LWTEMPDIR cleanup) — confirm intended.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    # Fetch the upstream flannel manifest and apply everything in the dir.
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
745
#creates secrets from env files which will be used by containers
# Each component <c> gets a secret named "<c>-secret" built from the
# matching "<c>.env" file in $OSM_DOCKER_WORK_DIR, inside the stack namespace.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    local component
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
758
#deploys osm pods and services
# Untaints the master node so OSM workloads can schedule on it, then
# applies every manifest found in $OSM_K8S_WORK_DIR into the stack namespace.
function deploy_osm_services() {
    # Single awk pass: pick the node name (col 1) of rows whose ROLES
    # column (col 3) mentions "master".
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
766
# Deploy the optional PLA (placement) service: patch its manifest the same
# way the core manifests are patched, then apply it into the stack namespace.
function deploy_osm_pla_service() {
    # corresponding to parse_yaml: pin the pla image tag unless it is the default "7"
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at the stack volume dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
775
#Install helm and tiller
# Installs the helm v2 client if missing, then creates the tiller service
# account + cluster role binding and initializes helm, waiting up to
# ~120 seconds for the tiller deployment to become ready.
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            # READY column of the tiller deployment ("1/1" when up).
            # (also fixed a misplaced quote: '{print $2'} -> '{print $2}')
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # BUGFIX: this used to assign to an unused 'num' variable, so
            # 'counter' never advanced and the timeout could never expire.
            counter=$((counter + 2))
            sleep 2
        done
    fi
}
808
# Pin every core OSM service manifest in $OSM_K8S_WORK_DIR to a given
# docker image tag.
# $1 - tag to substitute into each opensourcemano/<service> image reference.
# Note: TAG is intentionally left global, as in the original code.
function parse_yaml() {
    TAG=$1
    local svc
    for svc in nbi lcm ro pol mon light-ui keystone; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$svc:.*/opensourcemano\/$svc:$TAG/g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
816
# Rewrite hostPath volumes in the stateful service manifests so each
# stack keeps its data under its own directory ($OSM_NAMESPACE_VOL)
# instead of the shared default /var/lib/osm.
function namespace_vol() {
    local svc
    for svc in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
823
# Initialize a single-node docker swarm advertising on the default-route IP.
# If the host MTU is non-standard, pre-create docker_gwbridge with that MTU
# so overlay traffic is not fragmented.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # All existing docker network names (drop the header row).
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Highest 172.x subnet in use, bumped by one in the second octet,
        # to pick a free subnet for the gateway bridge ("-1" if exhausted).
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
833
# Create the attachable overlay network "net<stack>" that all OSM stack
# services (and the osmclient sidecar) join, using the detected host MTU.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
839
# Deploy the OSM docker-swarm stack: compute the host/container port map,
# persist it (plus image tags) to osm_ports.sh, then run docker stack deploy
# with the compose file(s).
function deploy_lightweight() {

    echo "Deploying lightweight build"
    # Container-side service ports. Note the Prometheus HOST port (9091)
    # differs from its container port (9090) to avoid clashing with RO.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Only the container port is listed: docker publishes no host ports.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container pairs consumed by the compose file.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist everything the compose file needs as an env script sourced
    # just before "docker stack deploy".
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    # With PLA, the stack is composed from both compose files.
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
898
# Pull the ELK images, (re)deploy the osm_elk swarm stack attached to the
# OSM network, then wait up to 40 s for Kibana and create/default the
# filebeat-* index pattern via its saved-objects API.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    # -b keeps a backup of any previously customized compose files.
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    # Poll Kibana's status endpoint until it answers 200 or we time out.
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the manual commands for the user.
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
948
# Main lightweight-install flow: sanity checks, LXD/juju (VCA) setup,
# docker or kubernetes bring-up, env-file generation, and finally the
# deployment of the OSM stack (swarm or k8s) plus optional add-ons.
# Fixes in this revision:
#  - "$INSTALL_PLA"] was missing a space before ']', so the PLA deployment
#    was always (silently) skipped.
#  - the OSM_DATABASE_COMMONKEY sanity check was testing the literal string
#    "OSM_DATABASE_COMMONKEY" (missing '$') and could never fire.
function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the default-route interface, its IP and MTU; these drive
    # docker networking and the VCA api proxy default.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Resolve the VCA (juju controller) host: either bootstrap a local
    # controller, or register the external LXD/controller the user provided.
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # No cloud file given: generate cloud + certificate-based
                # credential yamls for the local LXD and register them.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # sed prepends the indentation required by the YAML block scalars below.
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    # Derive the remaining VCA parameters from the controller unless the
    # user supplied them explicitly.
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: the '$' was missing here, so this check could never fire.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # BUGFIX: added the missing space before ']' — the previous
        # "$INSTALL_PLA"] always errored, silently skipping PLA.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Telemetry ping marking a completed installation.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1148
# Build and start the vim-emu (emulated VIM) docker container, then print
# the osm command needed to register it as a VIM.
function install_vimemu() {
    # BUGFIX: plain echo printed a literal "\n"; -e matches the other
    # escape-using echos in this function.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1181
# Run the bundled helper that deploys the OSM monitoring stack on k8s.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1187
# Run the bundled helper that removes the OSM monitoring stack from k8s.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1192
# Print every installer option/variable for --showopts debugging.
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUGFIX: this line previously printed $OSM_STACK_NAME (copy/paste error).
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1232
# Report an installer progress event to OSM telemetry (woopra).
# $1 - event step name; it is prefixed with the install flavour
#      (bin/binsrc/lxd/lw) and tagged with seconds elapsed since start.
function track(){
    ctime=`date +%s`
    # SESSION_ID is the installer start timestamp, so this is elapsed time.
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Fire-and-forget; output discarded and failures ignored.
    wget -q -O /dev/null $url
}
1246
1247 UNINSTALL=""
1248 DEVELOP=""
1249 UPDATE=""
1250 RECONFIGURE=""
1251 TEST_INSTALLER=""
1252 INSTALL_LXD=""
1253 SHOWOPTS=""
1254 COMMIT_ID=""
1255 ASSUME_YES=""
1256 INSTALL_FROM_SOURCE=""
1257 RELEASE="ReleaseSEVEN"
1258 REPOSITORY="stable"
1259 INSTALL_VIMEMU=""
1260 INSTALL_PLA=""
1261 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1262 LXD_REPOSITORY_PATH=""
1263 INSTALL_LIGHTWEIGHT="y"
1264 INSTALL_ONLY=""
1265 INSTALL_ELK=""
1266 TO_REBUILD=""
1267 INSTALL_NOLXD=""
1268 INSTALL_NODOCKER=""
1269 INSTALL_NOJUJU=""
1270 KUBERNETES=""
1271 INSTALL_K8S_MONITOR=""
1272 INSTALL_NOHOSTCLIENT=""
1273 SESSION_ID=`date +%s`
1274 OSM_DEVOPS=
1275 OSM_VCA_HOST=
1276 OSM_VCA_SECRET=
1277 OSM_VCA_PUBKEY=
1278 OSM_VCA_CLOUDNAME="localhost"
1279 OSM_STACK_NAME=osm
1280 NO_HOST_PORTS=""
1281 DOCKER_NOBUILD=""
1282 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1283 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1284 WORKDIR_SUDO=sudo
1285 OSM_WORK_DIR="/etc/osm"
1286 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1287 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1288 OSM_HOST_VOL="/var/lib/osm"
1289 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1290 OSM_DOCKER_TAG=latest
1291 DOCKER_USER=opensourcemano
1292 PULL_IMAGES="y"
1293 KAFKA_TAG=2.11-1.0.2
1294 PROMETHEUS_TAG=v2.4.3
1295 GRAFANA_TAG=latest
1296 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1297 PROMETHEUS_CADVISOR_TAG=latest
1298 KEYSTONEDB_TAG=10
1299 OSM_DATABASE_COMMONKEY=
1300 ELASTIC_VERSION=6.4.2
1301 ELASTIC_CURATOR_VERSION=5.5.4
1302 POD_NETWORK_CIDR=10.244.0.0/16
1303 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1304 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1305
1306 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1307 case "${o}" in
1308 b)
1309 COMMIT_ID=${OPTARG}
1310 PULL_IMAGES=""
1311 ;;
1312 r)
1313 REPOSITORY="${OPTARG}"
1314 REPO_ARGS+=(-r "$REPOSITORY")
1315 ;;
1316 c)
1317 [ "${OPTARG}" == "swarm" ] && continue
1318 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1319 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1320 usage && exit 1
1321 ;;
        k)
            # -k <url>: repository public key URL, forwarded to sub-installers
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            # -u <url>: base URL of the OSM package repository
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            # -R <release>: OSM release for binaries (deb packages, lxd images, ...)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            # -D <dir>: use an existing devops tree instead of cloning one later
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # -o <component>: install ONLY the given add-on component.
            # NOTE(review): an unrecognized value is silently ignored here while
            # INSTALL_ONLY is still set -- confirm this is intended.
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            # -m <module>: rebuild the given docker module from source. May be
            # repeated; values accumulate in TO_REBUILD as a space-separated
            # list with a leading space (the " NONE"/" PLA" checks after the
            # loop rely on that exact format).
            # NOTE(review): unknown module names are silently ignored.
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            # -H <ip>: use a specific juju (VCA) host controller IP
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            # -S <secret>: VCA/juju secret key
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # -s <name>: swarm stack name or k8s namespace. For k8s the value
            # must match RE_CHECK (defined earlier in the script).
            # NOTE(review): an invalid namespace exits with status 0 -- confirm
            # whether a non-zero exit was intended.
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            # -t <tag>: docker image tag to deploy
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            # -U <user>: docker registry user for the OSM images
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # -P <file>: read the VCA public key from the given file
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            # -A <addr>: VCA API proxy address
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            # -l <file>: LXD cloud definition file for juju
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            # -L <file>: LXD credentials file for juju
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            # -K <name>: reuse an existing juju controller instead of bootstrapping
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # Long options (--foo). Flags that only concern the charmed
            # installer (bundle, k8s, lxd, lxd-cred, microstack, ha, tag) are
            # accepted here so getopts does not reject them; their values are
            # passed through to charmed_install.sh via "$@" later.
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            # getopts: option is missing its required argument
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            # getopts: unknown short option
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            # -y: assume yes on interactive questions
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1446
# Sanity-check the -m selections collected above (TO_REBUILD has a leading space).
# "-m NONE" is exclusive: reject it when combined with any other module.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && [[ "$TO_REBUILD" == *NONE* ]]; then
    FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi
# "-m PLA" alone only makes sense when the optional PLA module is being installed.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ]; then
    FATAL "Incompatible option: -m PLA cannot be used without --pla option"
fi
1449
# --showopts: just dump the effective configuration variables and stop.
[ -z "$SHOWOPTS" ] || { dump_vars; exit 0; }
1454
if [ -n "$CHARMED" ]; then
    # Charmed (Juju-based) install path: delegate to the charmed_install /
    # charmed_uninstall helpers shipped with the osm-devops package, passing
    # any remaining CLI arguments straight through via "$@".
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"

        # Print copy-paste instructions for configuring osmclient. The
        # '"'"' sequences below deliberately embed single quotes so the
        # backtick/jq command is printed literally, not executed here.
        echo "Your installation is now complete, follow these steps for configuring the osmclient:"
        echo
        echo "1. Get the NBI IP with the following command:"
        echo
        echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
        echo
        echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
        echo
        echo "export OSM_HOSTNAME=\$NBI_IP"
        echo
        echo "3. Add the previous command to your .bashrc for other Shell sessions"
        echo
        echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
        echo
        echo "DONE"
    fi

    # The charmed path never falls through to the docker/k8s installer below.
    exit 0
fi
1480
1481 # if develop, we force master
1482 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1483
1484 need_packages="git wget curl tar"
1485 echo -e "Checking required packages: $need_packages"
1486 dpkg -l $need_packages &>/dev/null \
1487 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1488 || sudo apt-get update \
1489 || FATAL "failed to run apt-get update"
1490 dpkg -l $need_packages &>/dev/null \
1491 || ! echo -e "Installing $need_packages requires root privileges." \
1492 || sudo apt-get install -y $need_packages \
1493 || FATAL "failed to install $need_packages"
1494 sudo snap install jq
# Locate (or fetch) the osm-devops tree that drives the rest of the install:
# - if -D was given, OSM_DEVOPS is already set and this section is skipped;
# - with --test, reuse the local devops checkout that contains this script;
# - otherwise clone the devops repo into a temp dir (removed on exit via the
#   EXIT trap) and check out either the requested refspec or the latest
#   stable v* tag.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # Path quoting guards against checkouts living under paths with spaces.
        OSM_DEVOPS="$(dirname "$(realpath "$(dirname "$0")")")"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        # Fail fast instead of continuing against an empty directory.
        # FATAL is expected to be defined earlier in this script.
        git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS" \
            || FATAL "failed to clone the devops repository"

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Quote the tag pattern so the shell cannot glob-expand it against
            # files in the current directory; sort -V picks the highest version.
            LATEST_STABLE_DEVOPS=$(git -C "$OSM_DEVOPS" tag -l 'v[0-9].*' | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C "$OSM_DEVOPS" checkout "$COMMIT_ID" \
            || FATAL "failed to check out $COMMIT_ID"
    fi
fi
1519
1520 . $OSM_DEVOPS/common/all_funcs
1521
1522 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1523 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1524 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1525 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1526 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1527 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1528
#Installation starts here
# Best-effort telemetry ping: fetching this README marks an installation start
# on the ETSI download server; output and errors are deliberately discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Default (lightweight/docker) install path; exits here on success.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    # Interactive confirmation for the long source build unless -y was given.
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Telemetry ping marking the end of the installation (best effort).
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"
1554
1555