Added Grafana chart to K8s-based OSM. Base for merging with k8s_monitor
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the command-line help for this installer.
    # Fixed here: "confifured" -> "configured", and "--nojuju: do not juju"
    # -> "do not install juju" (verb was missing).
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
    echo -e " --charmed: install OSM with charms"
    echo -e " --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " --controller <name>: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " --lxd-cloud <yaml path>: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " --microstack: Installs microstack as a vim. (--charmed option)"
    echo -e " --tag: Docker image tag"

}
73
74 # takes a juju/accounts.yaml file and returns the password specific
75 # for a controller. I wrote this using only bash tools to minimize
76 # additions of other packages
77 function parse_juju_password {
78 password_file="${HOME}/.local/share/juju/accounts.yaml"
79 local controller_name=$1
80 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
81 sed -ne "s|^\($s\):|\1|" \
82 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
83 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
84 awk -F$fs -v controller=$controller_name '{
85 indent = length($1)/2;
86 vname[indent] = $2;
87 for (i in vname) {if (i > indent) {delete vname[i]}}
88 if (length($3) > 0) {
89 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
90 if (match(vn,controller) && match($2,"password")) {
91 printf("%s",$3);
92 }
93 }
94 }'
95 }
96
function generate_secret() {
    # Generate a 32-character alphanumeric random secret on stdout.
    # Read /dev/urandom directly instead of piping it through `head`
    # first: `head /dev/urandom` only passes the first 10 "lines" of
    # random bytes, which after filtering to [A-Za-z0-9] can yield fewer
    # than 32 characters. Reading the stream directly always yields 32.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
100
function remove_volumes() {
    # Delete the persistent storage of an OSM deployment.
    # K8s mode:   $1 is a host directory tree to wipe.
    # Swarm mode: $1 is the stack name; its named docker volumes are removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        # One "docker volume rm" per well-known OSM volume of this stack
        for volume in mongo_db mon_db osm_packages ro_db pol_db prom_db ro; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
114
function remove_network() {
    # Delete the docker network "net<stack>" belonging to the given stack.
    # $1 - stack name
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
119
function remove_iptables() {
    # Remove the DNAT rule that forwarded the juju API port (17070) from
    # the host's default-route IP to the VCA host, then persist the change.
    # $1 - stack name (also the juju controller name) used to resolve the
    #      VCA host when OSM_VCA_HOST is not already set
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from juju's api-endpoints field
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Locate the interface holding the default route, then its IPv4 address
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule when it is currently present (-C checks existence)
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
140
function remove_stack() {
    # Remove a docker swarm stack and wait (up to 30s) until every
    # container of the stack has gone away.
    # $1 - stack name
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        attempt=0
        result=1
        while [ ${attempt} -lt 30 ]; do
            # wc -l drops to 0 once no containers of the stack remain
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            [ "${result}" == "0" ] && break
            attempt=$((attempt+1))
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
164
#removes osm deployments and services
function remove_k8s_namespace() {
    # Dropping the whole namespace removes every resource deployed in it.
    # $1 - namespace name
    local namespace=$1
    kubectl delete ns $namespace
}
169
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Only tear helm/tiller down when no releases remain, so other
    # deployments that still use helm are left untouched.
    if [ -z "$(helm ls -q)" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
180
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 flavours of the client package.
    for client_pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y $client_pkg
    done
}
186
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Tear down an OSM deployment (k8s or swarm flavour).
    # With INSTALL_ONLY set, only the requested addon is removed (here: ELK).
    # Relies on globals: INSTALL_ONLY, INSTALL_ELK, KUBERNETES,
    # INSTALL_K8S_MONITOR, OSM_STACK_NAME, OSM_DOCKER_WORK_DIR,
    # DOCKER_USER, OSM_DOCKER_TAG, OSM_HOST_VOL, CONTROLLER_NAME,
    # WORKDIR_SUDO.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Run the image removals inside the docker group via a newgrp heredoc
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # K8s volumes live under the host volume dir, one tree per namespace
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # When an external juju controller is used, leave it (and the NAT
        # rule pointing at it) alone
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
238
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Install iptables-persistent (preseeded so it never prompts) when it
    # is not present yet.
    # BUG FIX: the original tested `if dpkg -l iptables-persistent`, which
    # succeeds when the package IS installed — i.e. the install branch ran
    # exactly when it should not. The condition must be negated.
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so the package installs without interactive prompts
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
249
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # iptables-persistent must be present so the NAT rules survive reboots
    check_install_iptables_persistent

    # Delegate the actual rule setup to the nat_osm helper script
    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
258
function FATAL(){
    # Report an unrecoverable installation error and terminate the script.
    # $1 - human-readable reason, printed inside double quotes
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
263
function install_lxd() {
    # Install LXD from snap (replacing any deb-based LXC/LXD) and
    # initialise it from the preseed shipped with osm-devops.
    # Requires $DEFAULT_IP for the HTTPS listen address.
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Inject the host IP into the preseed's core.https_address before init
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Match the default profile's eth0 MTU to the default-route interface
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
287
function ask_user(){
    # Ask the user a yes/no question and parse the answer. Case insensitive.
    # Params: $1 text to ask; $2 default action: 'y' = yes, 'n' = no,
    #         anything else (or empty) = no default, keep asking.
    # Return: true(0) if the user answers 'yes'; false(1) on 'no'.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        if [ -z "$USER_CONFIRMATION" ]; then
            # Empty answer: fall back to the default action, if any
            [ "$2" == 'y' ] && return 0
            [ "$2" == 'n' ] && return 1
        fi
        case "${USER_CONFIRMATION,,}" in
            yes|y) return 0 ;;
            no|n)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
301
function install_osmclient(){
    # Install the OSM client and information model from the configured apt
    # repository, then print OSM_HOSTNAME hints for the user's .bashrc.
    # Strip the option prefixes that these globals were stored with
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight installs run SO/RO in LXD containers named SO-ub / RO;
    # pick their IPs up from `lxc list`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
332
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter as a systemd service running as a
    # dedicated system user. Version taken from
    # $PROMETHEUS_NODE_EXPORTER_TAG. Idempotent: skips when already active.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Create the service user only if it does not exist yet
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Fetch the release tarball, install the binary, then clean up /tmp
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register and start the systemd unit shipped with osm-devops
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
358
function uninstall_prometheus_nodeexporter(){
    # Reverse install_prometheus_nodeexporter: stop and disable the
    # service, then remove the unit file, the service user and the binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
368
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce, and adds
    # the current user to the "docker" group so later sg/docker calls work.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    # short pause before restarting the daemon after the group change
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # Run via sg so the fresh group membership is effective in this session
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
388
function install_docker_compose() {
    # installs and configures docker-compose
    # The version can be overridden by exporting DOCKER_COMPOSE_VERSION;
    # it defaults to 1.18.0, the version this installer has always pinned,
    # so existing behaviour is unchanged.
    local compose_version=${DOCKER_COMPOSE_VERSION:-1.18.0}
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/${compose_version}/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
396
function install_juju() {
    # Install the juju snap (classic confinement) and make sure the snap
    # bin directory is on PATH for the remainder of this script.
    echo "Installing juju"
    sudo snap install juju --classic
    case ":$PATH:" in
        *":/snap/bin:"*) ;;
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    echo "Finished installation of juju"
    return 0
}
404
function juju_createcontroller() {
    # Bootstrap a juju controller named after the stack unless one with
    # that name already exists, then verify it shows up in the listing.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # FIX: $1 must reach awk unexpanded (\$1). The original let the shell
    # expand it to the (empty) positional parameter, so awk printed the
    # whole line instead of the first field; wc -l masked the bug because
    # the line count was the same either way.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
413
function juju_createproxy() {
    # Ensure a PREROUTING DNAT rule forwards the juju API port (17070)
    # from the host's default IP to the VCA host, and persist it.
    check_install_iptables_persistent

    # Nothing to do when the rule already exists (-C checks existence)
    sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST && return

    sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
    sudo netfilter-persistent save
}
422
function generate_docker_images() {
    # Pull third-party support images and build or pull the OSM module
    # images. Behaviour is driven by globals:
    #   PULL_IMAGES - when set, pull module images from ${DOCKER_USER}
    #                 instead of cloning and building from source
    #   TO_REBUILD  - space-separated module names to handle; empty = all
    #   COMMIT_ID   - refspec checked out before building (default master)
    #   LWTEMPDIR   - scratch dir used for the git clones
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Common --build-arg values, forwarded to the osmclient build below
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # ---- third-party support images ----
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # ---- OSM module images: pull (PULL_IMAGES) or clone + build ----
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set or explicitly
    # requested via TO_REBUILD
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    # NBI and its keystone companion are built from the same repo
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this repeats the PROMETHEUS-CADVISOR pull above and is
    # also reached when TO_REBUILD contains plain PROMETHEUS (grep -q
    # PROMETHEUS matches PROMETHEUS-CADVISOR too) — presumably intentional
    # so cadvisor always accompanies prometheus; confirm before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
534
function cmp_overwrite() {
    # Copy $1 over $2 unless the two files are already identical.
    # If $2 exists and differs, ask the user before overwriting; cp -b
    # keeps a backup of the replaced file either way.
    # $1 - source file
    # $2 - destination file
    file1="$1"
    file2="$2"
    # FIX: the original wrote `if ! $(cmp ...)`, which only worked through
    # bash's quirk that an empty command after substitution reuses the
    # substitution's exit status. Run cmp directly instead.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
546
547 function generate_docker_env_files() {
548 echo "Doing a backup of existing env files"
549 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
550 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
551 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
552 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
553 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
554 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
555 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
556 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
557 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
558
559 echo "Generating docker env files"
560 if [ -n "$KUBERNETES" ]; then
561 #Kubernetes resources
562 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
563 else
564 # Docker-compose
565 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
566 if [ -n "$INSTALL_PLA" ]; then
567 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
568 fi
569
570 # Prometheus files
571 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
572 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
573
574 # Grafana files
575 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
576 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
577 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
578 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
579 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
580
581 # Prometheus Exporters files
582 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
583 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
584 fi
585
586 # LCM
587 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
588 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
589 fi
590
591 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
592 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
593 else
594 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
595 fi
596
597 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
598 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
599 else
600 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
601 fi
602
603 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
604 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
605 else
606 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
607 fi
608
609 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
610 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
611 else
612 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
613 fi
614
615 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
616 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
617 else
618 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
619 fi
620
621 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
622 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
623 fi
624
625 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
626 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
627 fi
628
629 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
630 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
631 else
632 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
633 fi
634
635 # RO
636 MYSQL_ROOT_PASSWORD=$(generate_secret)
637 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
638 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
639 fi
640 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
641 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
642 fi
643
644 # Keystone
645 KEYSTONE_DB_PASSWORD=$(generate_secret)
646 SERVICE_PASSWORD=$(generate_secret)
647 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
648 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
649 fi
650 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
651 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
652 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
653 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
654 fi
655
656 # NBI
657 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
658 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
659 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
660 fi
661
662 # MON
663 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
664 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
665 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
666 fi
667
668 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
669 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
670 else
671 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
672 fi
673
674 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
675 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
676 else
677 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
678 fi
679
680 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
681 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
682 else
683 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
684 fi
685
686 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
687 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
688 else
689 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
690 fi
691
692
693 # POL
694 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
695 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
696 fi
697
698 # LW-UI
699 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
700 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
701 fi
702
703 echo "Finished generation of docker env files"
704 }
705
function generate_osmclient_script () {
    # Write a one-line wrapper that runs the osmclient sidecar container
    # attached to the OSM overlay network, then make it executable.
    local osmclient_wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$osmclient_wrapper"
    $WORKDIR_SUDO chmod +x "$osmclient_wrapper"
    echo "osmclient sidecar container can be found at: $osmclient_wrapper"
}
711
#installs kubernetes packages
function install_kube() {
    # Adds the upstream Kubernetes apt repository and installs a pinned
    # 1.15.0 kubelet/kubeadm/kubectl, the version these manifests target.
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Google-hosted signing key for the kubernetes apt repository
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # versions are pinned so cluster and manifests stay in a tested combination
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
721
#initializes kubernetes control plane
function init_kubeadm() {
    # Bootstrap the Kubernetes control plane.
    # $1 - path to the kubeadm cluster-config yaml
    # kubelet refuses to run with swap enabled, so disable it first
    sudo swapoff -a
    # quote the config path so a path with spaces cannot be word-split
    sudo kubeadm init --config "$1"
    sleep 5
}
728
function kube_config_dir() {
    # Make kubectl usable for the installing (non-root) user by copying the
    # admin kubeconfig generated by kubeadm into ~/.kube/config.
    # quoted paths: robust against spaces in $K8S_MANIFEST_DIR / $HOME
    [ ! -d "$K8S_MANIFEST_DIR" ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    # hand ownership of the copied kubeconfig to the invoking user
    sudo chown $(id -u):$(id -g) "$HOME/.kube/config"
}
735
736 #deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a throw-away directory and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this replaces any EXIT trap the caller installed
    # (e.g. install_lightweight's LWTEMPDIR cleanup) — confirm intended
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR"
    # fail the install immediately if the manifest cannot be applied
    kubectl apply -f "$CNI_DIR" || FATAL "Cannot Install Flannel"
}
744
745 #creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic k8s secret per component,
    # each populated from that component's generated env file.
    local component
    kubectl create ns $OSM_STACK_NAME
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
757
758 #deploys osm pods and services
function deploy_osm_services() {
    # Allow workloads on the master node (single-node cluster), then apply
    # every OSM pod/service manifest into the stack's namespace.
    # single awk pass: select rows whose ROLES column matches "master" and
    # print the node name (same output as the original two-stage pipeline)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
765
function deploy_osm_pla_service() {
    # Deploy the optional Placement (PLA) module into the OSM namespace.
    # corresponding to parse_yaml: retag the PLA image unless on release tag "7".
    # BUG FIX: quote $OSM_DOCKER_TAG — unquoted, an empty tag made the test
    # collapse to '[ ! == "7" ]' and error out.
    [ ! "$OSM_DOCKER_TAG" == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at this stack's dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
774
775 #Install helm and tiller
function install_helm() {
    # Install the helm v2 client (if not already present) and configure the
    # tiller server-side component with a cluster-admin service account.
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            # READY column of the tiller deployment, e.g. "1/1"
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # BUG FIX: this previously assigned to 'num' instead of 'counter',
            # so the timeout never advanced and the wait could spin forever
            counter=$((counter + 2))
            sleep 2
        done
    fi
}
807
function parse_yaml() {
    # Rewrite the opensourcemano image reference in each per-service k8s
    # manifest so it points at the requested docker tag ($1).
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$svc:.*/opensourcemano\/$svc:$TAG/g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
815
function namespace_vol() {
    # Rewrite the hostPath volumes of every stateful service manifest so the
    # data lives under this stack's directory instead of the shared default.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local svc
    for svc in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$svc.yaml
    done
}
822
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertising on the default IP.
    # When the host MTU differs from the standard 1500, pre-create the
    # docker_gwbridge network with a matching MTU so overlay traffic is not
    # fragmented or silently dropped.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # names of all existing docker networks (header row "ID" filtered out)
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # highest 172.x subnet currently in use, bumped by one in the second
        # octet; prints "-1" when the octet is exhausted (255)
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
832
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # using the MTU detected on the host's default interface.
    echo "creating network"
    local mtu_opt="com.docker.network.driver.mtu=${DEFAULT_MTU}"
    sg docker -c "docker network create --driver=overlay --attachable --opt ${mtu_opt} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
838
function deploy_lightweight() {
    # Deploy the OSM docker stack on the local swarm: compute service/host
    # port mappings, persist them (plus image tags) into osm_ports.sh, then
    # run 'docker stack deploy' with the generated compose file(s).

    echo "Deploying lightweight build"
    # internal service ports of each OSM component
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # prometheus is published on 9091 to avoid clashing with RO's 9090
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # ports reachable only inside the overlay network (no host binding)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container port publications
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # persist ports, network name and image tags for docker-compose interpolation
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # deploy from the work dir so the relative '. ./osm_ports.sh' resolves
    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
897
function deploy_elk() {
    # Deploy the optional ELK stack (Elasticsearch/Kibana + beats + curator)
    # for log collection: pull images, deploy the osm_elk compose stack, wait
    # for Kibana, then pre-create the default filebeat-* index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # remove any previous osm_elk stack before redeploying
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # poll Kibana's status endpoint every $step seconds, up to $timelength
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered; print the manual setup commands instead
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
947
function install_lightweight() {
    # Main flow of the lightweight (container-based) OSM installation:
    # prepare work dirs, install prerequisites (LXD/juju/docker, optionally
    # kubernetes), generate env files and deploy OSM on k8s or docker swarm.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # determine the host's default interface, IP and MTU
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # discover/bootstrap the juju (VCA) controller unless one was provided
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # generate an lxd-cloud definition plus a client certificate
                # and register both with the external controller
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # certs are re-indented so they nest under the YAML block scalars
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUG FIX: the check previously tested the literal string
        # "OSM_DATABASE_COMMONKEY" (missing '$'), so it could never fire
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # BUG FIX: was '[ -n "$INSTALL_PLA"]' (missing space before ']'),
        # which was true when INSTALL_PLA was empty and errored when it was set
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1147
function install_vimemu() {
    # Build and launch the vim-emu (emulated VIM) docker container.
    # BUG FIX: use 'echo -e' so "\n" prints as a newline instead of literally
    # (consistent with the other echo -e calls in this function)
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE(review): this replaces any previously installed EXIT trap — confirm
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # BUG FIX: pass -y so an unattended install cannot hang on the apt prompt
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1180
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack via the devops helper scripts.
    local helper_dir="$OSM_DEVOPS/installers/k8s"
    $WORKDIR_SUDO chmod +x "$helper_dir"/*.sh
    $WORKDIR_SUDO "$helper_dir/install_osm_k8s_monitoring.sh"
}
1186
function uninstall_k8s_monitoring() {
    # Tear down the OSM monitoring stack using the devops helper script.
    local helper_dir="$OSM_DEVOPS/installers/k8s"
    $WORKDIR_SUDO "$helper_dir/uninstall_osm_k8s_monitoring.sh"
}
1191
function dump_vars(){
    # Print the resolved value of every installer option (used by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUG FIX: previously echoed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1231
function track(){
    # Report an anonymous installation-progress event to the OSM telemetry
    # endpoint. $1 is the checkpoint name; the event name is prefixed with
    # the installation type (bin/binsrc/lxd/lw) and the elapsed time since
    # SESSION_ID (the installer start timestamp) is sent as the duration.
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # quote the URL so its '&'-separated parameters can never be glob-expanded
    wget -q -O /dev/null "$url"
}
1245
1246 UNINSTALL=""
1247 DEVELOP=""
1248 UPDATE=""
1249 RECONFIGURE=""
1250 TEST_INSTALLER=""
1251 INSTALL_LXD=""
1252 SHOWOPTS=""
1253 COMMIT_ID=""
1254 ASSUME_YES=""
1255 INSTALL_FROM_SOURCE=""
1256 RELEASE="ReleaseSEVEN"
1257 REPOSITORY="stable"
1258 INSTALL_VIMEMU=""
1259 INSTALL_PLA=""
1260 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1261 LXD_REPOSITORY_PATH=""
1262 INSTALL_LIGHTWEIGHT="y"
1263 INSTALL_ONLY=""
1264 INSTALL_ELK=""
1265 TO_REBUILD=""
1266 INSTALL_NOLXD=""
1267 INSTALL_NODOCKER=""
1268 INSTALL_NOJUJU=""
1269 KUBERNETES=""
1270 INSTALL_K8S_MONITOR=""
1271 INSTALL_NOHOSTCLIENT=""
1272 SESSION_ID=`date +%s`
1273 OSM_DEVOPS=
1274 OSM_VCA_HOST=
1275 OSM_VCA_SECRET=
1276 OSM_VCA_PUBKEY=
1277 OSM_VCA_CLOUDNAME="localhost"
1278 OSM_STACK_NAME=osm
1279 NO_HOST_PORTS=""
1280 DOCKER_NOBUILD=""
1281 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1282 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1283 WORKDIR_SUDO=sudo
1284 OSM_WORK_DIR="/etc/osm"
1285 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1286 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1287 OSM_HOST_VOL="/var/lib/osm"
1288 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1289 OSM_DOCKER_TAG=latest
1290 DOCKER_USER=opensourcemano
1291 PULL_IMAGES="y"
1292 KAFKA_TAG=2.11-1.0.2
1293 PROMETHEUS_TAG=v2.4.3
1294 GRAFANA_TAG=latest
1295 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1296 PROMETHEUS_CADVISOR_TAG=latest
1297 KEYSTONEDB_TAG=10
1298 OSM_DATABASE_COMMONKEY=
1299 ELASTIC_VERSION=6.4.2
1300 ELASTIC_CURATOR_VERSION=5.5.4
1301 POD_NETWORK_CIDR=10.244.0.0/16
1302 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1303 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1304
1305 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1306 case "${o}" in
1307 b)
1308 COMMIT_ID=${OPTARG}
1309 PULL_IMAGES=""
1310 ;;
1311 r)
1312 REPOSITORY="${OPTARG}"
1313 REPO_ARGS+=(-r "$REPOSITORY")
1314 ;;
1315 c)
1316 [ "${OPTARG}" == "swarm" ] && continue
1317 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1318 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1319 usage && exit 1
1320 ;;
1321 k)
1322 REPOSITORY_KEY="${OPTARG}"
1323 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1324 ;;
1325 u)
1326 REPOSITORY_BASE="${OPTARG}"
1327 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1328 ;;
1329 R)
1330 RELEASE="${OPTARG}"
1331 REPO_ARGS+=(-R "$RELEASE")
1332 ;;
1333 D)
1334 OSM_DEVOPS="${OPTARG}"
1335 ;;
1336 o)
1337 INSTALL_ONLY="y"
1338 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1339 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1340 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1341 ;;
1342 m)
1343 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1344 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1345 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1346 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1347 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1348 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1349 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1350 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1351 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1352 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1353 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1354 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1355 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1356 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1357 ;;
1358 H)
1359 OSM_VCA_HOST="${OPTARG}"
1360 ;;
1361 S)
1362 OSM_VCA_SECRET="${OPTARG}"
1363 ;;
1364 s)
1365 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1366 ;;
1367 w)
1368 # when specifying workdir, do not use sudo for access
1369 WORKDIR_SUDO=
1370 OSM_WORK_DIR="${OPTARG}"
1371 ;;
1372 t)
1373 OSM_DOCKER_TAG="${OPTARG}"
1374 ;;
1375 U)
1376 DOCKER_USER="${OPTARG}"
1377 ;;
1378 P)
1379 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1380 ;;
1381 A)
1382 OSM_VCA_APIPROXY="${OPTARG}"
1383 ;;
1384 l)
1385 LXD_CLOUD_FILE="${OPTARG}"
1386 ;;
1387 L)
1388 LXD_CRED_FILE="${OPTARG}"
1389 ;;
1390 K)
1391 CONTROLLER_NAME="${OPTARG}"
1392 ;;
1393 -)
1394 [ "${OPTARG}" == "help" ] && usage && exit 0
1395 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1396 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1397 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1398 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1399 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1400 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1401 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1402 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1403 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1404 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1405 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1406 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1407 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1408 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1409 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1410 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1411 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1412 [ "${OPTARG}" == "pullimages" ] && continue
1413 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1414 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1415 [ "${OPTARG}" == "bundle" ] && continue
1416 [ "${OPTARG}" == "kubeconfig" ] && continue
1417 [ "${OPTARG}" == "lxdendpoint" ] && continue
1418 [ "${OPTARG}" == "lxdcert" ] && continue
1419 [ "${OPTARG}" == "microstack" ] && continue
1420 [ "${OPTARG}" == "tag" ] && continue
1421 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1422 echo -e "Invalid option: '--$OPTARG'\n" >&2
1423 usage && exit 1
1424 ;;
1425 :)
1426 echo "Option -$OPTARG requires an argument" >&2
1427 usage && exit 1
1428 ;;
1429 \?)
1430 echo -e "Invalid option: '-$OPTARG'\n" >&2
1431 usage && exit 1
1432 ;;
1433 h)
1434 usage && exit 0
1435 ;;
1436 y)
1437 ASSUME_YES="y"
1438 ;;
1439 *)
1440 usage && exit 1
1441 ;;
1442 esac
1443 done
1444
# -m sanity checks: "NONE" (rebuild nothing) is exclusive and cannot be
# combined with other -m modules; "PLA" alone only makes sense with --pla.
# $TO_REBUILD is quoted so grep receives the whole list as one argument
# (SC2086 — unquoted expansion would word-split/glob).
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# --showopts: dump the effective configuration and exit without installing
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
1452
# Charmed (Juju-bundle based) installation path: delegate the whole
# install/uninstall to the charmed_* helper scripts shipped with the
# osm-devops package, print osmclient configuration steps, then exit.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        # NOTE(review): -t passes $DOCKER_TAG, while option -t above sets
        # OSM_DOCKER_TAG — confirm DOCKER_TAG is assigned earlier in the file.
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    # Fixed instruction: a bare 'export VAR=... >> file' appends nothing
    # (it redirects export's empty stdout); the export line itself must be
    # echoed into .bashrc.
    echo "echo \"export OSM_HOSTNAME=<NBI-IP>\" >> ~/.bashrc"
    echo
    echo "DONE"

    exit 0
fi
1478
1479 # if develop, we force master
1480 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1481
1482 need_packages="git wget curl tar"
1483 echo -e "Checking required packages: $need_packages"
1484 dpkg -l $need_packages &>/dev/null \
1485 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1486 || sudo apt-get update \
1487 || FATAL "failed to run apt-get update"
1488 dpkg -l $need_packages &>/dev/null \
1489 || ! echo -e "Installing $need_packages requires root privileges." \
1490 || sudo apt-get install -y $need_packages \
1491 || FATAL "failed to install $need_packages"
1492 sudo snap install jq
# Locate (or fetch) the osm-devops repository that drives the installation:
#   -D <path> : use the given devops checkout as-is (OSM_DEVOPS preset);
#   --test    : use the local repo this script lives in;
#   otherwise : clone osm/devops into a temp dir and check out either the
#               requested refspec (-b) or the latest stable v* tag.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Remove the temporary clone on any exit path
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS"

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Highest version-sorted v* tag is taken as the current stable
            # release. The pattern is quoted so the shell cannot glob-expand
            # 'v[0-9].*' against files in the current directory; $(...) is
            # used instead of legacy backticks.
            LATEST_STABLE_DEVOPS=$(git -C "$OSM_DEVOPS" tag -l 'v[0-9].*' | sort -V | tail -n1)
            # NOTE(review): exits 0 (success) even though no release was
            # found — consistent with other early-exit paths in this
            # installer; confirm before changing to a non-zero status.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C "$OSM_DEVOPS" checkout "$COMMIT_ID"
    fi
fi
1517
1518 . $OSM_DEVOPS/common/all_funcs
1519
1520 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1521 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1522 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1523 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1524 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1525 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1526
#Installation starts here
# Anonymous usage tracking: fetching this README marks an installation start
# on the OSM download server; output and errors are discarded on purpose.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Lightweight (container-based) install is the common path and exits here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    # Interactive confirmation for the slow source build; -y skips it.
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
# vim-emu needs docker-ce on the host before its own installer can run
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Tracking end marker, mirroring the README fetch at installation start.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"