Merge "Feature 8839: Modified Installation script to install OSM with NGUI. Default...
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print the installer's command-line help to stdout.
# Fixes user-facing typos in the help text: "confifured" -> "configured",
# "moitoring" -> "monitoring".
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
#    echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
#    echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
    echo -e " --charmed: install OSM with charms"
    echo -e " --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " --controller <name>: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " --lxd-cloud <yaml path>: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " --microstack: Installs microstack as a vim. (--charmed option)"
    echo -e " --tag: Docker image tag"

}
74
75 # takes a juju/accounts.yaml file and returns the password specific
76 # for a controller. I wrote this using only bash tools to minimize
77 # additions of other packages
78 function parse_juju_password {
79 password_file="${HOME}/.local/share/juju/accounts.yaml"
80 local controller_name=$1
81 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
82 sed -ne "s|^\($s\):|\1|" \
83 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
84 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
85 awk -F$fs -v controller=$controller_name '{
86 indent = length($1)/2;
87 vname[indent] = $2;
88 for (i in vname) {if (i > indent) {delete vname[i]}}
89 if (length($3) > 0) {
90 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
91 if (match(vn,controller) && match($2,"password")) {
92 printf("%s",$3);
93 }
94 }
95 }'
96 }
97
function generate_secret() {
    # Produce a random 32-character alphanumeric secret on stdout
    # (no trailing newline), sourced from /dev/urandom.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
101
function remove_volumes() {
    # Delete the persistent storage of an OSM deployment.
    # Kubernetes install: $1 is the host directory backing the volumes
    # (see caller passing ${OSM_HOST_VOL}/${OSM_STACK_NAME}) - removed with rm.
    # Swarm install: $1 is the stack name - each named docker volume is removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        # WORKDIR_SUDO is empty or "sudo" depending on workdir ownership
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
115
function remove_network() {
    # Remove the docker network created for stack "$1"
    # (the networks are named "net<stack>").
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
120
function remove_iptables() {
    # Delete the DNAT rule that forwards host port 17070 (juju API) to the
    # VCA controller, then persist the change. $1 is the stack/controller name.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Derive the controller IP from "juju show-controller" when not preset.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Find the interface holding the default route, then its IPv4 address.
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it currently exists (-C checks for presence).
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
141
function remove_stack() {
    # Remove docker swarm stack "$1" and wait (polling once per second, up
    # to 30 attempts) until all of its tasks have disappeared.
    # FATAL if some containers are still listed after the timeout.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # count the remaining task lines; 0 means everything is gone
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # give the swarm a moment to release names/networks before reuse
        sleep 5
    fi
}
165
166 #removes osm deployments and services
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace removes every OSM resource deployed inside it.
    local namespace="$1"
    kubectl delete ns "${namespace}"
}
170
171 #Uninstall osmclient
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 flavours of the OSM client package,
    # in that order (same commands as before, expressed as a loop).
    local client_pkg
    for client_pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$client_pkg"
    done
}
176
177 #Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Remove a lightweight (docker-swarm or kubernetes based) OSM install.
    # With INSTALL_ONLY set, only the requested addon (ELK) is removed;
    # otherwise the whole deployment (stack or namespace), the OSM module
    # images, volumes, NAT rules, juju controller and osmclient are removed.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp runs the here-document in a shell with the docker group active
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG
        # remove whichever UI image this installation used
        if [ -n "$NGUI" ]; then
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
EONG
        else
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
EONG
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # NAT rules and the juju controller belong to this install only when
        # no external controller name was supplied
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
236
237 #Safe unattended install of iptables-persistent
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Install iptables-persistent non-interactively if it is missing.
    echo -e "\nChecking required packages: iptables-persistent"
    # BUG FIX: the check must be negated. "dpkg -l <pkg>" succeeds when the
    # package IS installed, so the original condition installed it only when
    # it was already present (and the "Not installed." message proves the
    # branch is meant for the missing-package case).
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        # Pre-seed debconf so apt never prompts about saving current rules.
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
247
248 #Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Apply the OSM NAT rules via the devops helper script (requires root);
    # makes sure iptables-persistent is available first so the rules survive
    # a reboot.
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
256
function FATAL(){
    # Report an unrecoverable installer error ($1 = reason) and abort
    # the whole script with exit status 1.
    local reason="$1"
    echo "FATAL error: Cannot install OSM due to \"$reason\""
    exit 1
}
261
function install_lxd() {
    # Install and configure the LXD snap (replacing any deb-based LXD),
    # preseed it to listen on $DEFAULT_IP:8443, and match the default
    # profile's MTU to the host's default-route interface.
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # inject the https listen address into the preseed before feeding it
    # to "lxd init"
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
285
function ask_user(){
    # Ask the user a yes/no question (case insensitive).
    # Params: $1 prompt text; $2 default answer when the user just presses
    #         Enter: 'y' => yes, 'n' => no, anything else => an explicit
    #         answer is required.
    # Return: 0 when the answer is yes, 1 when it is no.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        if [ -z "$USER_CONFIRMATION" ]; then
            # empty answer: fall back to the default, if one was given
            [ "$2" == 'y' ] && return 0
            [ "$2" == 'n' ] && return 1
        else
            case "${USER_CONFIRMATION,,}" in
                y|yes) return 0 ;;
                n|no)  return 1 ;;
            esac
        fi
        # unrecognized (or empty with no default): ask again
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
299
function install_osmclient(){
    # Install the OSM client and Information Model from the configured APT
    # repository, plus the python dependencies they require, and print hints
    # about the OSM_HOSTNAME environment variables.
    CLIENT_RELEASE=${RELEASE#"-R "}            # strip the "-R " flag prefix kept in $RELEASE
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}      # strip the "-r " flag prefix
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}   # strip the "-u " flag prefix
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # For non-lightweight (lxd container) installs, point the client at the
    # SO-ub and RO container addresses reported by lxc.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
330
function install_prometheus_nodeexporter(){
    # Install and start the Prometheus node_exporter as a systemd service,
    # running as a dedicated no-login system user. No-op if the service is
    # already active. Version comes from $PROMETHEUS_NODE_EXPORTER_TAG.
    if (systemctl -q is-active node_exporter)
        then
            echo "Node Exporter is already running."
        else
            echo "Node Exporter is not active, installing..."
            if getent passwd node_exporter > /dev/null 2>&1; then
                echo "node_exporter user exists"
            else
                echo "Creating user node_exporter"
                sudo useradd --no-create-home --shell /bin/false node_exporter
            fi
            # download the release tarball, install the binary and clean up
            wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
            sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
            sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
            sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
            sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
            sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
            sudo systemctl daemon-reload
            sudo systemctl restart node_exporter
            sudo systemctl enable node_exporter
            echo "Node Exporter has been activated in this host."
    fi
    return 0
}
356
function uninstall_prometheus_nodeexporter(){
    # Stop, disable and remove the node_exporter systemd unit, its dedicated
    # system user, and the installed binary (reverse of the install above).
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
366
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official APT repository, installs docker-ce, and puts the
    # current user in the "docker" group. FATAL if "docker version" fails
    # afterwards.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # "sg docker" runs the check with the new group membership without
    # requiring a re-login
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
386
function install_docker_compose() {
    # Download the docker-compose 1.18.0 binary matching this host's kernel
    # and architecture into /usr/local/bin and make it executable.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
394
function install_juju() {
    # Install the juju snap (classic confinement) and make sure the snap
    # binary directory is on PATH for the rest of this script.
    echo "Installing juju"
    sudo snap install juju --classic
    case ":$PATH:" in
        *":/snap/bin:"*) ;;                     # already present
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    echo "Finished installation of juju"
    return 0
}
402
function juju_createcontroller() {
    # Bootstrap a juju controller named $OSM_STACK_NAME on $OSM_VCA_CLOUDNAME
    # unless a controller with that name already exists; then verify it shows
    # up exactly once in "juju controllers".
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # BUG FIX: escape $1 so awk (not the shell) expands it. Unescaped, the
    # shell substituted this function's own (empty) first argument, turning
    # the awk action into "{print }" and printing whole lines instead of the
    # controller-name column.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
411
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding host port 17070 (juju API) on
    # $DEFAULT_IP to the VCA controller at $OSM_VCA_HOST, and persist it so
    # it survives reboots.
    check_install_iptables_persistent

    # -C only checks whether the rule exists; add it (-A) when it does not
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
420
function generate_docker_images() {
    # Obtain every docker image OSM needs. Third-party images (kafka, mongo,
    # prometheus, grafana, mariadb, mysql, ...) are always pulled from their
    # registries. OSM module images are either pulled from ${DOCKER_USER}
    # (when PULL_IMAGES is set) or built locally from a fresh gerrit clone in
    # ${LWTEMPDIR} at ${COMMIT_ID}. ${TO_REBUILD}, when non-empty, limits
    # which modules are processed.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # build arguments forwarded to the osmclient "docker build" below
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party images: pulled when TO_REBUILD is empty (build everything)
    # or names the corresponding component.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # mariadb backs keystone, which is rebuilt together with NBI
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM module images: pull vs local build, module by module.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set (or explicitly
    # listed in TO_REBUILD)
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    # NBI and keystone are built from the same repository
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    # exactly one UI image is handled, depending on the -n option (NGUI)
    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        # NOTE(review): -f is given a directory-looking path ($OSM_DEVOPS/docker/osmclient)
        # rather than a Dockerfile - verify this path against the devops repo layout
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this cadvisor pull duplicates the PROMETHEUS-CADVISOR pull
    # above, but is gated on PROMETHEUS - looks redundant; confirm before removing
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
542
function cmp_overwrite() {
    # Copy $1 over $2 unless both files already have identical content.
    # When $2 exists with different content, ask the user before overwriting
    # (default: no); when it does not exist, copy unconditionally.
    file1="$1"
    file2="$2"
    # BUG FIX (idiom): run cmp directly. The original "if ! $(cmp ...)"
    # executed cmp's (redirected, hence empty) output through a command
    # substitution and only worked by accident of bash's empty-command
    # exit-status handling.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
554
555 function generate_docker_env_files() {
556 echo "Doing a backup of existing env files"
557 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
558 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
559 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
560 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
561 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
562 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
563 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
564 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
565 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
566
567 echo "Generating docker env files"
568 if [ -n "$KUBERNETES" ]; then
569 #Kubernetes resources
570 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
571 [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
572 else
573 if [ -n "$NGUI" ]; then
574 # For NG-UI
575 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
576 else
577 # Docker-compose
578 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
579 fi
580 if [ -n "$INSTALL_PLA" ]; then
581 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
582 fi
583
584 # Prometheus files
585 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
586 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
587
588 # Grafana files
589 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
590 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
591 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
592 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
593 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
594
595 # Prometheus Exporters files
596 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
597 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
598 fi
599
600 # LCM
601 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
602 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
603 fi
604
605 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
606 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
607 else
608 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
609 fi
610
611 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
612 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
613 else
614 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
615 fi
616
617 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
618 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
619 else
620 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
621 fi
622
623 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
624 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
625 else
626 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
627 fi
628
629 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
630 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
631 else
632 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
633 fi
634
635 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
636 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
637 fi
638
639 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
640 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
641 fi
642
643 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
644 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
645 else
646 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
647 fi
648
649 # RO
650 MYSQL_ROOT_PASSWORD=$(generate_secret)
651 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
652 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
653 fi
654 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
655 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
656 fi
657
658 # Keystone
659 KEYSTONE_DB_PASSWORD=$(generate_secret)
660 SERVICE_PASSWORD=$(generate_secret)
661 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
662 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
663 fi
664 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
665 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
666 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
667 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
668 fi
669
670 # NBI
671 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
672 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
673 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
674 fi
675
676 # MON
677 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
678 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
679 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
680 fi
681
682 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
683 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
684 else
685 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
686 fi
687
688 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
689 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
690 else
691 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
692 fi
693
694 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
695 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
696 else
697 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
698 fi
699
700 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
701 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
702 else
703 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
704 fi
705
706
707 # POL
708 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
709 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
710 fi
711
712 # LW-UI
713 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
714 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
715 fi
716
717 echo "Finished generation of docker env files"
718 }
719
function generate_osmclient_script () {
    # Write a small wrapper script that runs the osmclient sidecar container
    # attached to the OSM stack network, and make it executable.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee "$wrapper"
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
725
#installs kubernetes packages
function install_kube() {
    # Pinned version: the manifests shipped with this installer are validated
    # against this kubelet/kubeadm/kubectl release.
    local k8s_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Register the upstream Kubernetes apt repository and its signing key.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${k8s_version} kubeadm=${k8s_version} kubectl=${k8s_version}
}
735
#initializes kubernetes control plane
function init_kubeadm() {
    # Bootstrap the control plane from the cluster config file passed as $1.
    # kubeadm refuses to run while swap is enabled, so disable it first.
    sudo swapoff -a
    sudo kubeadm init --config "$1"
    # Give the API server a moment to settle before the next steps.
    sleep 5
}
742
function kube_config_dir() {
    # Copy the admin kubeconfig into the invoking user's ~/.kube so kubectl
    # can be used without sudo. Abort if Kubernetes was never installed
    # (the manifests directory is created by kubeadm).
    [ -d "$K8S_MANIFEST_DIR" ] || FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
}
749
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a throwaway dir and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): bash keeps a single trap per signal, so this overwrites
    # any EXIT trap installed earlier (e.g. the LWTEMPDIR cleanup set in
    # install_lightweight) -- confirm before relying on earlier traps.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Abort the installation if the manifest cannot be applied.
    # Fixed: the previous '[ $? -ne 0 ] && FATAL ...' made this function
    # return 1 when kubectl SUCCEEDED (the test itself fails and the '&&'
    # short-circuits); '|| FATAL' keeps the abort and returns 0 on success.
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
758
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the stack namespace and one generic secret per OSM module, each
    # populated from the corresponding env file produced earlier by
    # generate_docker_env_files.
    local module
    kubectl create ns $OSM_STACK_NAME
    for module in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${module}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${module}.env
    done
}
771
#deploys osm pods and services
function deploy_osm_services() {
    # Allow scheduling workload pods on the (single-node) master by removing
    # the NoSchedule taint, then apply every manifest in the pods directory
    # into the stack namespace.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
779
function deploy_osm_pla_service() {
    # Prepare and deploy the optional PLA module: same adjustments that
    # parse_yaml/namespace_vol perform for the core services, then apply.
    local pla_yaml="$OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml"
    # corresponding to parse_yaml: retag the image unless on the "7" tag
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $pla_yaml
    # corresponding to namespace_vol: point hostPath volumes at the stack dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $pla_yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
788
function parse_yaml() {
    # Rewrite the container image reference in every per-module manifest so
    # it points at $DOCKER_USER with the requested tag ($1).
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    TAG=$1
    local module
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$module:.*/$DOCKER_USER\/$module:$TAG/g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
796
function namespace_vol() {
    # Point every hostPath volume in the stateful-module manifests at the
    # per-stack directory instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local module
    for module in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$module.yaml
    done
}
803
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertised on the host's
    # default-route IP.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Non-standard interface MTU: docker's auto-created docker_gwbridge
        # would default to MTU 1500 and break overlay traffic, so pre-create
        # the bridge manually with the right MTU before 'swarm init'.
        # Collect the names of all existing docker networks...
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # ...take the highest 172.x subnet already in use and bump its second
        # octet by one to pick a free subnet for the gateway bridge (prints
        # "-1" when the octet would overflow past 255 -- NOTE(review): that
        # sentinel is not checked before being passed to --subnet below).
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
813
function create_docker_network() {
    # Create the attachable overlay network that every OSM stack service (and
    # optionally the vim-emu container) joins, honouring the host's MTU.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
819
function deploy_lightweight() {
    # Deploy the OSM core as a docker swarm stack:
    #   1. fix the container-side port of every exposed service,
    #   2. build the port mapping list (container-only with --nohostports,
    #      host:container otherwise),
    #   3. persist ports, network name and image tags into osm_ports.sh, which
    #      is sourced right before "docker stack deploy" so the compose file
    #      can expand those variables,
    #   4. deploy the stack, optionally adding the PLA compose overlay.

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # Prometheus is published on 9091 on the host: 9090 is taken by RO.
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Container-side ports only: nothing is published on the host.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container pairs consumed by "ports:" in docker-compose.yaml.
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # First tee (no --append) truncates any previous osm_ports.sh.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # Deploy from the work dir so "./osm_ports.sh" resolves inside sg's shell.
    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
878
function deploy_elk() {
    # Deploy an ELK (Elasticsearch + beats + Kibana + curator) observability
    # stack as a separate swarm stack named "osm_elk", then try to pre-create
    # the Kibana "filebeat-*" index pattern once Kibana answers.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    # Stage the compose files and drop any previous osm_elk stack first.
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength s.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the commands for the user to run later.
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
928
function install_lightweight() {
    # Main entry point for a lightweight (containerized) OSM installation.
    # Prepares the work dir, installs prerequisites (LXD, juju, docker,
    # optionally Kubernetes), collects the VCA (juju) connection parameters,
    # generates the env files and finally deploys the OSM services either on
    # Kubernetes or as a docker swarm stack.
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    # Ask for confirmation unless -y/--yes was given.
    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Work out the default-route interface, its IP and MTU; these drive the
    # docker network setup and the LXD/juju endpoints below.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Resolve the VCA (juju controller) endpoint: either bootstrap a local
    # controller, register an external LXD as a cloud on an existing
    # controller, or generate certificates for the local LXD daemon.
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # No external LXD given: self-sign a client certificate, trust
                # it in the local LXD daemon and register it with juju.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # Indent the PEM blocks so they nest under the yaml keys below.
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    # Collect the remaining VCA parameters unless supplied on the command line.
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # Fixed: previously tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing '$'), so this FATAL could never fire.
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # Fixed: was '[ -n "$INSTALL_PLA"]' (missing space before ']'), which
        # evaluated TRUE when PLA was NOT requested and errored out (false)
        # when it was -- the exact opposite of the intended behavior.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # Telemetry ping marking a completed installation.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1128
function install_vimemu() {
    # Build and launch the vim-emu (emulated VIM) docker container, attaching
    # it to the OSM network in lightweight mode.
    # Fixed: '-e' added so "\n" is rendered as a newline instead of printed
    # literally (matches every other echo in this script).
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # Fixed: '-y' added so the unattended installer does not stall on the
    # apt confirmation prompt (consistent with the other apt-get calls).
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1161
function install_k8s_monitoring() {
    # install OSM monitoring
    # Run the helper scripts shipped with devops to deploy the monitoring
    # stack on the OSM Kubernetes cluster; ensure they are executable first.
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1167
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Counterpart of install_k8s_monitoring: removes the monitoring stack
    # deployed by the devops helper script.
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1172
function dump_vars(){
    # Print every installer option/variable, one per line, for debugging
    # (triggered by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # Fixed: this line previously printed $OSM_STACK_NAME under the
    # OSM_WORK_DIR label (copy-paste error).
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1213
function track(){
    # Report an anonymous installation-progress event ($1) to the OSM
    # telemetry endpoint, together with the seconds elapsed since start.
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Derive the event prefix from the installation flavor. Same precedence
    # as the original guard chain: lightweight wins, then lxd over binsrc.
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        event_name="lw"
    elif [ -n "$INSTALL_FROM_LXDIMAGES" ]; then
        event_name="lxd"
    elif [ -n "$INSTALL_FROM_SOURCE" ]; then
        event_name="binsrc"
    else
        event_name="bin"
    fi
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
1227
1228 UNINSTALL=""
1229 DEVELOP=""
1230 UPDATE=""
1231 RECONFIGURE=""
1232 TEST_INSTALLER=""
1233 INSTALL_LXD=""
1234 SHOWOPTS=""
1235 COMMIT_ID=""
1236 ASSUME_YES=""
1237 INSTALL_FROM_SOURCE=""
1238 RELEASE="ReleaseSEVEN"
1239 REPOSITORY="stable"
1240 INSTALL_VIMEMU=""
1241 INSTALL_PLA=""
1242 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1243 LXD_REPOSITORY_PATH=""
1244 INSTALL_LIGHTWEIGHT="y"
1245 INSTALL_ONLY=""
1246 INSTALL_ELK=""
1247 TO_REBUILD=""
1248 INSTALL_NOLXD=""
1249 INSTALL_NODOCKER=""
1250 INSTALL_NOJUJU=""
1251 KUBERNETES=""
1252 NGUI=""
1253 INSTALL_K8S_MONITOR=""
1254 INSTALL_NOHOSTCLIENT=""
1255 SESSION_ID=`date +%s`
1256 OSM_DEVOPS=
1257 OSM_VCA_HOST=
1258 OSM_VCA_SECRET=
1259 OSM_VCA_PUBKEY=
1260 OSM_VCA_CLOUDNAME="localhost"
1261 OSM_STACK_NAME=osm
1262 NO_HOST_PORTS=""
1263 DOCKER_NOBUILD=""
1264 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1265 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1266 WORKDIR_SUDO=sudo
1267 OSM_WORK_DIR="/etc/osm"
1268 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1269 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1270 OSM_HOST_VOL="/var/lib/osm"
1271 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1272 OSM_DOCKER_TAG=latest
1273 DOCKER_USER=opensourcemano
1274 PULL_IMAGES="y"
1275 KAFKA_TAG=2.11-1.0.2
1276 PROMETHEUS_TAG=v2.4.3
1277 GRAFANA_TAG=latest
1278 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1279 PROMETHEUS_CADVISOR_TAG=latest
1280 KEYSTONEDB_TAG=10
1281 OSM_DATABASE_COMMONKEY=
1282 ELASTIC_VERSION=6.4.2
1283 ELASTIC_CURATOR_VERSION=5.5.4
1284 POD_NETWORK_CIDR=10.244.0.0/16
1285 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1286 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1287
1288 while getopts ":b:r:c:n:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
1289 case "${o}" in
1290 b)
1291 COMMIT_ID=${OPTARG}
1292 PULL_IMAGES=""
1293 ;;
1294 r)
1295 REPOSITORY="${OPTARG}"
1296 REPO_ARGS+=(-r "$REPOSITORY")
1297 ;;
1298 c)
1299 [ "${OPTARG}" == "swarm" ] && continue
1300 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1301 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1302 usage && exit 1
1303 ;;
1304 n)
1305 [ "${OPTARG}" == "lwui" ] && continue
1306 [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
1307 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
1308 usage && exit 1
1309 ;;
1310 k)
1311 REPOSITORY_KEY="${OPTARG}"
1312 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1313 ;;
1314 u)
            # -u <repo base>: repository URL for osm packages; forwarded to
            # component installers via the REPO_ARGS array
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            # -R <release>: release for osm binaries (deb packages, lxd images, ...)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            # -D <path>: use an existing devops tree instead of cloning one later
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # -o <component>: install ONLY the given optional component
            # (vimemu, elk_stack or k8s_monitor) and skip the main install
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            # -m <module>: rebuild the given module from source; may be repeated.
            # Selected modules accumulate in TO_REBUILD as a space-separated
            # list with a leading space (relied upon by the checks after the loop).
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            # -H <host>: VCA host address
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            # -S <secret>: VCA secret
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # -s <name>: stack name / k8s namespace; when deploying on
            # kubernetes the name must satisfy the RE_CHECK regex
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            # -t <tag>: docker tag used for the OSM images
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            # -U <user>: docker registry user
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # -P <file>: read the VCA public key from the given file
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            # -A <addr>: VCA API proxy address
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            # -l <file>: LXD cloud definition file
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            # -L <file>: LXD credentials file
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            # -K <name>: name of an existing controller to use
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # Long options: getopts delivers '-' with the long name in OPTARG.
            # Each recognized flag sets its variable and continues the loop;
            # value-carrying long options handled elsewhere just 'continue'.
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            # getopts signals a missing required argument with ':'
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            # -y: assume yes on interactive questions
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1433
# Sanity-check the accumulated "-m" module list (leading space included).
# "-m NONE" is only valid on its own: abort if NONE was combined with others.
if [[ -n "$TO_REBUILD" && "$TO_REBUILD" != " NONE" && "$TO_REBUILD" == *NONE* ]]; then
    FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi
# "-m PLA" alone requires that PLA was actually requested with --pla.
if [[ "$TO_REBUILD" == " PLA" && -z "$INSTALL_PLA" ]]; then
    FATAL "Incompatible option: -m PLA cannot be used without --pla option"
fi
1436
# --showopts: only print the resolved configuration and stop.
[ -z "$SHOWOPTS" ] || { dump_vars; exit 0; }
1441
# Charmed (Juju-based) path: delegate to the charmed (un)installer shipped in
# /usr/share/osm-devops, print osmclient configuration hints, and stop.
if [ -n "$CHARMED" ]; then
    # Quote the expansions: an empty/space-containing value would otherwise
    # word-split and silently shift the option arguments of the sub-installer.
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R "$RELEASE" -r "$REPOSITORY" -u "$REPOSITORY_BASE" -D /usr/share/osm-devops -t "$DOCKER_TAG" "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R "$RELEASE" -r "$REPOSITORY" -u "$REPOSITORY_BASE" -D /usr/share/osm-devops -t "$DOCKER_TAG" "$@"
    fi

    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    # Fixed hint: the previous text ("export OSM_HOSTNAME=... >> ~/.bashrc")
    # would redirect export's empty output instead of appending the line.
    echo "echo \"export OSM_HOSTNAME=<NBI-IP>\" >> ~/.bashrc"
    echo
    echo "DONE"

    exit 0
fi
1467
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Ensure the basic tools the installer itself needs are present.
# Idiom note: 'dpkg -l ... || ! echo msg || sudo cmd || FATAL ...' — if dpkg
# finds every package the chain stops at the first command; otherwise the
# message is printed and '!' turns echo's success into failure, so execution
# falls through to the privileged apt-get step (and to FATAL if that fails).
need_packages="git wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"
sudo snap install jq
# Locate (or fetch) the devops tree that provides the install helper scripts.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # --test: reuse the local checkout this script is running from
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # remove the temporary clone on any exit path
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No refspec requested: use the highest v<N>.* tag as the stable release
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1506
1507 . $OSM_DEVOPS/common/all_funcs
1508
1509 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1510 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1511 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1512 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1513 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1514 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1515
#Installation starts here
# Fetch of the README acts as a reachability/telemetry ping; output discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Lightweight (docker-based) installation is the terminal path when selected.
if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
    install_lightweight && echo -e "\nDONE" && exit 0
fi

echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source installs are slow: confirm with the user unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    if ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y; then
        echo "Cancelled!"
        exit 1
    fi
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
if [ -n "$INSTALL_LXD" ]; then
    echo -e "\nInstalling and configuring lxd"
    install_lxd
fi

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
if [ -n "$INSTALL_VIMEMU" ]; then
    install_docker_ce && install_vimemu
fi

wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"
1541