Support for optional install of PLA
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
28 echo -e " ..."
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -s <stack name> or <namespace> user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
31 echo -e " -H <VCA host> use specific juju host controller IP"
32 echo -e " -S <VCA secret> use VCA/juju secret key"
33 echo -e " -P <VCA pubkey> use VCA/juju public key file"
34 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
35 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
36 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
37 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
40 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
41 echo -e " -D <devops path> use local devops installation path"
42 echo -e " -w <work dir> Location to store runtime installation"
43 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
44 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
45 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
46 echo -e " --nojuju: do not install juju, assumes it is already installed"
47 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
48 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
49 echo -e " --nohostclient: do not install the osmclient"
50 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
51 echo -e " --source: install OSM from source code using the latest stable tag"
52 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
53 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
54 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
55 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
56 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
57 echo -e " --showopts: print chosen options and exit (only for debugging)"
58 echo -e " -y: do not prompt for confirmation, assumes yes"
59 echo -e " -h / --help: print this help"
60 echo -e " --charmed: install OSM with charms"
61 echo -e " --bundle <bundle path>: specify which bundle to use when deploying OSM with charms (--charmed option)"
62 echo -e " --kubeconfig <kubeconfig path>: specify which kubernetes cluster to use when deploying OSM with charms (--charmed option)"
63 echo -e " --lxdendpoint <lxd endpoint ip>: specify which LXD to use when deploying OSM with charms (--charmed option)"
64 echo -e " --lxdcert <lxd cert path>: specify an external LXD certificate to use when deploying OSM with charms (--charmed option)"
65 echo -e " --microstack: install microstack as a VIM (--charmed option)"
66
67 }
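# Illustrative invocations (assuming the script is run as ./full_install_osm.sh):
#   ./full_install_osm.sh                       # default: install from binaries on a docker swarm
#   ./full_install_osm.sh -c k8s --pla          # deploy on kubernetes and include the optional PLA module
#   ./full_install_osm.sh --uninstall           # remove the containers and delete NAT rules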
68
69 # takes a juju/accounts.yaml file and returns the password for a specific
70 # controller. I wrote this using only bash tools to avoid requiring
71 # additional packages
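# Minimal sketch of the accounts.yaml layout this parser expects (values are made-up examples):
#   controllers:
#     osm:
#       user: admin
#       password: 86dd1b9bf2b1...
#       last-known-access: just now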
72 function parse_juju_password {
73 password_file="${HOME}/.local/share/juju/accounts.yaml"
74 local controller_name=$1
75 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
76 sed -ne "s|^\($s\):|\1|" \
77 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
78 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
79 awk -F$fs -v controller=$controller_name '{
80 indent = length($1)/2;
81 vname[indent] = $2;
82 for (i in vname) {if (i > indent) {delete vname[i]}}
83 if (length($3) > 0) {
84 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
85 if (match(vn,controller) && match($2,"password")) {
86 printf("%s",$3);
87 }
88 }
89 }'
90 }
91
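# Produces a random 32-character alphanumeric string; used below for database and service passwords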
92 function generate_secret() {
93 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
94 }
95
96 function remove_volumes() {
97 if [ -n "$KUBERNETES" ]; then
98 k8_volume=$1
99 echo "Removing ${k8_volume}"
100 $WORKDIR_SUDO rm -rf ${k8_volume}
101 else
102 stack=$1
103 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
104 for volume in $volumes; do
105 sg docker -c "docker volume rm ${stack}_${volume}"
106 done
107 fi
108 }
109
110 function remove_network() {
111 stack=$1
112 sg docker -c "docker network rm net${stack}"
113 }
114
115 function remove_iptables() {
116 stack=$1
117 if [ -z "$OSM_VCA_HOST" ]; then
118 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
119 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
120 fi
121
122 if [ -z "$DEFAULT_IP" ]; then
123 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
124 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
125 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
126 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
127 fi
128
129 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
130 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
131 sudo netfilter-persistent save
132 fi
133 }
134
135 function remove_stack() {
136 stack=$1
137 if sg docker -c "docker stack ps ${stack}" ; then
138 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
139 COUNTER=0
140 result=1
141 while [ ${COUNTER} -lt 30 ]; do
142 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
143 #echo "Dockers running: $result"
144 if [ "${result}" == "0" ]; then
145 break
146 fi
147 let COUNTER=COUNTER+1
148 sleep 1
149 done
150 if [ "${result}" == "0" ]; then
151 echo "All dockers of the stack ${stack} were removed"
152 else
153 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
154 fi
155 sleep 5
156 fi
157 }
158
159 #removes osm deployments and services
160 function remove_k8s_namespace() {
161 kubectl delete ns $1
162 }
163
164 #Uninstall lightweight OSM: remove dockers
165 function uninstall_lightweight() {
166 if [ -n "$INSTALL_ONLY" ]; then
167 if [ -n "$INSTALL_ELK" ]; then
168 echo -e "\nUninstalling OSM ELK stack"
169 remove_stack osm_elk
170 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
171 fi
172 else
173 echo -e "\nUninstalling OSM"
174 if [ -n "$KUBERNETES" ]; then
175 if [ -n "$INSTALL_K8S_MONITOR" ]; then
176 # uninstall OSM MONITORING
177 uninstall_k8s_monitoring
178 fi
179 remove_k8s_namespace $OSM_STACK_NAME
180 else
181
182 remove_stack $OSM_STACK_NAME
183 remove_stack osm_elk
184 fi
185 echo "Now osm docker images and volumes will be deleted"
186 newgrp docker << EONG
187 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
188 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
189 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
190 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
191 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
192 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
193 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
194 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
195 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
196 EONG
197
198 if [ -n "$KUBERNETES" ]; then
199 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
200 remove_volumes $OSM_NAMESPACE_VOL
201 else
202 remove_volumes $OSM_STACK_NAME
203 remove_network $OSM_STACK_NAME
204 fi
205 remove_iptables $OSM_STACK_NAME
206 echo "Removing $OSM_DOCKER_WORK_DIR"
207 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
208 sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
209 fi
210 echo "Some docker images will be kept in case they are used by other docker stacks"
211 echo "To remove them, just run 'docker image prune' in a terminal"
212 return 0
213 }
214
215 #Safe unattended install of iptables-persistent
216 function check_install_iptables_persistent(){
217 echo -e "\nChecking required packages: iptables-persistent"
218 if ! dpkg -l iptables-persistent &>/dev/null; then
219 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
220 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
221 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
222 sudo apt-get -yq install iptables-persistent
223 fi
224 }
225
226 #Configure NAT rules, based on the current IP addresses of containers
227 function nat(){
228 check_install_iptables_persistent
229
230 echo -e "\nConfiguring NAT rules"
231 echo -e " Required root privileges"
232 sudo $OSM_DEVOPS/installers/nat_osm
233 }
234
235 function FATAL(){
236 echo "FATAL error: Cannot install OSM due to \"$1\""
237 exit 1
238 }
239
240 function install_lxd() {
241 # Apply sysctl production values for optimal performance
242 sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
243 sudo sysctl --system
244
245 # Install LXD snap
246 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
247 sudo snap install lxd --channel=3.0/stable
248
249 # Configure LXD
250 sudo usermod -a -G lxd `whoami`
251 cat /usr/share/osm-devops/installers/lxd-preseed.conf | sg lxd -c "lxd init --preseed"
252 sg lxd -c "lxd waitready"
253 DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
254 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
255 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
256 #sudo systemctl stop lxd-bridge
257 #sudo systemctl --system daemon-reload
258 #sudo systemctl enable lxd-bridge
259 #sudo systemctl start lxd-bridge
260 }
261
262 function ask_user(){
263 # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
264 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
265 # Return: true(0) if user types 'yes'; false(1) if user types 'no'
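# Illustrative call: ask_user "Do you want to proceed (Y/n)? " y && echo "proceeding"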
266 read -e -p "$1" USER_CONFIRMATION
267 while true ; do
268 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
269 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
270 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
271 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
272 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
273 done
274 }
275
276 function install_osmclient(){
277 CLIENT_RELEASE=${RELEASE#"-R "}
278 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
279 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
280 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
281 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
282 curl $key_location | sudo apt-key add -
283 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
284 sudo apt-get update
285 sudo apt-get install -y python3-pip
286 sudo -H LC_ALL=C python3 -m pip install -U pip
287 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
288 sudo apt-get install -y python3-osm-im python3-osmclient
289 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
290 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
291 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
292 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
293 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
294 echo -e "\nOSM client installed"
295 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
296 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
297 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
298 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
299 else
300 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
301 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
302 echo " export OSM_HOSTNAME=<OSM_host>"
303 fi
304 return 0
305 }
306
307 function install_prometheus_nodeexporter(){
308 if (systemctl -q is-active node_exporter)
309 then
310 echo "Node Exporter is already running."
311 else
312 echo "Node Exporter is not active, installing..."
313 if getent passwd node_exporter > /dev/null 2>&1; then
314 echo "node_exporter user exists"
315 else
316 echo "Creating user node_exporter"
317 sudo useradd --no-create-home --shell /bin/false node_exporter
318 fi
319 sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
320 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
321 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
322 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
323 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
324 sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
325 sudo systemctl daemon-reload
326 sudo systemctl restart node_exporter
327 sudo systemctl enable node_exporter
328 echo "Node Exporter has been activated in this host."
329 fi
330 return 0
331 }
332
333 function uninstall_prometheus_nodeexporter(){
334 sudo systemctl stop node_exporter
335 sudo systemctl disable node_exporter
336 sudo rm /etc/systemd/system/node_exporter.service
337 sudo systemctl daemon-reload
338 sudo userdel node_exporter
339 sudo rm /usr/local/bin/node_exporter
340 return 0
341 }
342
343 function install_docker_ce() {
344 # installs and configures Docker CE
345 echo "Installing Docker CE ..."
346 sudo apt-get -qq update
347 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
348 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
349 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
350 sudo apt-get -qq update
351 sudo apt-get install -y docker-ce
352 echo "Adding user to group 'docker'"
353 sudo groupadd -f docker
354 sudo usermod -aG docker $USER
355 sleep 2
356 sudo service docker restart
357 echo "... restarted Docker service"
358 sg docker -c "docker version" || FATAL "Docker installation failed"
359 echo "... Docker CE installation done"
360 return 0
361 }
362
363 function install_docker_compose() {
364 # installs and configures docker-compose
365 echo "Installing Docker Compose ..."
366 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
367 sudo chmod +x /usr/local/bin/docker-compose
368 echo "... Docker Compose installation done"
369 }
370
371 function install_juju() {
372 echo "Installing juju"
373 sudo snap install juju --classic
374 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
375 echo "Finished installation of juju"
376 return 0
377 }
378
379 function juju_createcontroller() {
380 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
381 # Controller not found, so create it
382 sudo usermod -a -G lxd ${USER}
383 sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
384 fi
385 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
386 }
387
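# Adds a DNAT rule (if not already present) forwarding <default host IP>:17070 to the juju controller so
# that the VCA API is reachable through the host; remove_iptables() deletes the same rule on uninstall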
388 function juju_createproxy() {
389 check_install_iptables_persistent
390
391 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
392 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
393 sudo netfilter-persistent save
394 fi
395 }
396
397 function generate_docker_images() {
398 echo "Pulling and generating docker images"
399 _build_from=$COMMIT_ID
400 [ -z "$_build_from" ] && _build_from="master"
401
402 echo "OSM Docker images generated from $_build_from"
403
404 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
405 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
406 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
407 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
408
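# For each module below: with --pullimages the prebuilt ${DOCKER_USER}/<module>:<tag> image is pulled;
# otherwise the image is built from source checked out at $COMMIT_ID, unless -m limits the rebuild to other modules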
409 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
410 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
411 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
412 fi
413
414 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
415 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
416 fi
417
418 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
419 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
420 fi
421
422 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
423 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
424 fi
425
426 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
427 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
428 fi
429
430 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
431 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
432 fi
433
434 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
435 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
436 fi
437
438 if [ -n "$PULL_IMAGES" ]; then
439 sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
440 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
441 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
442 git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
443 sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
444 fi
445
446 if [ -n "$PULL_IMAGES" ]; then
447 sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
448 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
449 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
450 git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
451 sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
452 fi
453
454 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
455 sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
456 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
457 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
458 git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
459 sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
460 fi
461
462 if [ -n "$PULL_IMAGES" ]; then
463 sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
464 sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
465 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
466 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
467 git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
468 sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
469 sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
470 fi
471
472 if [ -n "$PULL_IMAGES" ]; then
473 sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
474 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
475 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
476 git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
477 sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
478 fi
479
480 if [ -n "$PULL_IMAGES" ]; then
481 sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
482 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
483 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
484 git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
485 sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
486 fi
487
488 if [ -n "$PULL_IMAGES" ]; then
489 sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
490 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
491 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
492 git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
493 sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
494 fi
495
496 if [ -n "$PULL_IMAGES" ]; then
497 sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
498 elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
499 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
500 fi
501
502 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
503 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
504 fi
505
506 echo "Finished generation of docker images"
507 }
508
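# Copies $1 over $2 only when they differ; if $2 already exists the user is asked before overwriting (cp -b keeps a backup)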
509 function cmp_overwrite() {
510 file1="$1"
511 file2="$2"
512 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
513 if [ -f "${file2}" ]; then
514 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
515 else
516 cp -b ${file1} ${file2}
517 fi
518 fi
519 }
520
521 function generate_docker_env_files() {
522 echo "Doing a backup of existing env files"
523 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
524 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
525 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
526 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
527 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
528 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
529 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
530 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
531 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
532
533 echo "Generating docker env files"
534 if [ -n "$KUBERNETES" ]; then
535 #Kubernetes resources
536 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
537 else
538 # Docker-compose
539 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
540 if [ -n "$INSTALL_PLA" ]; then
541 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
542 fi
543
544 # Prometheus
545 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
546
547 # Grafana & Prometheus Exporter files
548 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
549 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
550 fi
551
552 # LCM
553 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
554 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
555 fi
556
557 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
558 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
559 else
560 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
561 fi
562
563 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
564 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
565 else
566 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
567 fi
568
569 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
570 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
571 else
572 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
573 fi
574
575 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
576 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
577 else
578 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
579 fi
580
581 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
582 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
583 else
584 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
585 fi
586
587 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
588 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
589 fi
590
591 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
592 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
593 fi
594
595 # RO
596 MYSQL_ROOT_PASSWORD=$(generate_secret)
597 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
598 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
599 fi
600 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
601 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
602 fi
603
604 # Keystone
605 KEYSTONE_DB_PASSWORD=$(generate_secret)
606 SERVICE_PASSWORD=$(generate_secret)
607 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
608 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
609 fi
610 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
611 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
612 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
613 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
614 fi
615
616 # NBI
617 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
618 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
619 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
620 fi
621
622 # MON
623 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
624 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
625 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
626 fi
627
628 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
629 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
630 else
631 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
632 fi
633
634 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
635 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
636 else
637 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
638 fi
639
640 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
641 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
642 else
643 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
644 fi
645
646 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
647 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
648 else
649 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
650 fi
651
652
653 # POL
654 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
655 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
656 fi
657
658 # LW-UI
659 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
660 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
661 fi
662
663 echo "Finished generation of docker env files"
664 }
665
666 function generate_osmclient_script () {
667 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
668 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
669 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
670 }
671
672 #installs kubernetes packages
673 function install_kube() {
674 sudo apt-get update && sudo apt-get install -y apt-transport-https
675 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
676 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
677 sudo apt-get update
678 echo "Installing Kubernetes Packages ..."
679 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
680 }
681
682 #initializes kubernetes control plane
683 function init_kubeadm() {
684 sudo swapoff -a
685 sudo kubeadm init --config $1
686 sleep 5
687 }
688
689 function kube_config_dir() {
690 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
691 mkdir -p $HOME/.kube
692 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
693 sudo chown $(id -u):$(id -g) $HOME/.kube/config
694 }
695
696 #deploys flannel as daemonsets
697 function deploy_cni_provider() {
698 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
699 trap 'rm -rf "${CNI_DIR}"' EXIT
700 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
701 kubectl apply -f $CNI_DIR
702 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
703 }
704
705 #creates secrets from env files which will be used by containers
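# (illustrative check once created: kubectl -n $OSM_STACK_NAME get secret lcm-secret -o yaml)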
706 function kube_secrets(){
707 kubectl create ns $OSM_STACK_NAME
708 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
709 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
710 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
711 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
712 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
713 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
714 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
715 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
716 }
717
718 #deploys osm pods and services
719 function deploy_osm_services() {
720 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
721 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
722 sleep 5
723 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
724 }
725
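# Deploys the optional PLA module from its own manifest directory ($OSM_DOCKER_WORK_DIR/osm_pla);
# only invoked when the installer was run with --pla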
726 function deploy_osm_pla_service() {
727 # corresponding to parse_yaml
728 [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
729 # corresponding to namespace_vol
730 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
731 # corresponding to deploy_osm_services
732 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
733 }
734
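# Pins the docker image tag used by each osm pod manifest, e.g. (illustrative manifest line):
#   image: opensourcemano/lcm:latest  ->  image: opensourcemano/lcm:<TAG>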
735 function parse_yaml() {
736 osm_services="nbi lcm ro pol mon light-ui keystone"
737 TAG=$1
738 for osm in $osm_services; do
739 $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
740 done
741 }
742
743 function namespace_vol() {
744 osm_services="nbi lcm ro pol mon kafka mongo mysql"
745 for osm in $osm_services; do
746 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
747 done
748 }
749
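# Initializes a single-node docker swarm. When the host MTU is not the default 1500, docker_gwbridge is
# pre-created with a matching MTU (a common workaround so that overlay traffic is not fragmented)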
750 function init_docker_swarm() {
751 if [ "${DEFAULT_MTU}" != "1500" ]; then
752 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
753 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
754 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
755 fi
756 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
757 return 0
758 }
759
760 function create_docker_network() {
761 echo "creating network"
762 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
763 echo "creating network DONE"
764 }
765
766 function deploy_lightweight() {
767
768 echo "Deploying lightweight build"
769 OSM_NBI_PORT=9999
770 OSM_RO_PORT=9090
771 OSM_KEYSTONE_PORT=5000
772 OSM_UI_PORT=80
773 OSM_MON_PORT=8662
774 OSM_PROM_PORT=9090
775 OSM_PROM_CADVISOR_PORT=8080
776 OSM_PROM_HOSTPORT=9091
777 OSM_GRAFANA_PORT=3000
778 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
779 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
780
781 if [ -n "$NO_HOST_PORTS" ]; then
782 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
783 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
784 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
785 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
786 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
787 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
788 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
789 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
790 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
791 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
792 else
793 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
794 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
795 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
796 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
797 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
798 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
799 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
800 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
801 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
802 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
803 fi
804 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
805 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
806 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
807 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
808 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
809 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
810 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
811 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
812 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
813
814 pushd $OSM_DOCKER_WORK_DIR
815 if [ -n "$INSTALL_PLA" ]; then
816 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
817 else
818 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
819 fi
820 popd
821
822 echo "Finished deployment of lightweight build"
823 }
824
825 function deploy_elk() {
826 echo "Pulling docker images for ELK"
827 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
828 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
829 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
830 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
831 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
832 echo "Finished pulling elk docker images"
833 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
834 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
835 remove_stack osm_elk
836 echo "Deploying ELK stack"
837 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
838 echo "Waiting for ELK stack to be up and running"
839 time=0
840 step=5
841 timelength=40
842 elk_is_up=1
843 while [ $time -le $timelength ]; do
844 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
845 elk_is_up=0
846 break
847 fi
848 sleep $step
849 time=$((time+step))
850 done
851 if [ $elk_is_up -eq 0 ]; then
852 echo "ELK is up and running. Trying to create index pattern..."
853 #Create index pattern
854 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
855 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
856 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
857 #Make it the default index
858 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
859 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
860 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
861 else
862 echo "Cannot connect to Kibana to create index pattern."
863 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
864 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
865 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
866 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
867 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
868 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
869 -d"{\"value\":\"filebeat-*\"}"'
870 fi
871 echo "Finished deployment of ELK stack"
872 return 0
873 }
874
875 function install_lightweight() {
876 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
877 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
878 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
879 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
880 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
881
882 track checkingroot
883 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
884 track noroot
885
886 if [ -n "$KUBERNETES" ]; then
887 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
888 1. Install and configure LXD
889 2. Install juju
890 3. Install docker CE
891 4. Disable swap space
892 5. Install and initialize Kubernetes
893 as pre-requirements.
894 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
895
896 else
897 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
898 fi
899 track proceed
900
901 echo "Installing lightweight build of OSM"
902 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
903 trap 'rm -rf "${LWTEMPDIR}"' EXIT
904 DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
905 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
906 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
907 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
908 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
909
910 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
911 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
912 need_packages_lw="snapd"
913 echo -e "Checking required packages: $need_packages_lw"
914 dpkg -l $need_packages_lw &>/dev/null \
915 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
916 || sudo apt-get update \
917 || FATAL "failed to run apt-get update"
918 dpkg -l $need_packages_lw &>/dev/null \
919 || ! echo -e "Installing $need_packages_lw requires root privileges." \
920 || sudo apt-get install -y $need_packages_lw \
921 || FATAL "failed to install $need_packages_lw"
922 install_lxd
923 fi
924 track prereqok
925
926 [ -z "$INSTALL_NOJUJU" ] && install_juju
927 track juju_install
928
929 if [ -z "$OSM_VCA_HOST" ]; then
930 juju_createcontroller
931 OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
932 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
933 fi
934 track juju_controller
935
936 if [ -z "$OSM_VCA_SECRET" ]; then
937 OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
938 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
939 fi
940 if [ -z "$OSM_VCA_PUBKEY" ]; then
941 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
942 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
943 fi
944 if [ -z "$OSM_VCA_CACERT" ]; then
945 OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["'$OSM_STACK_NAME'"]["ca-cert"]' | base64 | tr -d \\n)
946 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
947 fi
948 if [ -z "$OSM_VCA_APIPROXY" ]; then
949 OSM_VCA_APIPROXY=$DEFAULT_IP
950 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
951 fi
952 juju_createproxy
953 track juju
954
955 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
956 OSM_DATABASE_COMMONKEY=$(generate_secret)
957 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
958 fi
959
960 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
961 track docker_ce
962
963 #Installs Kubernetes and deploys osm services
964 if [ -n "$KUBERNETES" ]; then
965 install_kube
966 track install_k8s
967 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
968 kube_config_dir
969 track init_k8s
970 else
971 #install_docker_compose
972 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
973 track docker_swarm
974 fi
975
976 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
977 track docker_build
978
979 generate_docker_env_files
980
981 if [ -n "$KUBERNETES" ]; then
982 if [ -n "$INSTALL_K8S_MONITOR" ]; then
983 # uninstall OSM MONITORING
984 uninstall_k8s_monitoring
985 track uninstall_k8s_monitoring
986 fi
987 #remove old namespace
988 remove_k8s_namespace $OSM_STACK_NAME
989 deploy_cni_provider
990 kube_secrets
991 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
992 namespace_vol
993 deploy_osm_services
994 if [ -n "$INSTALL_PLA" ]; then
995 # optional PLA install
996 deploy_osm_pla_service
997 fi
998 track deploy_osm_services_k8s
999 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1000 # install OSM MONITORING
1001 install_k8s_monitoring
1002 track install_k8s_monitoring
1003 fi
1004 else
1005 # remove old stack
1006 remove_stack $OSM_STACK_NAME
1007 create_docker_network
1008 deploy_lightweight
1009 generate_osmclient_script
1010 track docker_deploy
1011 install_prometheus_nodeexporter
1012 track nodeexporter
1013 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1014 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1015 fi
1016
1017 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1018 track osmclient
1019
1020 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1021 track end
1022 return 0
1023 }
1024
1025 function install_vimemu() {
1026 echo -e "\nInstalling vim-emu"
1027 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1028 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1029 # install prerequisites (OVS is a must for the emulator to work)
1030 sudo apt-get install -y openvswitch-switch
1031 # clone vim-emu repository (attention: branch is currently master only)
1032 echo "Cloning vim-emu repository ..."
1033 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1034 # build vim-emu docker
1035 echo "Building vim-emu Docker container..."
1036
1037 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1038 # start vim-emu container as daemon
1039 echo "Starting vim-emu Docker container 'vim-emu' ..."
1040 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1041 # in lightweight mode, the emulator needs to be attached to netOSM
1042 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1043 else
1044 # classic build mode
1045 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1046 fi
1047 echo "Waiting for 'vim-emu' container to start ..."
1048 sleep 5
1049 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1050 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1051 # print vim-emu connection info
1052 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1053 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1054 echo -e "To add the emulated VIM to OSM you should do:"
1055 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1056 }
1057
1058 function install_k8s_monitoring() {
1059 # install OSM monitoring
1060 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1061 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1062 }
1063
1064 function uninstall_k8s_monitoring() {
1065 # uninstall OSM monitoring
1066 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1067 }
1068
1069 function dump_vars(){
1070 echo "DEVELOP=$DEVELOP"
1071 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1072 echo "UNINSTALL=$UNINSTALL"
1073 echo "UPDATE=$UPDATE"
1074 echo "RECONFIGURE=$RECONFIGURE"
1075 echo "TEST_INSTALLER=$TEST_INSTALLER"
1076 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1077 echo "INSTALL_PLA=$INSTALL_PLA"
1078 echo "INSTALL_LXD=$INSTALL_LXD"
1079 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1080 echo "INSTALL_ONLY=$INSTALL_ONLY"
1081 echo "INSTALL_ELK=$INSTALL_ELK"
1082 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1083 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1084 echo "TO_REBUILD=$TO_REBUILD"
1085 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1086 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1087 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1088 echo "RELEASE=$RELEASE"
1089 echo "REPOSITORY=$REPOSITORY"
1090 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1091 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1092 echo "OSM_DEVOPS=$OSM_DEVOPS"
1093 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1094 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1095 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1096 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1097 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1098 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1099 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1100 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1101 echo "DOCKER_USER=$DOCKER_USER"
1102 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1103 echo "PULL_IMAGES=$PULL_IMAGES"
1104 echo "KUBERNETES=$KUBERNETES"
1105 echo "SHOWOPTS=$SHOWOPTS"
1106 echo "Install from specific refspec (-b): $COMMIT_ID"
1107 }
1108
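# Reports installer progress to the OSM usage-tracking endpoint: one event per phase (e.g. "lw_prereqok"
# for 'track prereqok' during a lightweight install) plus the seconds elapsed since the installer started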
1109 function track(){
1110 ctime=`date +%s`
1111 duration=$((ctime - SESSION_ID))
1112 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1113 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1114 event_name="bin"
1115 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1116 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1117 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1118 event_name="${event_name}_$1"
1119 url="${url}&event=${event_name}&ce_duration=${duration}"
1120 wget -q -O /dev/null $url
1121 }
1122
1123 UNINSTALL=""
1124 DEVELOP=""
1125 UPDATE=""
1126 RECONFIGURE=""
1127 TEST_INSTALLER=""
1128 INSTALL_LXD=""
1129 SHOWOPTS=""
1130 COMMIT_ID=""
1131 ASSUME_YES=""
1132 INSTALL_FROM_SOURCE=""
1133 RELEASE="ReleaseSEVEN"
1134 REPOSITORY="stable"
1135 INSTALL_VIMEMU=""
1136 INSTALL_PLA=""
1137 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1138 LXD_REPOSITORY_PATH=""
1139 INSTALL_LIGHTWEIGHT="y"
1140 INSTALL_ONLY=""
1141 INSTALL_ELK=""
1142 TO_REBUILD=""
1143 INSTALL_NOLXD=""
1144 INSTALL_NODOCKER=""
1145 INSTALL_NOJUJU=""
1146 KUBERNETES=""
1147 INSTALL_K8S_MONITOR=""
1148 INSTALL_NOHOSTCLIENT=""
1149 SESSION_ID=`date +%s`
1150 OSM_DEVOPS=
1151 OSM_VCA_HOST=
1152 OSM_VCA_SECRET=
1153 OSM_VCA_PUBKEY=
1154 OSM_STACK_NAME=osm
1155 NO_HOST_PORTS=""
1156 DOCKER_NOBUILD=""
1157 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1158 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1159 WORKDIR_SUDO=sudo
1160 OSM_WORK_DIR="/etc/osm"
1161 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1162 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1163 OSM_HOST_VOL="/var/lib/osm"
1164 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1165 OSM_DOCKER_TAG=latest
1166 DOCKER_USER=opensourcemano
1167 PULL_IMAGES="y"
1168 KAFKA_TAG=2.11-1.0.2
1169 PROMETHEUS_TAG=v2.4.3
1170 GRAFANA_TAG=latest
1171 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1172 PROMETHEUS_CADVISOR_TAG=latest
1173 KEYSTONEDB_TAG=10
1174 OSM_DATABASE_COMMONKEY=
1175 ELASTIC_VERSION=6.4.2
1176 ELASTIC_CURATOR_VERSION=5.5.4
1177 POD_NETWORK_CIDR=10.244.0.0/16
1178 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1179 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1180
1181 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
1182 case "${o}" in
1183 b)
1184 COMMIT_ID=${OPTARG}
1185 PULL_IMAGES=""
1186 ;;
1187 r)
1188 REPOSITORY="${OPTARG}"
1189 REPO_ARGS+=(-r "$REPOSITORY")
1190 ;;
1191 c)
1192 [ "${OPTARG}" == "swarm" ] && continue
1193 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1194 echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
1195 usage && exit 1
1196 ;;
1197 k)
1198 REPOSITORY_KEY="${OPTARG}"
1199 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1200 ;;
1201 u)
1202 REPOSITORY_BASE="${OPTARG}"
1203 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1204 ;;
1205 R)
1206 RELEASE="${OPTARG}"
1207 REPO_ARGS+=(-R "$RELEASE")
1208 ;;
1209 D)
1210 OSM_DEVOPS="${OPTARG}"
1211 ;;
1212 o)
1213 INSTALL_ONLY="y"
1214 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1215 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1216 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1217 ;;
1218 m)
1219 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1220 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1221 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1222 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1223 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1224 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1225 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1226 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1227 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1228 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1229 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1230 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1231 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1232 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1233 ;;
1234 H)
1235 OSM_VCA_HOST="${OPTARG}"
1236 ;;
1237 S)
1238 OSM_VCA_SECRET="${OPTARG}"
1239 ;;
1240 s)
1241 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
1242 ;;
1243 w)
1244 # when specifying workdir, do not use sudo for access
1245 WORKDIR_SUDO=
1246 OSM_WORK_DIR="${OPTARG}"
1247 ;;
1248 t)
1249 OSM_DOCKER_TAG="${OPTARG}"
1250 ;;
1251 U)
1252 DOCKER_USER="${OPTARG}"
1253 ;;
1254 P)
1255 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1256 ;;
1257 A)
1258 OSM_VCA_APIPROXY="${OPTARG}"
1259 ;;
1260 -)
1261 [ "${OPTARG}" == "help" ] && usage && exit 0
1262 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1263 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1264 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1265 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1266 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1267 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1268 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1269 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1270 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1271 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1272 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1273 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1274 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1275 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1276 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1277 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1278 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1279 [ "${OPTARG}" == "pullimages" ] && continue
1280 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1281 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1282 [ "${OPTARG}" == "bundle" ] && continue
1283 [ "${OPTARG}" == "kubeconfig" ] && continue
1284 [ "${OPTARG}" == "lxdendpoint" ] && continue
1285 [ "${OPTARG}" == "lxdcert" ] && continue
1286 [ "${OPTARG}" == "microstack" ] && continue
1287 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1288 echo -e "Invalid option: '--$OPTARG'\n" >&2
1289 usage && exit 1
1290 ;;
1291 :)
1292 echo "Option -$OPTARG requires an argument" >&2
1293 usage && exit 1
1294 ;;
1295 \?)
1296 echo -e "Invalid option: '-$OPTARG'\n" >&2
1297 usage && exit 1
1298 ;;
1299 h)
1300 usage && exit 0
1301 ;;
1302 y)
1303 ASSUME_YES="y"
1304 ;;
1305 *)
1306 usage && exit 1
1307 ;;
1308 esac
1309 done
1310
1311 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1312 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1313
1314 if [ -n "$SHOWOPTS" ]; then
1315 dump_vars
1316 exit 0
1317 fi
1318
1319 if [ -n "$CHARMED" ]; then
1320 if [ -n "$UNINSTALL" ]; then
1321 /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1322 else
1323 /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1324 fi
1325
1326 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1327 echo
1328 echo "1. Get the NBI IP with the following command:"
1329 echo
1330 echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
1331 echo
1332 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1333 echo
1334 echo "export OSM_HOSTNAME=<NBI-IP>"
1335 echo
1336 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1337 echo
1338 echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
1339 echo
1340 echo "DONE"
1341
1342 exit 0
1343 fi
1344
1345 # if develop, we force master
1346 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1347
1348 need_packages="git jq wget curl tar"
1349 echo -e "Checking required packages: $need_packages"
1350 dpkg -l $need_packages &>/dev/null \
1351 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1352 || sudo apt-get update \
1353 || FATAL "failed to run apt-get update"
1354 dpkg -l $need_packages &>/dev/null \
1355 || ! echo -e "Installing $need_packages requires root privileges." \
1356 || sudo apt-get install -y $need_packages \
1357 || FATAL "failed to install $need_packages"
1358
1359 if [ -z "$OSM_DEVOPS" ]; then
1360 if [ -n "$TEST_INSTALLER" ]; then
1361 echo -e "\nUsing local devops repo for OSM installation"
1362 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1363 else
1364 echo -e "\nCreating temporary dir for OSM installation"
1365 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1366 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1367
1368 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1369
1370 if [ -z "$COMMIT_ID" ]; then
1371 echo -e "\nGuessing the current stable release"
1372 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1373 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1374
1375 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1376 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1377 else
1378 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1379 fi
1380 git -C $OSM_DEVOPS checkout $COMMIT_ID
1381 fi
1382 fi
1383
1384 . $OSM_DEVOPS/common/all_funcs
1385
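# Shortcut paths: a lightweight uninstall, or installation of a single addon (-o),
# exit here without running the full installation.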
1386 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1387 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1388 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1389 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1390 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1391 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1392
1393 #Installation starts here
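# The wget below pings the OSM download server and "track start" marks the beginning
# of the installation (paired with "track end" further down), presumably for
# anonymous installation statistics.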
1394 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1395 track start
1396
1397 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1398 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1399 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1400 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1401 fi
1402
1403 echo -e "Checking required packages: lxd"
1404 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1405 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1406
1407 # use local devops for containers
1408 export OSM_USE_LOCAL_DEVOPS=true
1409
1410 #Install osmclient
1411
1412 #Install vim-emu (optional)
1413 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1414
1415 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1416 track end
1417 echo -e "\nDONE"
1418