# fix getting interface with default route when there are more than one
# [osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help on stdout.
    # Fixed typos in the help text: "confifured" -> "configured",
    # "moitoring" -> "monitoring".
    cat << EOF
usage: $0 [OPTIONS]
Install OSM from binaries or source code (by default, from binaries)
 OPTIONS
 -r <repo>: use specified repository name for osm packages
 -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)
 -u <repo base>: use specified repository url for osm packages
 -k <repo key>: use specified repository public key url
 -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag
 -b master (main dev branch)
 -b v2.0 (v2.0 branch)
 -b tags/v1.1.0 (a specific tag)
 ...
 -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled
 -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm
 -H <VCA host> use specific juju host controller IP
 -S <VCA secret> use VCA/juju secret key
 -P <VCA pubkey> use VCA/juju public key file
 -C <VCA cacert> use VCA/juju CA certificate file
 -A <VCA apiproxy> use VCA/juju API proxy
 --vimemu: additionally deploy the VIM emulator as a docker container
 --elk_stack: additionally deploy an ELK docker stack for event logging
 --pla: install the PLA module for placement support
 -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)
 -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)
 -D <devops path> use local devops installation path
 -w <work dir> Location to store runtime installation
 -t <docker tag> specify osm docker tag (default is latest)
 -l: LXD cloud yaml file
 -L: LXD credentials yaml file
 -K: Specifies the name of the controller to use - The controller must be already bootstrapped
 --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)
 --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)
 --nojuju: do not juju, assumes already installed
 --nodockerbuild:do not build docker images (use existing locally cached images)
 --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)
 --nohostclient: do not install the osmclient
 --uninstall: uninstall OSM: remove the containers and delete NAT rules
 --source: install OSM from source code using the latest stable tag
 --develop: (deprecated, use '-b master') install OSM from source code using the master branch
 --pullimages: pull/run osm images from docker.io/opensourcemano
 --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana
EOF
    # Disabled options kept for reference:
    # --reconfigure: reconfigure the modules (DO NOT change NAT rules)
    # --update: update to the latest stable release or to the latest commit if using a specific branch
    cat << EOF
 --showopts: print chosen options and exit (only for debugging)
 -y: do not prompt for confirmation, assumes yes
 -h / --help: print this help
 --charmed: install OSM with charms
 --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)
 --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)
 --controller <name>: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)
 --lxd-cloud <yaml path>: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)
 --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)
 --microstack: Installs microstack as a vim. (--charmed option)
 --tag: Docker image tag
EOF

}
73
74 # takes a juju/accounts.yaml file and returns the password specific
75 # for a controller. I wrote this using only bash tools to minimize
76 # additions of other packages
77 function parse_juju_password {
78 password_file="${HOME}/.local/share/juju/accounts.yaml"
79 local controller_name=$1
80 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
81 sed -ne "s|^\($s\):|\1|" \
82 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
83 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
84 awk -F$fs -v controller=$controller_name '{
85 indent = length($1)/2;
86 vname[indent] = $2;
87 for (i in vname) {if (i > indent) {delete vname[i]}}
88 if (length($3) > 0) {
89 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
90 if (match(vn,controller) && match($2,"password")) {
91 printf("%s",$3);
92 }
93 }
94 }'
95 }
96
function generate_secret() {
    # Emit a 32-character random alphanumeric secret (no trailing newline).
    # Read the urandom stream directly: the previous "head /dev/urandom"
    # only consumed the first 10 newline-delimited chunks of random bytes,
    # which could (rarely) contain fewer than 32 alphanumeric characters
    # and produce a short secret.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
100
function remove_volumes() {
    # Delete the persistent storage left behind by an OSM deployment.
    if [ -z "$KUBERNETES" ]; then
        # Swarm deployment: drop every named docker volume of the stack
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        # Kubernetes deployment: wipe the host directory backing the volumes
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
114
function remove_network() {
    # Remove the per-stack docker overlay network ("net<stack>").
    # NOTE: "stack" is deliberately kept global, matching the original.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
119
function remove_iptables() {
    # Delete the DNAT rule (host:17070 -> juju controller) installed by
    # juju_createproxy, resolving the controller IP and the host default IP
    # first when they were not provided by the caller.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Extract the controller IP from the api-endpoints line of
        # "juju show-controller" (value between quotes, before the port)
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # "exit" makes awk stop at the first match, so the FIRST default
        # route wins when the routing table contains more than one
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        # IPv4 address of that interface, with the /prefix stripped
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Remove the rule only when present (-C checks), then persist tables
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
140
function remove_stack() {
    # Tear down a docker swarm stack and poll (1s interval, up to 30 tries)
    # until every container of the stack has actually terminated.
    stack=$1
    # Nothing to do when the stack is not currently deployed
    sg docker -c "docker stack ps ${stack}" || return 0
    echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
    COUNTER=0
    result=1
    while [ ${COUNTER} -lt 30 ]; do
        # Number of tasks still listed for the stack
        result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
        [ "${result}" == "0" ] && break
        COUNTER=$((COUNTER+1))
        sleep 1
    done
    if [ "${result}" == "0" ]; then
        echo "All dockers of the stack ${stack} were removed"
    else
        FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
    fi
    sleep 5
}
164
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the whole namespace cascades to every OSM deployment and
    # service living inside it.
    local target_ns=$1
    kubectl delete ns ${target_ns}
}
169
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the legacy python2 and the python3 client packages.
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$pkg"
    done
}
175
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        # "-o" mode: only the requested addon is removed, not OSM itself
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp runs the deletions with effective group "docker"; the
        # ${...} references are expanded by the current shell before the
        # heredoc is handed to newgrp
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # Kubernetes keeps volumes in a per-namespace host directory
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # Host-level juju cleanup is skipped with an external controller
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
227
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Install iptables-persistent non-interactively when it is missing.
    echo -e "\nChecking required packages: iptables-persistent"
    # "dpkg -l <pkg>" succeeds when the package is known/installed, so the
    # install branch must run when the check FAILS — the guard was inverted
    # (it installed only when the package was already present).
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt does not prompt about saving current rules
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
238
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # iptables-persistent must be present before the NAT rules are saved
    check_install_iptables_persistent

    printf '\nConfiguring NAT rules\n Required root privileges\n'
    sudo $OSM_DEVOPS/installers/nat_osm
}
247
function FATAL(){
    # Report the reason on stdout and abort the installer immediately.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
252
function install_lxd() {
    # Install the LXD snap (replacing any deb-based LXC/LXD), preseed its
    # configuration and align the default profile MTU with the host uplink.
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Preseed "lxd init", exposing the LXD API on the default IP, port 8443
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Pick the interface with the default route ("exit" keeps only the
    # first match when several default routes exist) and propagate its MTU
    # to the default LXD profile so containers do not fragment
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
276
function ask_user(){
    # Prompt the user with $1 and wait for a yes/no answer (case
    # insensitive). $2 selects what an empty answer means: 'y' -> yes,
    # 'n' -> no, anything else -> keep asking.
    # Return: 0 (true) for "yes"/"y"; 1 (false) for "no"/"n".
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                # Empty answer: apply the default, if one was given
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            yes|y) return 0 ;;
            no|n) return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
290
function install_osmclient(){
    # Install the OSM CLI (python3-osmclient) and the OSM information model
    # from the configured ETSI apt repository, plus python dependencies.
    # Strip the option prefixes in case the variables carry the full flags
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    # Trust the repository key, register the repo and install the packages
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight (lxc container based) install: point the client at
    # the SO-ub / RO containers reported by "lxc list"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
321
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter (version $PROMETHEUS_NODE_EXPORTER_TAG)
    # as a systemd service running under a dedicated system user; no-op when
    # the service is already active.
    if (systemctl -q is-active node_exporter)
        then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        # Create the unprivileged service user only once
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download the release tarball, install the binary and clean up
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register, start and enable the systemd unit
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
347
function uninstall_prometheus_nodeexporter(){
    # Stop, disable and fully remove the node_exporter service, together
    # with its dedicated user and the installed binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    # Make systemd forget the removed unit file
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
357
function install_docker_ce() {
    # installs and configures Docker CE from the official docker apt
    # repository, and lets the current user run docker via the "docker"
    # group without re-login (checked through sg).
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # sg runs the sanity check with the new group membership applied
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
377
function install_docker_compose() {
    # installs and configures docker-compose (pinned to v1.18.0) by
    # downloading the prebuilt binary for this kernel/architecture
    echo "Installing Docker Compose ..."
    local compose_url="https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)"
    sudo curl -L "$compose_url" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
385
function install_juju() {
    # Install the juju client from its snap and make sure /snap/bin is
    # reachable through PATH for the rest of the installer.
    echo "Installing juju"
    sudo snap install juju --classic
    case ":$PATH:" in
        *:/snap/bin:*) ;;
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    echo "Finished installation of juju"
    return 0
}
393
function juju_createcontroller() {
    # Bootstrap a juju controller named after the stack on the configured
    # cloud, unless a controller with that name already exists.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # NOTE(review): inside the double quotes, $1 is expanded by the SHELL
    # (empty here), so awk prints whole matching lines rather than field 1;
    # the wc -l count of matching lines still works — confirm before
    # "fixing" the quoting.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
402
function juju_createproxy() {
    check_install_iptables_persistent

    # DNAT rule forwarding host traffic on port 17070 to the juju
    # controller; word splitting of the unquoted variable is intentional.
    local dnat_rule="PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST"
    # Add the rule only when missing (-C checks), then persist the tables
    if ! sudo iptables -t nat -C $dnat_rule; then
        sudo iptables -t nat -A $dnat_rule
        sudo netfilter-persistent save
    fi
}
411
function generate_docker_images() {
    # Obtain every docker image the deployment needs. Third-party images
    # are always pulled; OSM module images are either pulled from the
    # registry (PULL_IMAGES) or built from a fresh git checkout under
    # $LWTEMPDIR. TO_REBUILD (the -m option) limits the work to the listed
    # modules.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Build arguments propagated to locally built images
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party support images (always pulled, never built)
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM module images: pull or clone+build, one stanza per module
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set or explicitly
    # requested through TO_REBUILD
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    # NBI ships the keystone image as well (built from its keystone/ dir)
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this repeats the cadvisor pull above but is guarded by
    # PROMETHEUS instead of PROMETHEUS-CADVISOR — looks like a leftover;
    # harmless (re-pull), confirm before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
523
function cmp_overwrite() {
    # Copy $1 over $2 unless both files are already identical; when $2
    # exists and differs, ask the user first (default answer: No).
    # cp -b keeps a backup of the overwritten file.
    file1="$1"
    file2="$2"
    # cmp -s: silent compare; non-zero when the files differ or $2 is
    # missing. The original wrapped cmp in $(...), which "executes" cmp's
    # (empty) output and only works through bash's last-substitution
    # status fallback — call cmp directly instead.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
535
function generate_docker_env_files() {
    # Create or update the per-module env files and deployment descriptors
    # under $OSM_DOCKER_WORK_DIR. Existing files are backed up first (the
    # cp file{,~} pattern); files are only created when missing, missing
    # keys are appended (tee -a), and VCA-related keys are rewritten in
    # place with sed so re-installs pick up new controller credentials.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # VCA keys: append when absent, rewrite in place when already present
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # Optional keys are added commented out, as documentation for operators
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # The same MYSQL root password is shared with the keystone DB below
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
694
#Writes a small wrapper script that launches the osmclient sidecar container
#attached to this stack's overlay network, and makes it executable.
function generate_osmclient_script () {
    local script_path="$OSM_DOCKER_WORK_DIR/osm"
    # tee (optionally under sudo for the workdir) both writes and echoes the line
    $WORKDIR_SUDO tee "$script_path" <<< "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    $WORKDIR_SUDO chmod +x "$script_path"
    echo "osmclient sidecar container can be found at: $script_path"
}
700
#installs kubernetes packages
function install_kube() {
    # Register the upstream Kubernetes apt repository and its signing key
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Versions are pinned to 1.15.0-00, the level this installer was validated with
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
710
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster configuration yaml
function init_kubeadm() {
    # kubeadm refuses to initialize while swap is enabled
    sudo swapoff -a
    sudo kubeadm init --config $1
    # brief settle time before the caller starts using the cluster
    sleep 5
}
717
# Copies the cluster admin kubeconfig into the invoking user's home so that
# kubectl can be run without sudo. Fails hard if the k8s manifest dir is absent.
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # the copy is root-owned; hand it over to the current user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
724
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Throw-away dir for the downloaded manifest, cleaned up on exit.
    # NOTE(review): this trap replaces any EXIT trap set by the caller
    # (e.g. the LWTEMPDIR cleanup in install_lightweight) — confirm intended.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    # Fail fast if the download fails; otherwise kubectl would be applied
    # against an empty directory and report a confusing error
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR" \
        || FATAL "Cannot Install Flannel"
    kubectl apply -f "$CNI_DIR" || FATAL "Cannot Install Flannel"
}
733
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # One k8s secret per OSM component, sourced from the env files produced by
    # generate_docker_env_files; the pods consume them as environment variables.
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
746
#deploys osm pods and services
function deploy_osm_services() {
    # Find the master node(s) (role in column 3 of 'kubectl get nodes') with a
    # single awk pass instead of two chained invocations
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    # Remove the NoSchedule taint so OSM pods may run on the master
    # ($K8S_MASTER intentionally unquoted: taints every matched node)
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
754
# Deploys the optional PLA (placement) module, applying the same tag and
# hostPath rewrites to pla.yaml that parse_yaml/namespace_vol do for core pods.
function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
763
# Rewrites every OSM service manifest so its opensourcemano/* image reference
# is pinned to the requested docker tag.
# $1 - docker tag to substitute into the image references
function parse_yaml() {
    local image_tag="$1"
    local component
    for component in nbi lcm ro pol mon light-ui keystone; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$component:.*/opensourcemano\/$component:$image_tag/g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
771
# Points each service manifest's hostPath volume at this stack's private
# directory ($OSM_NAMESPACE_VOL) instead of the shared default /var/lib/osm.
function namespace_vol() {
    local component
    for component in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
778
# Initializes a single-node docker swarm advertised on the default-route IP.
# On hosts with a non-standard MTU it pre-creates docker_gwbridge with that
# MTU so overlay traffic is not dropped.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # List all existing docker networks, then pick a 172.x/16-style subnet
        # one above the highest 172.* subnet already in use for the new bridge
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
788
# Creates the attachable overlay network (net<stack>) shared by all OSM
# services; MTU matches the host uplink detected earlier.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
794
# Deploys the OSM docker-swarm stack: computes the host/container port map,
# writes it (plus image tags) to osm_ports.sh, and runs 'docker stack deploy'.
function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With -nohostports only the container-side port is listed (no host
    # binding); otherwise each entry is host:container. Note Prometheus is
    # published on OSM_PROM_HOSTPORT (9091) because RO already uses 9090.
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist the environment consumed by docker-compose.yaml at deploy time
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # Deploy from the workdir so the relative '. ./osm_ports.sh' resolves;
    # the PLA compose file is stacked on top when --pla was requested
    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
853
# Pulls the ELK images, deploys the osm_elk swarm stack, then waits (up to
# 40s) for Kibana to answer before creating the default filebeat index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # drop any previous osm_elk stack before redeploying
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the manual commands instead of failing
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
903
# Main driver for the lightweight (containerized) OSM installation: checks
# prerequisites, sets up LXD/juju, docker (swarm or kubernetes), generates the
# env files and deploys the OSM services plus the optional add-ons.
function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Interface carrying the default route; 'exit' stops awk at the first
    # match so hosts with more than one default route still yield one value
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    # Query the interface directly instead of grepping the full address list
    # (which also matched similarly named interfaces, e.g. ens3 vs ens33) and
    # keep only the first IPv4 address
    DEFAULT_IP=$(ip -o -4 a show ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]; exit}')
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Resolve/bootstrap the juju (VCA) controller unless one was passed with -H
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            # Reuse an existing controller (-K): register the local LXD as a
            # cloud on it, generating a client certificate when none was given
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # indent the PEM blocks so they nest under the yaml block scalars
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fixed: the guard previously tested the literal string
        # "OSM_DATABASE_COMMONKEY" (missing $), so it could never fire
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # fixed: missing space before ']' made this test always error out,
        # silently skipping the PLA deployment
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1103
# Builds and starts the vim-emu (emulated VIM) docker container, then prints
# how to register it as a VIM in OSM.
function install_vimemu() {
    # fixed: -e so the leading \n prints as a newline instead of literally
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # fixed: -y keeps the install non-interactive, like every other apt call here
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1136
# Installs the OSM monitoring add-on on the k8s cluster by delegating to the
# devops helper script (made executable first, as it may ship without +x).
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
1142
# Removes a previously installed OSM monitoring add-on via the devops helper.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1147
# Prints the effective installer configuration (used by --showopts).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fixed: used to print $OSM_STACK_NAME here instead of the work dir
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1187
# Reports an installer milestone to the OSM telemetry endpoint (woopra).
# $1 - name of the milestone being tracked
function track(){
    local now elapsed flavour tracking_url
    now=$(date +%s)
    # duration since the installer started (SESSION_ID is the start timestamp)
    elapsed=$((now - SESSION_ID))
    tracking_url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #tracking_url="${tracking_url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # classify the installation flavour; lightweight takes precedence
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        flavour="lw"
    else
        flavour="bin"
        [ -n "$INSTALL_FROM_SOURCE" ] && flavour="binsrc"
        [ -n "$INSTALL_FROM_LXDIMAGES" ] && flavour="lxd"
    fi
    tracking_url="${tracking_url}&event=${flavour}_$1&ce_duration=${elapsed}"
    wget -q -O /dev/null $tracking_url
}
1201
1202 UNINSTALL=""
1203 DEVELOP=""
1204 UPDATE=""
1205 RECONFIGURE=""
1206 TEST_INSTALLER=""
1207 INSTALL_LXD=""
1208 SHOWOPTS=""
1209 COMMIT_ID=""
1210 ASSUME_YES=""
1211 INSTALL_FROM_SOURCE=""
1212 RELEASE="ReleaseSEVEN"
1213 REPOSITORY="stable"
1214 INSTALL_VIMEMU=""
1215 INSTALL_PLA=""
1216 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1217 LXD_REPOSITORY_PATH=""
1218 INSTALL_LIGHTWEIGHT="y"
1219 INSTALL_ONLY=""
1220 INSTALL_ELK=""
1221 TO_REBUILD=""
1222 INSTALL_NOLXD=""
1223 INSTALL_NODOCKER=""
1224 INSTALL_NOJUJU=""
1225 KUBERNETES=""
1226 INSTALL_K8S_MONITOR=""
1227 INSTALL_NOHOSTCLIENT=""
1228 SESSION_ID=`date +%s`
1229 OSM_DEVOPS=
1230 OSM_VCA_HOST=
1231 OSM_VCA_SECRET=
1232 OSM_VCA_PUBKEY=
1233 OSM_VCA_CLOUDNAME="localhost"
1234 OSM_STACK_NAME=osm
1235 NO_HOST_PORTS=""
1236 DOCKER_NOBUILD=""
1237 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1238 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1239 WORKDIR_SUDO=sudo
1240 OSM_WORK_DIR="/etc/osm"
1241 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1242 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1243 OSM_HOST_VOL="/var/lib/osm"
1244 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1245 OSM_DOCKER_TAG=latest
1246 DOCKER_USER=opensourcemano
1247 PULL_IMAGES="y"
1248 KAFKA_TAG=2.11-1.0.2
1249 PROMETHEUS_TAG=v2.4.3
1250 GRAFANA_TAG=latest
1251 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1252 PROMETHEUS_CADVISOR_TAG=latest
1253 KEYSTONEDB_TAG=10
1254 OSM_DATABASE_COMMONKEY=
1255 ELASTIC_VERSION=6.4.2
1256 ELASTIC_CURATOR_VERSION=5.5.4
1257 POD_NETWORK_CIDR=10.244.0.0/16
1258 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1259 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1260
# Command-line parsing. Short options take a value via $OPTARG; the '-:'
# pseudo-option dispatches GNU-style long options (--uninstall, --pla, ...).
# Fixed: removed a stray space inside the optstring (it made ' ' a valid
# option character) and corrected the -c error message, which referred to -i.
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # -o installs a single optional component and skips the main install
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            # -m accumulates the list of modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            # charmed-installer options below are accepted here but handled by
            # the charmed_install.sh script, which receives the full "$@"
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1400
# Sanity-check the -m (module rebuild) selection before doing any real work.
if [ -n "$TO_REBUILD" ]; then
    # "-m NONE" is exclusive: combining it with any other -m module is an error.
    if [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE; then
        FATAL "Incompatible option: -m NONE cannot be used with other -m options"
    fi
    # Rebuilding only PLA makes no sense unless PLA is being installed (--pla).
    if [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ]; then
        FATAL "Incompatible option: -m PLA cannot be used without --pla option"
    fi
fi

# --showopts: dump the effective configuration and quit without installing.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
1408
# --charmed: delegate the whole (un)install to the charmed installer scripts
# shipped with the osm-devops package, print osmclient setup hints and stop.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    # Post-install guidance for configuring the osmclient. The quoted
    # delimiter keeps the text literal (no shell expansion wanted).
    cat << 'EOF'
Your installation is now complete, follow these steps for configuring the osmclient:

1. Get the NBI IP with the following command:

juju status --format yaml | yq r - applications.nbi-k8s.address

2. Create the OSM_HOSTNAME environment variable with the NBI IP

export OSM_HOSTNAME=<NBI-IP>

3. Add the previous command to your .bashrc for other Shell sessions

export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc

DONE
EOF

    exit 0
fi
1434
1435 # if develop, we force master
1436 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1437
1438 need_packages="git wget curl tar"
1439 echo -e "Checking required packages: $need_packages"
1440 dpkg -l $need_packages &>/dev/null \
1441 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1442 || sudo apt-get update \
1443 || FATAL "failed to run apt-get update"
1444 dpkg -l $need_packages &>/dev/null \
1445 || ! echo -e "Installing $need_packages requires root privileges." \
1446 || sudo apt-get install -y $need_packages \
1447 || FATAL "failed to install $need_packages"
1448 sudo snap install jq
# Locate (or fetch) the osm-devops repo that drives the rest of the install.
# Sets OSM_DEVOPS and, when a temporary clone is made, a cleanup trap.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        # --test: this script is already running from a devops checkout; reuse
        # its top directory instead of cloning.
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname "$(realpath "$(dirname "$0")")")"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        # Remove the temporary clone on any exit path.
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS"

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Newest version tag by version-aware sort; the pattern is quoted
            # so the shell cannot glob-expand it against files in the cwd.
            LATEST_STABLE_DEVOPS=$(git -C "$OSM_DEVOPS" tag -l 'v[0-9].*' | sort -V | tail -n1)
            # Failing to find any stable tag is an error: report failure
            # (exit 1) instead of the previous misleading success status.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C "$OSM_DEVOPS" checkout "$COMMIT_ID"
    fi
fi
1473
1474 . $OSM_DEVOPS/common/all_funcs
1475
1476 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1477 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1478 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1479 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1480 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1481 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1482
#Installation starts here
# Harmless download whose output and errors are discarded; together with
# "track start" below it appears to mark the start of an installation for
# metrics purposes — NOTE(review): confirm intent.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Default (lightweight/containerized) install path: do it and stop here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source builds are long; ask for confirmation unless -y (ASSUME_YES) was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

# lxd is a hard requirement for the source-based install path.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Matching end-of-installation marker (see README.txt fetch above).
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"
1508