full_install_osm.sh: minor fix, no sudo for wget node_exporter, required for installa...
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
# Print command-line help for the installer.
# Fixes vs. previous version: typos in help text ("confifured" -> "configured",
# "moitoring" -> "monitoring") and the truncated "--nojuju: do not juju" line.
function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack)"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
#    echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
#    echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
    echo -e " --charmed: install OSM with charms"
    echo -e " --bundle <bundle path>: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " --kubeconfig <kubeconfig path>: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " --controller <name>: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " --lxd-cloud <yaml path>: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " --lxd-credentials <yaml path>: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " --microstack: Installs microstack as a vim. (--charmed option)"
    echo -e " --tag: Docker image tag"

}
73
74 # takes a juju/accounts.yaml file and returns the password specific
75 # for a controller. I wrote this using only bash tools to minimize
76 # additions of other packages
77 function parse_juju_password {
78 password_file="${HOME}/.local/share/juju/accounts.yaml"
79 local controller_name=$1
80 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
81 sed -ne "s|^\($s\):|\1|" \
82 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
83 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
84 awk -F$fs -v controller=$controller_name '{
85 indent = length($1)/2;
86 vname[indent] = $2;
87 for (i in vname) {if (i > indent) {delete vname[i]}}
88 if (length($3) > 0) {
89 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
90 if (match(vn,controller) && match($2,"password")) {
91 printf("%s",$3);
92 }
93 }
94 }'
95 }
96
generate_secret() {
    # Emit a 32-character random alphanumeric secret (no trailing newline),
    # used as a throw-away password for the OSM service databases.
    head /dev/urandom | tr -cd 'A-Za-z0-9' | head -c 32
}
100
remove_volumes() {
    # Kubernetes mode: $1 is the host directory backing the namespace volumes,
    # which is simply deleted. Swarm mode: $1 is the stack name, and each of
    # the OSM named docker volumes "<stack>_<volume>" is removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
        return
    fi
    stack=$1
    volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
    for volume in $volumes; do
        sg docker -c "docker volume rm ${stack}_${volume}"
    done
}
114
function remove_network() {
    # Delete the per-stack docker network ("net<stack>") for stack $1.
    # sg runs the docker CLI with the docker group active (no re-login needed).
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
119
function remove_iptables() {
    # Remove the DNAT rule that forwards host port 17070 (juju API) to the
    # VCA/juju controller, then persist the change. $1 is the stack name,
    # which is also the juju controller name.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Discover the controller IP from juju's api-endpoints output.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Find the interface carrying the default route, then its IPv4 address.
        DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C checks whether the rule exists; only then delete (-D) and persist.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
139
remove_stack() {
    # Remove docker stack $1 and wait (polling up to 30 seconds) until all of
    # its containers have disappeared; abort the installer via FATAL if they
    # never do. No-op when the stack does not exist.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # Count the remaining tasks of the stack; 0 lines means all gone.
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            [ "${result}" == "0" ] && break
            COUNTER=$((COUNTER+1))
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        # Give docker a moment to release networks/volumes before callers reuse them.
        sleep 5
    fi
}
163
164 #removes osm deployments and services
function remove_k8s_namespace() {
    # Delete kubernetes namespace $1, removing every OSM deployment and
    # service inside it in one shot.
    kubectl delete ns $1
}
168
169 #Uninstall lightweight OSM: remove dockers
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Tear down a lightweight OSM deployment.
    # With INSTALL_ONLY set, only the selected addon (currently ELK) is
    # removed; otherwise the full stack/namespace, module images, volumes,
    # network, NAT rules and the juju controller are removed.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else

            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp runs the here-doc with the docker group active, so the
        # docker CLI works even before the user re-logs after being added
        # to the docker group.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            # In k8s mode, volumes live under a host path per namespace.
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        # Host-level cleanup (NAT rule, juju controller) is skipped when an
        # external controller (CONTROLLER_NAME) is in use.
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
219
220 #Safe unattended install of iptables-persistent
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Install iptables-persistent non-interactively when it is missing.
    # Pre-seeding debconf keeps apt from prompting about saving current rules.
    echo -e "\nChecking required packages: iptables-persistent"
    # Fix: 'dpkg -l <pkg>' succeeds when the package IS present, so the
    # install branch must run on failure ('!'), not on success.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
230
231 #Configure NAT rules, based on the current IP addresses of containers
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Delegates the actual rule programming to the nat_osm helper script;
    # iptables-persistent must be present so the rules survive reboots.
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
239
FATAL(){
    # Report an unrecoverable installer error ($1 = reason) and abort.
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
244
function install_lxd() {
    # Install LXD from snap (purging any deb-based LXC/LXD first), preseed
    # its configuration, and align the default profile MTU with the host's
    # default-route interface.
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Preseed 'lxd init', exposing the LXD API on $DEFAULT_IP:8443.
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Match the container eth0 MTU to the default-route interface MTU so
    # tunneled/overlay setups do not silently fragment.
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
267
ask_user(){
    # Prompt with $1 and parse a yes/no answer, case-insensitive.
    # $2 selects the default for an empty answer: 'y' => yes, 'n' => no,
    # anything else (or unset) forces an explicit reply.
    # Returns 0 for yes, 1 for no.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
281
function install_osmclient(){
    # Install the OSM client and information model from the configured OSM
    # apt repository. RELEASE/REPOSITORY/REPOSITORY_BASE may still carry
    # their command-line flag prefixes ("-R ", "-r ", "-u "), stripped here.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # For non-lightweight installs, take the SO/RO IPs from the lxc containers.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
312
install_prometheus_nodeexporter(){
    # Deploy prometheus node_exporter as a systemd service on this host,
    # running as a dedicated system user. No-op if already active.
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
        return 0
    fi
    echo "Node Exporter is not active, installing..."
    if getent passwd node_exporter > /dev/null 2>&1; then
        echo "node_exporter user exists"
    else
        echo "Creating user node_exporter"
        sudo useradd --no-create-home --shell /bin/false node_exporter
    fi
    # NB: the download itself runs unprivileged on purpose; only the copies
    # into system paths need sudo.
    local tarball=node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64
    wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/$tarball.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/$tarball.tar.gz
    sudo cp /tmp/$tarball/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    sudo rm -rf /tmp/$tarball*
    sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
    echo "Node Exporter has been activated in this host."
    return 0
}
338
uninstall_prometheus_nodeexporter(){
    # Stop and fully remove the node_exporter systemd service, its binary
    # and its dedicated system user.
    local unit_file=/etc/systemd/system/node_exporter.service
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm $unit_file
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
348
function install_docker_ce() {
    # installs and configures Docker CE
    # Adds Docker's official apt repository, installs docker-ce, puts the
    # current user in the docker group, and verifies the daemon responds.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # 'sg docker' picks up the new group membership without re-login.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
368
install_docker_compose() {
    # Download the pinned docker-compose 1.18.0 release binary for this
    # OS/arch into /usr/local/bin and mark it executable.
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
376
install_juju() {
    # Install the juju snap (classic confinement) and make sure /snap/bin
    # is on PATH for the remainder of this script.
    echo "Installing juju"
    sudo snap install juju --classic
    if [[ ":$PATH": != *":/snap/bin:"* ]]; then
        PATH="/snap/bin:${PATH}"
    fi
    echo "Finished installation of juju"
    return 0
}
384
function juju_createcontroller() {
    # Bootstrap a juju controller named $OSM_STACK_NAME on cloud
    # $OSM_VCA_CLOUDNAME, unless a controller with that name already exists,
    # then verify it shows up in 'juju controllers'.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # NOTE(review): inside double quotes the $1 below is expanded by the
    # shell (empty within this function), so awk prints the whole matched
    # line rather than field 1 — the wc -l count still works, but this looks
    # unintended; confirm before changing.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
393
juju_createproxy() {
    # Add (once) a DNAT rule forwarding host port 17070 on $DEFAULT_IP to
    # the juju controller at $OSM_VCA_HOST, and persist it across reboots.
    check_install_iptables_persistent

    local rule=(-p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST)
    # -C only checks for the rule; append (-A) and save when it is missing.
    if ! sudo iptables -t nat -C PREROUTING "${rule[@]}"; then
        sudo iptables -t nat -A PREROUTING "${rule[@]}"
        sudo netfilter-persistent save
    fi
}
402
function generate_docker_images() {
    # Obtain every docker image OSM needs. Third-party infrastructure images
    # (zookeeper, kafka, mongo, prometheus, cadvisor, grafana, mariadb, mysql)
    # are always pulled. OSM module images are either pulled from
    # $DOCKER_USER (when PULL_IMAGES is set) or cloned at $COMMIT_ID and
    # built locally. TO_REBUILD, when set, restricts work to listed modules.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    # Arguments forwarded to the local "docker build" invocations.
    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db (mariadb) is required by both the NBI and KEYSTONE-DB rebuilds.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # MON: pull published image, or clone and build at COMMIT_ID.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    # POL
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when INSTALL_PLA is set (or explicitly listed).
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    # NBI also provides the keystone image (built from its keystone/ subdir).
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    # RO
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    # LCM
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    # LW-UI
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    # osmclient sidecar image is built from the local devops tree, not a clone.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # NOTE(review): this cadvisor pull repeats the PROMETHEUS-CADVISOR block
    # above under the PROMETHEUS key — looks redundant; confirm before removing.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
514
function cmp_overwrite() {
    # Install $1 as $2 unless both files already have identical content.
    # When $2 exists and differs, ask the user before overwriting; cp -b
    # keeps a "~" backup of the previous version either way.
    file1="$1"
    file2="$2"
    # Fix: the previous 'if ! $(cmp ...)' ran cmp inside a command
    # substitution and only worked through bash's empty-command fallback
    # exit status (ShellCheck SC2091); invoke cmp directly instead.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
526
function generate_docker_env_files() {
    # Generate (or refresh) the per-module env files under
    # $OSM_DOCKER_WORK_DIR, plus the compose/k8s deployment descriptors and
    # monitoring config. Existing env files are backed up first ("~" suffix).
    # Pattern used throughout: append a variable when the key is missing,
    # otherwise rewrite its value in place with sed.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # The next two are written commented out: documented knobs, off by default.
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    # Note: a fresh MYSQL_ROOT_PASSWORD is generated on every run, but it is
    # only written when the corresponding env files do not exist yet.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
685
function generate_osmclient_script () {
    # Writes a small wrapper script that runs the osmclient sidecar container
    # attached to the OSM overlay network, and marks it executable.
    local script="$OSM_DOCKER_WORK_DIR/osm"
    local run_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "$run_cmd" | $WORKDIR_SUDO tee "$script"
    $WORKDIR_SUDO chmod +x "$script"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
691
#installs the pinned kubernetes toolchain (kubelet/kubeadm/kubectl)
function install_kube() {
    # single pinned version for all three packages
    local k8s_pkg_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # add Google's apt key and the kubernetes repository
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${k8s_pkg_version} kubeadm=${k8s_pkg_version} kubectl=${k8s_pkg_version}
}
701
#initializes kubernetes control plane
function init_kubeadm() {
    # kubelet refuses to start while swap is enabled
    sudo swapoff -a
    # $1: path to the kubeadm cluster configuration file (cluster-config.yaml)
    sudo kubeadm init --config $1
    # give the control-plane components a moment to come up
    sleep 5
}
708
#copies the cluster-admin kubeconfig into the invoking user's ~/.kube
function kube_config_dir() {
    # sanity check that kubeadm actually produced its manifests
    if [ ! -d $K8S_MANIFEST_DIR ]; then
        FATAL "Cannot Install Kubernetes"
    fi
    local kube_dir="$HOME/.kube"
    mkdir -p "$kube_dir"
    sudo cp /etc/kubernetes/admin.conf "$kube_dir/config"
    # admin.conf is root-owned; hand the copy over to the current user
    sudo chown $(id -u):$(id -g) "$kube_dir/config"
}
715
#deploys flannel (the k8s CNI provider) as daemonsets
function deploy_cni_provider() {
    # download the manifest into a throw-away directory and apply it
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any trap set earlier by the caller
    # (e.g. the LWTEMPDIR cleanup in install_lightweight) — confirm intended.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    # fail early if the manifest cannot be fetched (wget was unchecked before)
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR \
        || FATAL "Cannot Install Flannel"
    # fix: the old '[ $? -ne 0 ] && FATAL' pattern made the function itself
    # return non-zero on SUCCESS (the test is the last command); '|| FATAL'
    # keeps the error check and a clean exit status
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
724
#creates the OSM namespace plus one k8s secret per component, each built
#from the env file that generate_docker_env_files produced for it
function kube_secrets(){
    local component
    kubectl create ns $OSM_STACK_NAME
    # secret name is <component>-secret, env file is <component>.env
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
737
#deploys osm pods and services
function deploy_osm_services() {
    # single awk: column 3 (ROLES) must contain "master"; print its NAME
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/{print $1}')
    # allow workload pods to be scheduled on the (single-node) master
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
745
#deploys the optional PLA module: retags its image, repoints host volume
#paths at the per-stack directory, then applies its manifests
function deploy_osm_pla_service() {
    local pla_manifest="$OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml"
    # corresponding to parse_yaml: pin the pla image to the requested tag
    if [ "$OSM_DOCKER_TAG" != "7" ]; then
        $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" "$pla_manifest"
    fi
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$pla_manifest"
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
754
#rewrites every component manifest so its opensourcemano image uses tag $1
function parse_yaml() {
    local image_tag=$1
    local component
    for component in nbi lcm ro pol mon light-ui keystone; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$component:.*/opensourcemano\/$component:$image_tag/g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
762
#repoints the hostPath volumes of the stateful component manifests
#from the default /var/lib/osm to the per-stack directory
function namespace_vol() {
    local component
    for component in nbi lcm ro pol mon kafka mongo mysql; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
769
# Initializes a single-node docker swarm. With a non-default MTU the implicit
# docker_gwbridge would be created with MTU 1500 and break overlay traffic, so
# it is pre-created here with the detected MTU before "swarm init" runs.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # names of all existing docker networks (header row filtered out)
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # highest 172.x subnet currently in use, bumped by one in the second
        # octet to get a free subnet for the gateway bridge ("-1" on overflow)
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
779
#creates the attachable overlay network shared by all OSM containers
function create_docker_network() {
    echo "creating network"
    local net_opts="--driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU}"
    sg docker -c "docker network create ${net_opts} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
785
# Deploys the OSM docker stack on the already-initialized swarm:
# computes the published port mappings, writes them to osm_ports.sh
# (sourced by the docker-compose files) and runs "docker stack deploy".
function deploy_lightweight() {

    echo "Deploying lightweight build"
    # default internal ports of each service
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    # prometheus is published on 9091 on the host (9090 collides with RO)
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # --nohostports: expose the ports only inside the overlay network
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # default: publish each port on the host as host:container
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # persist ports, network name and image tags for the compose files
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # deploy from the work dir so ". ./osm_ports.sh" resolves
    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        # include the PLA compose file on top of the base stack
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
844
# Deploys the ELK (Elasticsearch/Logstash-beats/Kibana) monitoring stack as a
# separate docker stack "osm_elk", waits for Kibana and creates the default
# filebeat index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    # stage the compose files and redeploy the stack from scratch
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # poll Kibana's status endpoint for up to $timelength seconds
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never came up: print the manual commands instead of failing
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
894
# Installs OSM in "lightweight" mode: all services run as docker containers,
# orchestrated either by docker swarm (default) or kubernetes (-c k8s).
# Consumes the global option variables set by the option parser (KUBERNETES,
# OSM_STACK_NAME, OSM_VCA_*, INSTALL_*); reports progress via track().
function install_lightweight() {
    # per-stack work dir and (for k8s) manifest dir / host volume path
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # detect the interface/IP/MTU of the default route
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # obtain (or create) the juju controller and its connection data
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            # reuse an existing controller: register the local lxd as a cloud
            # on it, generating a client certificate if no files were given
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # indent the PEM blocks so they stay valid YAML literals below
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fix: the guard tested the literal string "OSM_DATABASE_COMMONKEY"
        # (missing '$'), so a failed generate_secret was never detected
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        # fix: the test was written '[ -n "$INSTALL_PLA"]' (no space before
        # ']'), which always errored out, so PLA was never deployed on k8s
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    # final telemetry ping marking the end of the installation
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}
1093
# Builds and starts the vim-emu (emulated VIM) docker container for test/demo
# use, then prints how to register it as a VIM in OSM.
function install_vimemu() {
    # fix: plain echo printed a literal "\n"; -e interprets the escape,
    # matching the echo -e usage elsewhere in this installer
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # fix: added -y so the unattended install does not block on a prompt,
    # consistent with every other apt-get install in this script
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1126
#installs the OSM monitoring stack for the k8s deployment via the helper
#scripts shipped in the devops tree
function install_k8s_monitoring() {
    local k8s_installers_dir="$OSM_DEVOPS/installers/k8s"
    # the helper scripts are not guaranteed to be executable when unpacked
    $WORKDIR_SUDO chmod +x "$k8s_installers_dir"/*.sh
    $WORKDIR_SUDO "$k8s_installers_dir/install_osm_k8s_monitoring.sh"
}
1132
#removes the OSM monitoring stack from the k8s deployment
function uninstall_k8s_monitoring() {
    local uninstaller="$OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh"
    $WORKDIR_SUDO "$uninstaller"
}
1137
# Prints the effective value of every installer option/variable (-o showopts).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fix: this line used to print $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1177
# Reports an anonymous installation-progress event to the OSM telemetry
# endpoint. $1 is the step name; the event is prefixed with the install
# flavour (bin / binsrc / lxd / lw) and carries the elapsed time since start.
function track(){
    local now elapsed event tracking_url
    now=$(date +%s)
    elapsed=$((now - SESSION_ID))
    tracking_url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #tracking_url="${tracking_url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event="lw"
    event="${event}_$1"
    tracking_url="${tracking_url}&event=${event}&ce_duration=${elapsed}"
    wget -q -O /dev/null $tracking_url
}
1191
# ---------------------------------------------------------------------------
# Default values for every installer option; overridden by the getopts loop
# below.  Empty string means "feature disabled / not requested".
# ---------------------------------------------------------------------------
# action / mode flags
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
# package repository defaults
RELEASE="ReleaseSEVEN"
REPOSITORY="stable"
# optional components
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
# "skip prerequisite" toggles
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# anonymous telemetry session (also the install start timestamp)
SESSION_ID=`date +%s`
OSM_DEVOPS=
# VCA (juju) connection data; autodetected later when left empty
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# work dirs are root-owned by default; -w drops the sudo prefix
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# docker image coordinates and pinned third-party image tags
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
# kubernetes cluster settings
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# valid k8s namespace names (RFC 1123 label)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1250
# ---------------------------------------------------------------------------
# Command-line option parsing.  Short options via getopts; long options are
# funneled through the pseudo-option "-:" and dispatched on $OPTARG.
# NOTE(review): the optstring contains a stray space before "hy" — it appears
# harmless (space never matches a flag) but looks unintentional; confirm.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # fix: the message used to say "-i", but this is the -c option
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # for k8s the stack name doubles as the namespace and must match RE_CHECK
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # long options (--foo); unrecognised values fall through to usage
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            # the following are consumed only by the charmed installer; accepted
            # here so they do not trigger the invalid-option error
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1390
# Sanity checks on the accumulated module-rebuild selection (-m options).
# "-m NONE" is exclusive: it must not be combined with any other module.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ]; then
    echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi
# Rebuilding only PLA requires that PLA installation was requested too.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ]; then
    FATAL "Incompatible option: -m PLA cannot be used without --pla option"
fi

# --showopts: dump the resolved option variables and stop without installing.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
1398
if [ -n "$CHARMED" ]; then
    # Charmed deployment path: delegate to the charmed (un)installer shipped
    # with the osm-devops package, forwarding every original CLI option via "$@".
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    # Post-install guidance for configuring osmclient against the charmed NBI.
    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    # BUGFIX: the previously printed command ("export VAR=... >> ~/.bashrc")
    # would not append anything to .bashrc (it only redirects export's empty
    # stdout). The user must echo the export line into the file instead.
    echo "echo \"export OSM_HOSTNAME=<previous-IP>\" >> ~/.bashrc"
    echo
    echo "DONE"

    exit 0
fi
1424
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Make sure the tools needed to fetch and unpack OSM are present; only touch
# apt (which needs root) when at least one of them is missing.
need_packages="git wget curl tar"
echo -e "Checking required packages: $need_packages"
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "One or several required packages are not installed. Updating apt cache requires root privileges."
    sudo apt-get update || FATAL "failed to run apt-get update"
fi
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "Installing $need_packages requires root privileges."
    sudo apt-get install -y $need_packages || FATAL "failed to install $need_packages"
fi
# jq is delivered as a snap on the supported Ubuntu releases.
sudo snap install jq
# Resolve OSM_DEVOPS: the directory containing the devops tree driving the
# installation. Honour a pre-set OSM_DEVOPS; with --test use the local repo
# this script lives in; otherwise clone the upstream devops repo into a
# temporary directory (removed on exit) and check out the requested refspec.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # Latest stable release = highest version-sorted v* tag.
            # The tag pattern is quoted so the shell cannot glob-expand it
            # against files in the current directory.
            LATEST_STABLE_DEVOPS="$(git -C $OSM_DEVOPS tag -l 'v[0-9].*' | sort -V | tail -n1)"
            # BUGFIX: exit with a failure status here; the previous "exit 0"
            # reported success even though no release could be determined.
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1463
# Load shared helper functions (FATAL, ask_user, track, install_*/deploy_*/
# uninstall_* used below) from the devops tree resolved above.
. $OSM_DEVOPS/common/all_funcs

# Uninstall of a lightweight deployment: tear it down and stop here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
# Install-only mode: deploy just the requested optional components, then exit.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# NOTE(review): this fetch looks like an installation-tracking beacon against
# the OSM download server (paired with "track start"); output and failures
# are deliberately discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
track start

# Lightweight (default docker-based) installation path; exits when done.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source installs are long; ask for confirmation unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Matching end-of-installation beacon (failures ignored), then final tracking.
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
track end
echo -e "\nDONE"
1498