full_install_osm.sh: fix k8s installation, node must be tainted before juju_addk8s...
[osm/devops.git] / installers / full_install_osm.sh
#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "    -r <repo>: use specified repository name for osm packages"
    echo -e "    -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "    -u <repo base>: use specified repository url for osm packages"
    echo -e "    -k <repo key>: use specified repository public key url"
    echo -e "    -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "        -b master      (main dev branch)"
    echo -e "        -b v2.0        (v2.0 branch)"
    echo -e "        -b tags/v1.1.0 (a specific tag)"
    echo -e "        ..."
    echo -e "    -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "    -n <ui>: install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with uninstall, osm along with the specified UI will be uninstalled"
    echo -e "    -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s (default: osm)"
    echo -e "    -H <VCA host>: use specific juju host controller IP"
    echo -e "    -S <VCA secret>: use VCA/juju secret key"
    echo -e "    -P <VCA pubkey>: use VCA/juju public key file"
    echo -e "    -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e "    -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e "    --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "    --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "    --pla: install the PLA module for placement support"
    echo -e "    -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "    -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "    -O <openrc file/cloud name>: install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "    -N <openstack public network name/ID>: public network name required to set up OSM on OpenStack"
    echo -e "    -D <devops path>: use local devops installation path"
    echo -e "    -w <work dir>: location to store runtime installation"
    echo -e "    -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e "    -l: LXD cloud yaml file"
    echo -e "    -L: LXD credentials yaml file"
    echo -e "    -K: specifies the name of the controller to use - the controller must be already bootstrapped"
    echo -e "    --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "    --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "    --nojuju: do not install juju, assumes it is already installed"
    echo -e "    --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "    --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "    --nohostclient: do not install the osmclient"
    echo -e "    --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "    --source: install OSM from source code using the latest stable tag"
    echo -e "    --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "    --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "    --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "    --volume: create a VM volume when installing to OpenStack"
#    echo -e "    --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "    --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "    --showopts: print chosen options and exit (only for debugging)"
    echo -e "    -y: do not prompt for confirmation, assumes yes"
    echo -e "    -h / --help: print this help"
    echo -e "    --charmed: deploy and operate OSM with Charms on k8s"
    echo -e "    [--bundle <bundle path>]: specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "    [--k8s <kubeconfig path>]: specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "    [--vca <name>]: specifies the name of the controller to use - the controller must be already bootstrapped (--charmed option)"
    echo -e "    [--lxd <yaml path>]: takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "    [--lxd-cred <yaml path>]: takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "    [--microstack]: installs microstack as a VIM (--charmed option)"
    echo -e "    [--ha]: installs the High Availability bundle (--charmed option)"
    echo -e "    [--tag]: Docker image tag"
}

# Takes a juju accounts.yaml file and returns the password for a given
# controller. Written using only standard shell tools (sed/awk) to avoid
# pulling in additional package dependencies.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
         -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
         -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
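# Illustrative usage of parse_juju_password. The accounts.yaml layout below is
# an assumption based on the juju 2.x client, shown only to document what the
# sed/awk pipeline above expects:
#
#   controllers:
#     osm:
#       user: admin
#       password: 86f8d9e0f4...
#
#   OSM_VCA_SECRET=$(parse_juju_password osm)   # -> 86f8d9e0f4...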

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    # double quotes so that ${OSM_DEVOPS} is expanded and the actual crontab
    # entry is matched (single quotes would grep for the literal string)
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        # the UI image is removed below, according to the installed UI flavour
        if [ -n "$NGUI" ]; then
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
EONG
        else
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
EONG
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    remove_crontab_job
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action: 'y' for yes, 'n' for no; other or empty means an explicit answer is required
    # Return: true (0) if the user answers 'yes'; false (1) if the user answers 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
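# Illustrative usage of ask_user (the prompt text is hypothetical). An empty
# answer takes the default passed in $2; anything else must be yes/y or no/n:
#   ask_user "Remove the existing stack (y/N)? " n || echo "Keeping stack"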

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "    export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "    export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "    export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
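# Once active, the exporter can be checked with a plain HTTP request
# (9100 is node_exporter's default listen port):
#   curl -s http://localhost:9100/metrics | head
#   systemctl status node_exporter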

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # controller not found, bootstrap it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
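# The bootstrap above is roughly equivalent to running, as a member of the
# lxd group (with the defaults this installer uses for cloud and stack name,
# e.g. "localhost" and "osm"):
#   juju bootstrap --bootstrap-series=xenial localhost osm
#   juju controllers   # the new controller should appear in the list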

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
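# The resulting rule DNATs the Juju API port (17070) on the host's default IP
# to the controller, so that proxied charms can reach it. It can be inspected
# with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070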

function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    echo "Finished generation of docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
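# Illustrative usage: install a config file but ask before clobbering a
# locally modified copy (the source path here is hypothetical):
#   cmp_overwrite ./defaults/lcm.env $OSM_DOCKER_WORK_DIR/lcm.env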

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
        [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        if [ -n "$NGUI" ]; then
            # For NG-UI
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        else
            # Docker-compose
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        fi
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
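# The generated wrapper simply starts an interactive osmclient container
# attached to the OSM overlay network, e.g.:
#   $OSM_DOCKER_WORK_DIR/osm     # drops you into a shell with the osm CLI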

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
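# init_kubeadm is called with $OSM_DOCKER_WORK_DIR/cluster-config.yaml (copied
# from devops). A minimal kubeadm config of the kind expected here would look
# roughly like the sketch below (an assumption for illustration; the real file
# ships with the devops repo):
#
#   apiVersion: kubeadm.k8s.io/v1beta1
#   kind: ClusterConfiguration
#   networking:
#     podSubnet: 10.244.0.0/16   # flannel's default pod CIDR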

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
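# The flannel daemonset pods land in kube-system; a quick readiness check:
#   kubectl -n kube-system get pods -l app=flannel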

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#removes the NoSchedule taint from the K8s master node, so that pods can be scheduled on it
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
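# Note: the trailing "-" in the kubectl taint command above *removes* the
# node-role.kubernetes.io/master:NoSchedule taint, which is what allows OSM
# pods to run on a single-node cluster. The inverse operation would be:
#   kubectl taint node $K8S_MASTER node-role.kubernetes.io/master=:NoSchedule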

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ "$OSM_DOCKER_TAG" != "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            counter=$((counter + 2))
            sleep 2
        done
    fi
}
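# A successful installation can be verified with (helm 2.x client/server):
#   helm version    # should report both Client and Server as v2.15.x
#   kubectl -n kube-system get pods -l app=helm,name=tiller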

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
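# Illustrative effect: "parse_yaml 8" rewrites the image references in the pod
# manifests, e.g. "opensourcemano/lcm:7" becomes "${DOCKER_USER}/lcm:8".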

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 |  awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
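# After initialization the swarm state can be confirmed with:
#   sg docker -c "docker info --format '{{.Swarm.LocalNodeState}}'"   # -> active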

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}
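# The registration can be checked afterwards from the osm CLI, e.g.:
#   osm k8scluster-list    # should list _system-osm-k8s
#   osm vim-list           # should list _system-osm-vim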

function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

1147 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1148 track docker_ce
1149
1150 #Installs Kubernetes and deploys osm services
1151 if [ -n "$KUBERNETES" ]; then
1152 install_kube
1153 track install_k8s
1154 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1155 kube_config_dir
1156 track init_k8s
1157 else
1158 #install_docker_compose
1159 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1160 track docker_swarm
1161 fi
1162
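    # Build (or pull, depending on the options) the OSM docker images,
    # unless --nodockerbuild was given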
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM monitoring
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        # remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ "$OSM_DOCKER_TAG" != "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
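        # The node taint must be adjusted before OSM services are deployed and
        # before juju_addk8s registers the cluster; keep this ordering.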
        taint_master_node
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        install_k8s_storageclass
        track k8s_storageclass
        juju_addk8s
        track juju_addk8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM monitoring
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        if ! $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k; then
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        fi
    else
        if ! $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME}; then
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        fi
    fi
    track after_healthcheck

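    # Register the local kubernetes cluster in OSM so it can be used as a
    # deployment target for K8s-based network functions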
    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, the OpenStack client and the OpenStack SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
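    # If $1 is an existing file it is sourced as an openrc file; otherwise it is
    # passed as cloud_name and must match an entry in clouds.yaml.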
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return 0
}
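# Example invocation (illustrative values): install_to_openstack ~/openrc.sh public-network-name false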

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build the vim-emu docker image
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
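    # Send an anonymous usage event named "<mode>_$1" to the OSM telemetry endpoint;
    # duration is seconds since the installer started (SESSION_ID is the start timestamp).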
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}

UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES=""
NGUI=""
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
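# RE_CHECK matches valid k8s namespace names (a DNS-1123 label: lowercase
# alphanumerics and '-', starting and ending with an alphanumeric).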

while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && continue
            [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
            echo -e "Invalid argument for -n : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"

        echo "Your installation is now complete, follow these steps for configuring the osmclient:"
        echo
        echo "1. Get the NBI IP with the following command:"
        echo
        echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
        echo
        echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
        echo
        echo "export OSM_HOSTNAME=\$NBI_IP"
        echo
        echo "3. Add the previous command to your .bashrc for other shell sessions"
        echo
        echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
        echo
        echo "DONE"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
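# The '|| ! echo ... || cmd' chains below run 'cmd' only when the dpkg check fails:
# the message is printed, '!' turns the successful echo into a failure, and control
# falls through to the next '||' branch.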
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
track end
echo -e "\nDONE"