feat8157-vnfindicators: added local k8s cluster to deploy snmp exporters
[osm/devops.git] / installers / full_install_osm.sh
#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>: use specified repository name for osm packages"
    echo -e "     -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                   -b master          (main dev branch)"
    echo -e "                   -b v2.0            (v2.0 branch)"
    echo -e "                   -b tags/v1.1.0     (a specific tag)"
    echo -e "                   ..."
    echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -n <ui>: install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with --uninstall, osm along with the specified UI will be uninstalled"
    echo -e "     -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>: use specific juju host controller IP"
    echo -e "     -S <VCA secret>: use VCA/juju secret key"
    echo -e "     -P <VCA pubkey>: use VCA/juju public key file"
    echo -e "     -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e "     --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla: install the PLA module for placement support"
    echo -e "     -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "     -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file/cloud name>: install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: public network name required to setup OSM to OpenStack"
    echo -e "     -D <devops path>: use local devops installation path"
    echo -e "     -w <work dir>: location to store runtime installation"
    echo -e "     -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e "     -l <lxd cloud yaml>: LXD cloud yaml file"
    echo -e "     -L <lxd credentials yaml>: LXD credentials yaml file"
    echo -e "     -K <controller name>: specifies the name of the controller to use - the controller must be already bootstrapped"
    echo -e "     --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju: do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source: install OSM from source code using the latest stable tag"
    echo -e "     --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume: create a VM volume when installing to OpenStack"
#    echo -e "     --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts: print chosen options and exit (only for debugging)"
    echo -e "     -y: do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help: print this help"
    echo -e "     --charmed: deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: specifies the name of the controller to use - the controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: takes a YAML file as a parameter with the LXD cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: takes a YAML file as a parameter with the LXD credentials information (--charmed option)"
    echo -e "     [--microstack]: installs microstack as a VIM (--charmed option)"
    echo -e "     [--ha]: installs the High Availability bundle (--charmed option)"
    echo -e "     [--tag]: docker image tag"

}

# takes a juju/accounts.yaml file and returns the password for a specific
# controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
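
# Illustrative only: given an accounts.yaml like the hypothetical snippet
# below, "parse_juju_password osm" would print "s3cr3t":
#
#   controllers:
#     osm:
#       user: admin
#       password: s3cr3t
#
# Typical use (as done later in this installer):
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)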

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$NGUI" ]; then
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
EONG
        else
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
EONG
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user types 'yes'; false(1) if user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
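
# Illustrative only (hypothetical file names): since ask_user returns success
# for "y"/"yes", it can gate a step directly, as cmp_overwrite does below:
#   ask_user "Overwrite the file (y/N)? " n && cp new.conf /etc/osm.conf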

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "The OSM client assumes that the OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
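
# Illustrative only: once the service is active, node_exporter answers on its
# default port 9100, which a quick manual check can confirm:
#   curl -s http://localhost:9100/metrics | head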

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
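
# Illustrative only: the DNAT rule added above forwards Juju API traffic that
# reaches this host on port 17070 to the VCA/Juju controller. It can be
# inspected with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070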

function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
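
# Illustrative only: TO_REBUILD holds the module names passed with -m; each
# block above runs when TO_REBUILD is empty (full install) or names its
# module. A hypothetical partial rebuild of just the NBI image would be:
#   ./full_install_osm.sh -m NBI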

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
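
# Illustrative only (hypothetical file names): copy a freshly generated config
# over an existing one, asking first and keeping a backup (cp -b):
#   cmp_overwrite $OSM_DOCKER_WORK_DIR/docker-compose.yaml.new $OSM_DOCKER_WORK_DIR/docker-compose.yaml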

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
        [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
    else
        if [ -n "$NGUI" ]; then
            # For NG-UI
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        else
            # Docker-compose
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        fi
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
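
# Illustrative only: after this function runs, lcm.env holds one KEY=value per
# setting, along the lines of this hypothetical excerpt:
#   OSMLCM_DATABASE_COMMONKEY=<32-char random secret>
#   OSMLCM_VCA_HOST=10.0.0.10
#   OSMLCM_VCA_SECRET=<juju controller password>
#   OSMLCM_VCA_CLOUD=<cloud name>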

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
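
# Illustrative only: each secret created above is meant to be consumed by the
# matching deployment as environment variables, e.g. a hypothetical manifest
# fragment for the LCM pod:
#   envFrom:
#   - secretRef:
#       name: lcm-secret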

#deploys osm pods and services
function deploy_osm_services() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            counter=$((counter + 2))
            sleep 2
        done
    fi
}
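
# Illustrative only: whether Tiller came up within the timeout can be checked
# by hand with the same resource the loop above polls:
#   kubectl -n kube-system get deployment.apps/tiller-deploy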

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
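
# Illustrative only: for TAG=8 and DOCKER_USER=opensourcemano, the sed above
# rewrites the image reference in each manifest like so (hypothetical line):
#   before:  image: opensourcemano/lcm:latest
#   after:   image: opensourcemano/lcm:8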

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}
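
# Illustrative only: after installation, the dummy VIM and the registered
# internal cluster can be checked from the osmclient, e.g.:
#   osm vim-list
#   osm k8scluster-list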
996
997 function install_lightweight() {
998 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
999 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1000 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1001 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1002 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1003
1004 track checkingroot
1005 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1006 track noroot
1007
1008 if [ -n "$KUBERNETES" ]; then
1009 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1010 1. Install and configure LXD
1011 2. Install juju
1012 3. Install docker CE
1013 4. Disable swap space
1014 5. Install and initialize Kubernetes
1015 as pre-requirements.
1016 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1017
1018 else
1019 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1020 fi
1021 track proceed
1022
1023 echo "Installing lightweight build of OSM"
1024 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1025 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1026 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1027 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1028 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1029 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1030 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1031 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1032
1033 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1034 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1035 need_packages_lw="snapd"
1036 echo -e "Checking required packages: $need_packages_lw"
1037 dpkg -l $need_packages_lw &>/dev/null \
1038 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1039 || sudo apt-get update \
1040 || FATAL "failed to run apt-get update"
1041 dpkg -l $need_packages_lw &>/dev/null \
1042 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1043 || sudo apt-get install -y $need_packages_lw \
1044 || FATAL "failed to install $need_packages_lw"
1045 install_lxd
1046 fi
1047
1048 track prereqok
1049
1050 [ -z "$INSTALL_NOJUJU" ] && install_juju
1051 track juju_install
1052
1053 if [ -z "$OSM_VCA_HOST" ]; then
1054 if [ -z "$CONTROLLER_NAME" ]; then
1055 if [ -n "$LXD_CLOUD_FILE" ]; then
1056 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1057 OSM_VCA_CLOUDNAME="lxd-cloud"
1058 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1059 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1060 fi
1061 juju_createcontroller
1062 else
1063 OSM_VCA_CLOUDNAME="lxd-cloud"
1064 if [ -n "$LXD_CLOUD_FILE" ]; then
1065 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1066 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1067 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1068 else
1069 mkdir -p ~/.osm
1070 cat << EOF > ~/.osm/lxd-cloud.yaml
1071 clouds:
1072 lxd-cloud:
1073 type: lxd
1074 auth-types: [certificate]
1075 endpoint: "https://$DEFAULT_IP:8443"
1076 config:
1077 ssl-hostname-verification: false
1078 EOF
1079 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1080 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1081 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1082 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1083 cat << EOF > ~/.osm/lxd-credentials.yaml
1084 credentials:
1085 lxd-cloud:
1086 lxd-cloud:
1087 auth-type: certificate
1088 server-cert: |
1089 $server_cert
1090 client-cert: |
1091 $client_cert
1092 client-key: |
1093 $client_key
1094 EOF
1095 lxc config trust add local: ~/.osm/client.crt
1096 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1097 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1098 fi
1099 fi
1100 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1101 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1102 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1103 fi
1104 track juju_controller
1105
1106 if [ -z "$OSM_VCA_SECRET" ]; then
1107 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1108 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1109 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1110 fi
1111 if [ -z "$OSM_VCA_PUBKEY" ]; then
1112 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1113 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1114 fi
1115 if [ -z "$OSM_VCA_CACERT" ]; then
1116 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1117 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1118 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1119 fi
1120 if [ -z "$OSM_VCA_APIPROXY" ]; then
1121 OSM_VCA_APIPROXY=$DEFAULT_IP
1122 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1123 fi
1124 juju_createproxy
1125 track juju
1126
1127 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1128 OSM_DATABASE_COMMONKEY=$(generate_secret)
1129 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1130 fi
1131
1132 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1133 track docker_ce
1134
1135 #Installs Kubernetes and deploys osm services
1136 if [ -n "$KUBERNETES" ]; then
1137 install_kube
1138 track install_k8s
1139 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1140 kube_config_dir
1141 track init_k8s
1142 else
1143 #install_docker_compose
1144 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1145 track docker_swarm
1146 fi
1147
1148 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1149 track docker_build
1150
1151 generate_docker_env_files
1152
1153 if [ -n "$KUBERNETES" ]; then
1154 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1155 # uninstall OSM MONITORING
1156 uninstall_k8s_monitoring
1157 track uninstall_k8s_monitoring
1158 fi
1159 #remove old namespace
1160 remove_k8s_namespace $OSM_STACK_NAME
1161 deploy_cni_provider
1162 kube_secrets
1163 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
1164 namespace_vol
1165 deploy_osm_services
if [ -n "$INSTALL_PLA" ]; then
1167 # optional PLA install
1168 deploy_osm_pla_service
1169 fi
1170 track deploy_osm_services_k8s
1171 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1172 # install OSM MONITORING
1173 install_k8s_monitoring
1174 track install_k8s_monitoring
1175 fi
1176 else
1177 # remove old stack
1178 remove_stack $OSM_STACK_NAME
1179 create_docker_network
1180 deploy_lightweight
1181 generate_osmclient_script
1182 track docker_deploy
1183 install_prometheus_nodeexporter
1184 track nodeexporter
1185 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1186 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1187 fi
1188
1189 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1190 track osmclient
1191
echo -e "Checking OSM health state..."
if [ -n "$KUBERNETES" ]; then
if ! $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k; then
echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
track osm_unhealthy
fi
else
if ! $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME}; then
echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
track osm_unhealthy
fi
fi
1204 track after_healthcheck
1205
1206 [ -n "$KUBERNETES" ] && add_local_k8scluster
1207 track add_local_k8scluster
1208
1209
1210 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1211 track end
1212 return 0
1213 }
1214
1215 function install_to_openstack() {
1216
1217 if [ -z "$2" ]; then
1218 FATAL "OpenStack installer requires a valid external network name"
1219 fi
1220
1221 # Install Pip for Python3
1222 $WORKDIR_SUDO apt install -y python3-pip
1223 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1224
1225 # Install Ansible, OpenStack client and SDK
1226 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"
1227
1228 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1229
1230 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1231
1232 # Execute the Ansible playbook based on openrc or clouds.yaml
1233 if [ -e "$1" ]; then
1234 . $1
1235 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1236 -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
1237 else
1238 ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
1239 -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1240 fi
1241
1242 return 0
1243 }
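# Illustrative invocations of install_to_openstack (argument values are examples only):
#   install_to_openstack ~/openrc.sh public false   # openrc file + external network, no volume
#   install_to_openstack mycloud public true        # cloud name from clouds.yaml, attach a volume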
1244
1245 function install_vimemu() {
echo -e "\nInstalling vim-emu"
1247 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1248 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1249 # install prerequisites (OVS is a must for the emulator to work)
sudo apt-get install -y openvswitch-switch
1251 # clone vim-emu repository (attention: branch is currently master only)
1252 echo "Cloning vim-emu repository ..."
1253 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1254 # build vim-emu docker
1255 echo "Building vim-emu Docker container..."
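# 'sg docker -c "..."' runs the command with 'docker' as the effective group,
# so docker can be used even before the current session has picked up the
# group membership granted during docker installation.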
1256
1257 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1258 # start vim-emu container as daemon
1259 echo "Starting vim-emu Docker container 'vim-emu' ..."
1260 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1261 # in lightweight mode, the emulator needs to be attached to netOSM
1262 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1263 else
1264 # classic build mode
1265 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1266 fi
1267 echo "Waiting for 'vim-emu' container to start ..."
1268 sleep 5
1269 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1270 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1271 # print vim-emu connection info
1272 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1273 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
echo -e "To add the emulated VIM to OSM, run:"
1275 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
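# Once registered, the emulated VIM can be checked with the osmclient, e.g.:
#   osm vim-list
#   osm vim-show emu-vim1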
1276 }
1277
1278 function install_k8s_monitoring() {
1279 # install OSM monitoring
1280 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1281 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1282 }
1283
1284 function uninstall_k8s_monitoring() {
1285 # uninstall OSM monitoring
1286 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1287 }
1288
1289 function dump_vars(){
1290 echo "DEVELOP=$DEVELOP"
1291 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1292 echo "UNINSTALL=$UNINSTALL"
1293 echo "UPDATE=$UPDATE"
1294 echo "RECONFIGURE=$RECONFIGURE"
1295 echo "TEST_INSTALLER=$TEST_INSTALLER"
1296 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1297 echo "INSTALL_PLA=$INSTALL_PLA"
1298 echo "INSTALL_LXD=$INSTALL_LXD"
1299 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1300 echo "INSTALL_ONLY=$INSTALL_ONLY"
1301 echo "INSTALL_ELK=$INSTALL_ELK"
1302 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1303 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1304 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1305 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1306 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1307 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1308 echo "TO_REBUILD=$TO_REBUILD"
1309 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1310 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1311 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1312 echo "RELEASE=$RELEASE"
1313 echo "REPOSITORY=$REPOSITORY"
1314 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1315 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1316 echo "OSM_DEVOPS=$OSM_DEVOPS"
1317 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1318 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1319 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1320 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1321 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1322 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1323 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1324 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1325 echo "DOCKER_USER=$DOCKER_USER"
1326 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1327 echo "PULL_IMAGES=$PULL_IMAGES"
1328 echo "KUBERNETES=$KUBERNETES"
1329 echo "NGUI=$NGUI"
1330 echo "SHOWOPTS=$SHOWOPTS"
1331 echo "Install from specific refspec (-b): $COMMIT_ID"
1332 }
1333
1334 function track(){
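# Reports installer progress (event name + elapsed time) to the OSM tracking
# endpoint. SESSION_ID doubles as the start timestamp, so 'duration' is the
# number of seconds elapsed since the installer started.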
1335 ctime=`date +%s`
1336 duration=$((ctime - SESSION_ID))
1337 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1338 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1339 event_name="bin"
1340 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1341 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1342 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1343 event_name="${event_name}_$1"
1344 url="${url}&event=${event_name}&ce_duration=${duration}"
1345 wget -q -O /dev/null $url
1346 }
1347
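# Default values for installer variables (overridden by the options parsed below)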
1348 UNINSTALL=""
1349 DEVELOP=""
1350 UPDATE=""
1351 RECONFIGURE=""
1352 TEST_INSTALLER=""
1353 INSTALL_LXD=""
1354 SHOWOPTS=""
1355 COMMIT_ID=""
1356 ASSUME_YES=""
1357 INSTALL_FROM_SOURCE=""
1358 RELEASE="ReleaseEIGHT"
1359 REPOSITORY="stable"
1360 INSTALL_VIMEMU=""
1361 INSTALL_PLA=""
1362 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1363 LXD_REPOSITORY_PATH=""
1364 INSTALL_LIGHTWEIGHT="y"
1365 INSTALL_TO_OPENSTACK=""
1366 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1367 OPENSTACK_PUBLIC_NET_NAME=""
1368 OPENSTACK_ATTACH_VOLUME="false"
1369 INSTALL_ONLY=""
1370 INSTALL_ELK=""
1371 TO_REBUILD=""
1372 INSTALL_NOLXD=""
1373 INSTALL_NODOCKER=""
1374 INSTALL_NOJUJU=""
1375 KUBERNETES=""
1376 NGUI=""
1377 INSTALL_K8S_MONITOR=""
1378 INSTALL_NOHOSTCLIENT=""
1379 SESSION_ID=`date +%s`
1380 OSM_DEVOPS=
1381 OSM_VCA_HOST=
1382 OSM_VCA_SECRET=
1383 OSM_VCA_PUBKEY=
1384 OSM_VCA_CLOUDNAME="localhost"
1385 OSM_STACK_NAME=osm
1386 NO_HOST_PORTS=""
1387 DOCKER_NOBUILD=""
1388 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1389 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1390 WORKDIR_SUDO=sudo
1391 OSM_WORK_DIR="/etc/osm"
1392 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1393 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1394 OSM_HOST_VOL="/var/lib/osm"
1395 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1396 OSM_DOCKER_TAG=latest
1397 DOCKER_USER=opensourcemano
1398 PULL_IMAGES="y"
1399 KAFKA_TAG=2.11-1.0.2
1400 PROMETHEUS_TAG=v2.4.3
1401 GRAFANA_TAG=latest
1402 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1403 PROMETHEUS_CADVISOR_TAG=latest
1404 KEYSTONEDB_TAG=10
1405 OSM_DATABASE_COMMONKEY=
1406 ELASTIC_VERSION=6.4.2
1407 ELASTIC_CURATOR_VERSION=5.5.4
1408 POD_NETWORK_CIDR=10.244.0.0/16
1409 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
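# Kubernetes namespaces must be valid RFC 1123 labels; RE_CHECK below is used
# to validate the value passed with -s when installing on k8s.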
1410 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1411
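# getopts has no native long-option support: declaring '-' as an option that
# takes an argument ('-:') makes '--foo' arrive as option '-' with OPTARG set
# to 'foo', which the '-)' case below dispatches on.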
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
1413 case "${o}" in
1414 b)
1415 COMMIT_ID=${OPTARG}
1416 PULL_IMAGES=""
1417 ;;
1418 r)
1419 REPOSITORY="${OPTARG}"
1420 REPO_ARGS+=(-r "$REPOSITORY")
1421 ;;
1422 c)
1423 [ "${OPTARG}" == "swarm" ] && continue
1424 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
1426 usage && exit 1
1427 ;;
1428 n)
1429 [ "${OPTARG}" == "lwui" ] && continue
1430 [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
echo -e "Invalid argument for -n: '$OPTARG'\n" >&2
1432 usage && exit 1
1433 ;;
1434 k)
1435 REPOSITORY_KEY="${OPTARG}"
1436 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1437 ;;
1438 u)
1439 REPOSITORY_BASE="${OPTARG}"
1440 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1441 ;;
1442 R)
1443 RELEASE="${OPTARG}"
1444 REPO_ARGS+=(-R "$RELEASE")
1445 ;;
1446 D)
1447 OSM_DEVOPS="${OPTARG}"
1448 ;;
1449 o)
1450 INSTALL_ONLY="y"
1451 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1452 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1453 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1454 ;;
1455 O)
1456 INSTALL_TO_OPENSTACK="y"
1457 if [ -n "${OPTARG}" ]; then
1458 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1459 else
echo -e "Invalid argument for -O: '$OPTARG'\n" >&2
1461 usage && exit 1
1462 fi
1463 ;;
1464 N)
1465 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1466 ;;
1467 m)
1468 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1469 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1470 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1471 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1472 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1473 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1474 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1475 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1476 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1477 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1478 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1479 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1480 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1481 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1482 ;;
1483 H)
1484 OSM_VCA_HOST="${OPTARG}"
1485 ;;
1486 S)
1487 OSM_VCA_SECRET="${OPTARG}"
1488 ;;
1489 s)
OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
1491 ;;
1492 w)
1493 # when specifying workdir, do not use sudo for access
1494 WORKDIR_SUDO=
1495 OSM_WORK_DIR="${OPTARG}"
1496 ;;
1497 t)
1498 OSM_DOCKER_TAG="${OPTARG}"
1499 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1500 ;;
1501 U)
1502 DOCKER_USER="${OPTARG}"
1503 ;;
1504 P)
1505 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1506 ;;
1507 A)
1508 OSM_VCA_APIPROXY="${OPTARG}"
1509 ;;
1510 l)
1511 LXD_CLOUD_FILE="${OPTARG}"
1512 ;;
1513 L)
1514 LXD_CRED_FILE="${OPTARG}"
1515 ;;
1516 K)
1517 CONTROLLER_NAME="${OPTARG}"
1518 ;;
1519 -)
1520 [ "${OPTARG}" == "help" ] && usage && exit 0
1521 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1522 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1523 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1524 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1525 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1526 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1527 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1528 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1529 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1530 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1531 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1532 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1533 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1534 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1535 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1536 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1537 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1538 [ "${OPTARG}" == "pullimages" ] && continue
1539 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1540 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1541 [ "${OPTARG}" == "bundle" ] && continue
1542 [ "${OPTARG}" == "k8s" ] && continue
1543 [ "${OPTARG}" == "lxd" ] && continue
1544 [ "${OPTARG}" == "lxd-cred" ] && continue
1545 [ "${OPTARG}" == "microstack" ] && continue
1546 [ "${OPTARG}" == "ha" ] && continue
1547 [ "${OPTARG}" == "tag" ] && continue
1548 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1549 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1550 echo -e "Invalid option: '--$OPTARG'\n" >&2
1551 usage && exit 1
1552 ;;
1553 :)
1554 echo "Option -$OPTARG requires an argument" >&2
1555 usage && exit 1
1556 ;;
1557 \?)
1558 echo -e "Invalid option: '-$OPTARG'\n" >&2
1559 usage && exit 1
1560 ;;
1561 h)
1562 usage && exit 0
1563 ;;
1564 y)
1565 ASSUME_YES="y"
1566 ;;
1567 *)
1568 usage && exit 1
1569 ;;
1570 esac
1571 done
1572
1573 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1574 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1575
1576 if [ -n "$SHOWOPTS" ]; then
1577 dump_vars
1578 exit 0
1579 fi
1580
1581 if [ -n "$CHARMED" ]; then
1582 if [ -n "$UNINSTALL" ]; then
/usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
else
/usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1586
1587 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1588 echo
1589 echo "1. Get the NBI IP with the following command:"
1590 echo
1591 echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
1592 echo
1593 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1594 echo
1595 echo "export OSM_HOSTNAME=\$NBI_IP"
1596 echo
1597 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1598 echo
1599 echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
1600 echo
1601 echo "DONE"
1602 fi
1603
1604 exit 0
1605 fi
1606
1607 # if develop, we force master
1608 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1609
1610 need_packages="git wget curl tar"
1611
1612 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1613
1614 echo -e "Checking required packages: $need_packages"
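# The '|| ! echo ... || cmd' chains below only run the (root) apt-get steps
# when dpkg reports one of the required packages as missing: the negated echo
# always fails, forcing evaluation of the next '||' branch.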
1615 dpkg -l $need_packages &>/dev/null \
1616 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1617 || sudo apt-get update \
1618 || FATAL "failed to run apt-get update"
1619 dpkg -l $need_packages &>/dev/null \
1620 || ! echo -e "Installing $need_packages requires root privileges." \
1621 || sudo apt-get install -y $need_packages \
1622 || FATAL "failed to install $need_packages"
1623 sudo snap install jq
1624 if [ -z "$OSM_DEVOPS" ]; then
1625 if [ -n "$TEST_INSTALLER" ]; then
1626 echo -e "\nUsing local devops repo for OSM installation"
1627 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1628 else
1629 echo -e "\nCreating temporary dir for OSM installation"
1630 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1631 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1632
1633 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1634
1635 if [ -z "$COMMIT_ID" ]; then
1636 echo -e "\nGuessing the current stable release"
1637 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
[ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the latest stable release" && exit 1
1639
1640 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1641 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1642 else
1643 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1644 fi
1645 git -C $OSM_DEVOPS checkout $COMMIT_ID
1646 fi
1647 fi
1648
1649 . $OSM_DEVOPS/common/all_funcs
1650
1651 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1652 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1653 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1654 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1655 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1656 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1657
1658 #Installation starts here
1659 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null
1660 track start
1661
1662 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1663 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1664 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1665 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1666 fi
1667
1668 echo -e "Checking required packages: lxd"
1669 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1670 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1671
1672 # use local devops for containers
1673 export OSM_USE_LOCAL_DEVOPS=true
1674
1675 #Install osmclient
1676
1677 #Install vim-emu (optional)
1678 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1679
1680 wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
1681 track end
1682 echo -e "\nDONE"
1683
1684