#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "     -b master          (main dev branch)"
    echo -e "     -b v2.0            (v2.0 branch)"
    echo -e "     -b tags/v1.1.0     (a specific tag)"
    echo -e "     ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes it is already installed"
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -h / --help: print this help"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag"

}

# Takes a juju/accounts.yaml file and returns the password for the
# specified controller. Written with plain bash tools to avoid adding
# package dependencies.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
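
# Illustrative usage (a hedged example; "osm" is the default stack/controller
# name used by this installer):
#   OSM_VCA_SECRET=$(parse_juju_password osm)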

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
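
# Illustrative usage: generate a random 32-character alphanumeric key, e.g.
#   OSM_DATABASE_COMMONKEY=$(generate_secret)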

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
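
# Illustrative usage, mirroring the call sites in uninstall_lightweight below:
#   remove_volumes osm                      # swarm: removes osm_mongo_db, osm_mon_db, ...
#   remove_volumes "${OSM_HOST_VOL}/osm"    # k8s: removes the namespace volume directory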

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
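
# Illustrative usage: remove the main OSM stack or the optional ELK stack, e.g.
#   remove_stack osm
#   remove_stack osm_elk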

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Ask the user a question and parse the response, accepting 'y'/'yes' or 'n'/'no' (case insensitive)
    # Params: $1 text to ask; $2 default action, 'y' for yes, 'n' for no; other or empty means no default
    # Return: true (0) if the user answers 'yes'; false (1) if the user answers 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
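
# Illustrative usage (with default answer 'y', just pressing Enter proceeds):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1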

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that the OSM host is running on localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if systemctl -q is-active node_exporter
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
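
# Quick sanity check after installation (illustrative; the pinned release above
# should report itself as 1.18.0):
#   docker-compose --version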

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found, so create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
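
# Illustrative effect (addresses are examples): with DEFAULT_IP=172.21.1.10 and
# OSM_VCA_HOST=10.152.83.1, the rule added above DNATs host traffic on port
# 17070 (the Juju API port) to the controller:
#   iptables -t nat -A PREROUTING -p tcp -m tcp -d 172.21.1.10 --dport 17070 -j DNAT --to-destination 10.152.83.1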

function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
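
# Illustrative usage: install a file, prompting before overwriting an existing copy:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml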

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
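
# For reference, a generated lcm.env might look like this (values are
# placeholders, not real credentials):
#   OSMLCM_DATABASE_COMMONKEY=<32-char secret>
#   OSMLCM_VCA_HOST=10.152.83.1
#   OSMLCM_VCA_SECRET=<juju controller password>
#   OSMLCM_VCA_PUBKEY=ssh-rsa AAAA...
#   OSMLCM_VCA_CACERT=<base64-encoded CA certificate>
#   OSMLCM_VCA_APIPROXY=172.21.1.10
#   OSMLCM_VCA_CLOUD=<VCA cloud name>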

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
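
# Illustrative usage of the generated wrapper (runs osmclient as a sidecar
# container attached to the OSM network):
#   $OSM_DOCKER_WORK_DIR/osm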

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#deploys osm pods and services
function deploy_osm_services() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

function deploy_osm_pla_service() {
    # corresponding to parse_yaml
    [ ! "$OSM_DOCKER_TAG" == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ "$tiller_status" == "1/1" ] ) && break
            counter=$((counter + 2))
            sleep 2
        done
    fi
}

function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
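
# Illustrative usage: retag all OSM module images in the k8s manifests, e.g.
#   parse_yaml 7.0.1   # rewrites opensourcemano/<module>:* to opensourcemano/<module>:7.0.1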

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi
    juju_createproxy
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
    track docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        kube_secrets
        [ ! "$OSM_DOCKER_TAG" == "7" ] && parse_yaml $OSM_DOCKER_TAG
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return 0
}
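
# Illustrative invocations (arguments: openrc file or cloud name, external
# network name, whether to set up a volume):
#   install_to_openstack ~/openrc.sh public1 false
#   install_to_openstack mycloud public1 true   # "mycloud" resolved from clouds.yaml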
1181
1182 function install_vimemu() {
1183 echo "\nInstalling vim-emu"
1184 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1185 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1186 # install prerequisites (OVS is a must for the emulator to work)
1187 sudo apt-get install -y openvswitch-switch
1188 # clone vim-emu repository (attention: branch is currently master only)
1189 echo "Cloning vim-emu repository ..."
1190 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1191 # build vim-emu docker
1192 echo "Building vim-emu Docker container..."
1193
1194 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1195 # start vim-emu container as daemon
1196 echo "Starting vim-emu Docker container 'vim-emu' ..."
1197 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1198 # in lightweight mode, the emulator needs to be attached to netOSM
1199 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1200 else
1201 # classic build mode
1202 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1203 fi
1204 echo "Waiting for 'vim-emu' container to start ..."
1205 sleep 5
1206 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1207 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1208 # print vim-emu connection info
1209 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1210 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1211 echo -e "To add the emulated VIM to OSM you should do:"
1212 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1213 }
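# Illustrative post-install checks (not executed by the installer):
#   docker ps --filter name=vim-emu   # the container should be Up
#   docker logs vim-emu | tail        # the example topology should be running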
1214
1215 function install_k8s_monitoring() {
1216 # install OSM monitoring
1217 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1218 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1219 }
1220
1221 function uninstall_k8s_monitoring() {
1222 # uninstall OSM monitoring
1223 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1224 }
1225
1226 function dump_vars(){
1227 echo "DEVELOP=$DEVELOP"
1228 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1229 echo "UNINSTALL=$UNINSTALL"
1230 echo "UPDATE=$UPDATE"
1231 echo "RECONFIGURE=$RECONFIGURE"
1232 echo "TEST_INSTALLER=$TEST_INSTALLER"
1233 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1234 echo "INSTALL_PLA=$INSTALL_PLA"
1235 echo "INSTALL_LXD=$INSTALL_LXD"
1236 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1237 echo "INSTALL_ONLY=$INSTALL_ONLY"
1238 echo "INSTALL_ELK=$INSTALL_ELK"
1239 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1240 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1241 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1242 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1243 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1244 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1245 echo "TO_REBUILD=$TO_REBUILD"
1246 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1247 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1248 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1249 echo "RELEASE=$RELEASE"
1250 echo "REPOSITORY=$REPOSITORY"
1251 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1252 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1253 echo "OSM_DEVOPS=$OSM_DEVOPS"
1254 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1255 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1256 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1257 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1258 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1259 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1260 echo "OSM_WORK_DIR=$OSM_STACK_NAME"
1261 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1262 echo "DOCKER_USER=$DOCKER_USER"
1263 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1264 echo "PULL_IMAGES=$PULL_IMAGES"
1265 echo "KUBERNETES=$KUBERNETES"
1266 echo "SHOWOPTS=$SHOWOPTS"
1267 echo "Install from specific refspec (-b): $COMMIT_ID"
1268 }
1269
1270 function track(){
1271 ctime=`date +%s`
1272 duration=$((ctime - SESSION_ID))
1273 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1274 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1275 event_name="bin"
1276 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1277 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1278 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1279 event_name="${event_name}_$1"
1280 url="${url}&event=${event_name}&ce_duration=${duration}"
1281 wget -q -O /dev/null "$url"
1282 }
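# Example (illustrative): in the default lightweight install, "track docker_build"
# results in a GET request to
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=<start-ts>&event=lw_docker_build&ce_duration=<elapsed-s>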
1283
1284 UNINSTALL=""
1285 DEVELOP=""
1286 UPDATE=""
1287 RECONFIGURE=""
1288 TEST_INSTALLER=""
1289 INSTALL_LXD=""
1290 SHOWOPTS=""
1291 COMMIT_ID=""
1292 ASSUME_YES=""
1293 INSTALL_FROM_SOURCE=""
1294 RELEASE="ReleaseSEVEN"
1295 REPOSITORY="stable"
1296 INSTALL_VIMEMU=""
1297 INSTALL_PLA=""
1298 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1299 LXD_REPOSITORY_PATH=""
1300 INSTALL_LIGHTWEIGHT="y"
1301 INSTALL_TO_OPENSTACK=""
1302 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1303 OPENSTACK_PUBLIC_NET_NAME=""
1304 OPENSTACK_ATTACH_VOLUME="false"
1305 INSTALL_ONLY=""
1306 INSTALL_ELK=""
1307 TO_REBUILD=""
1308 INSTALL_NOLXD=""
1309 INSTALL_NODOCKER=""
1310 INSTALL_NOJUJU=""
1311 KUBERNETES=""
1312 INSTALL_K8S_MONITOR=""
1313 INSTALL_NOHOSTCLIENT=""
1314 SESSION_ID=`date +%s`
1315 OSM_DEVOPS=
1316 OSM_VCA_HOST=
1317 OSM_VCA_SECRET=
1318 OSM_VCA_PUBKEY=
1319 OSM_VCA_CLOUDNAME="localhost"
1320 OSM_STACK_NAME=osm
1321 NO_HOST_PORTS=""
1322 DOCKER_NOBUILD=""
1323 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1324 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1325 WORKDIR_SUDO=sudo
1326 OSM_WORK_DIR="/etc/osm"
1327 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1328 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1329 OSM_HOST_VOL="/var/lib/osm"
1330 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1331 OSM_DOCKER_TAG=latest
1332 DOCKER_USER=opensourcemano
1333 PULL_IMAGES="y"
1334 KAFKA_TAG=2.11-1.0.2
1335 PROMETHEUS_TAG=v2.4.3
1336 GRAFANA_TAG=latest
1337 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1338 PROMETHEUS_CADVISOR_TAG=latest
1339 KEYSTONEDB_TAG=10
1340 OSM_DATABASE_COMMONKEY=
1341 ELASTIC_VERSION=6.4.2
1342 ELASTIC_CURATOR_VERSION=5.5.4
1343 POD_NETWORK_CIDR=10.244.0.0/16
1344 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1345 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
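# RE_CHECK mirrors the Kubernetes namespace naming rules (lowercase alphanumerics
# and '-', starting and ending with an alphanumeric). For example:
#   [[ "my-osm-1" =~ $RE_CHECK ]]   # matches
#   [[ "My_OSM" =~ $RE_CHECK ]]     # does not match (uppercase, underscore)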
1346
1347 while getopts ":b:r:c:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
1348 case "${o}" in
1349 b)
1350 COMMIT_ID=${OPTARG}
1351 PULL_IMAGES=""
1352 ;;
1353 r)
1354 REPOSITORY="${OPTARG}"
1355 REPO_ARGS+=(-r "$REPOSITORY")
1356 ;;
1357 c)
1358 [ "${OPTARG}" == "swarm" ] && continue
1359 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1360 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1361 usage && exit 1
1362 ;;
1363 k)
1364 REPOSITORY_KEY="${OPTARG}"
1365 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1366 ;;
1367 u)
1368 REPOSITORY_BASE="${OPTARG}"
1369 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1370 ;;
1371 R)
1372 RELEASE="${OPTARG}"
1373 REPO_ARGS+=(-R "$RELEASE")
1374 ;;
1375 D)
1376 OSM_DEVOPS="${OPTARG}"
1377 ;;
1378 o)
1379 INSTALL_ONLY="y"
1380 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1381 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1382 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1383 ;;
1384 O)
1385 INSTALL_TO_OPENSTACK="y"
1386 if [ -n "${OPTARG}" ]; then
1387 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1388 else
1389 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1390 usage && exit 1
1391 fi
1392 ;;
1393 N)
1394 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1395 ;;
1396 m)
1397 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1398 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1399 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1400 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1401 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1402 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1403 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1404 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1405 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1406 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1407 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1408 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1409 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1410 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1411 ;;
1412 H)
1413 OSM_VCA_HOST="${OPTARG}"
1414 ;;
1415 S)
1416 OSM_VCA_SECRET="${OPTARG}"
1417 ;;
1418 s)
1419 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
1420 ;;
1421 w)
1422 # when specifying workdir, do not use sudo for access
1423 WORKDIR_SUDO=
1424 OSM_WORK_DIR="${OPTARG}"
1425 ;;
1426 t)
1427 OSM_DOCKER_TAG="${OPTARG}"
1428 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1429 ;;
1430 U)
1431 DOCKER_USER="${OPTARG}"
1432 ;;
1433 P)
1434 OSM_VCA_PUBKEY=$(cat "${OPTARG}")
1435 ;;
1436 A)
1437 OSM_VCA_APIPROXY="${OPTARG}"
1438 ;;
1439 l)
1440 LXD_CLOUD_FILE="${OPTARG}"
1441 ;;
1442 L)
1443 LXD_CRED_FILE="${OPTARG}"
1444 ;;
1445 K)
1446 CONTROLLER_NAME="${OPTARG}"
1447 ;;
1448 -)
1449 [ "${OPTARG}" == "help" ] && usage && exit 0
1450 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1451 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1452 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1453 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1454 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1455 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1456 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1457 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1458 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1459 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1460 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1461 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1462 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1463 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1464 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1465 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1466 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1467 [ "${OPTARG}" == "pullimages" ] && continue
1468 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1469 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1470 [ "${OPTARG}" == "bundle" ] && continue
1471 [ "${OPTARG}" == "k8s" ] && continue
1472 [ "${OPTARG}" == "lxd" ] && continue
1473 [ "${OPTARG}" == "lxd-cred" ] && continue
1474 [ "${OPTARG}" == "microstack" ] && continue
1475 [ "${OPTARG}" == "ha" ] && continue
1476 [ "${OPTARG}" == "tag" ] && continue
1477 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1478 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1479 echo -e "Invalid option: '--$OPTARG'\n" >&2
1480 usage && exit 1
1481 ;;
1482 :)
1483 echo "Option -$OPTARG requires an argument" >&2
1484 usage && exit 1
1485 ;;
1486 \?)
1487 echo -e "Invalid option: '-$OPTARG'\n" >&2
1488 usage && exit 1
1489 ;;
1490 h)
1491 usage && exit 0
1492 ;;
1493 y)
1494 ASSUME_YES="y"
1495 ;;
1496 *)
1497 usage && exit 1
1498 ;;
1499 esac
1500 done
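# Illustrative invocations, given the flags parsed above (values are examples):
#   ./full_install_osm.sh -c k8s -s myosm -t 7.0.1   # K8s install in namespace "myosm"
#   ./full_install_osm.sh -O ~/openrc -N public      # install to OpenStack via an openrc file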
1501
1502 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1503 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1504
1505 if [ -n "$SHOWOPTS" ]; then
1506 dump_vars
1507 exit 0
1508 fi
1509
1510 if [ -n "$CHARMED" ]; then
1511 if [ -n "$UNINSTALL" ]; then
1512 /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1513 else
1514 /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1515
1516 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1517 echo
1518 echo "1. Get the NBI IP with the following command:"
1519 echo
1520 echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
1521 echo
1522 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1523 echo
1524 echo "export OSM_HOSTNAME=\$NBI_IP"
1525 echo
1526 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1527 echo
1528 echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
1529 echo
1530 echo "DONE"
1531 fi
1532
1533 exit 0
1534 fi
1535
1536 # if develop, we force master
1537 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1538
1539 need_packages="git wget curl tar"
1540
1541 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1542
1543 echo -e "Checking required packages: $need_packages"
1544 dpkg -l $need_packages &>/dev/null \
1545 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1546 || sudo apt-get update \
1547 || FATAL "failed to run apt-get update"
1548 dpkg -l $need_packages &>/dev/null \
1549 || ! echo -e "Installing $need_packages requires root privileges." \
1550 || sudo apt-get install -y $need_packages \
1551 || FATAL "failed to install $need_packages"
1552 sudo snap install jq
1553 if [ -z "$OSM_DEVOPS" ]; then
1554 if [ -n "$TEST_INSTALLER" ]; then
1555 echo -e "\nUsing local devops repo for OSM installation"
1556 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1557 else
1558 echo -e "\nCreating temporary dir for OSM installation"
1559 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1560 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1561
1562 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1563
1564 if [ -z "$COMMIT_ID" ]; then
1565 echo -e "\nGuessing the current stable release"
1566 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1567 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1568
1569 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1570 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1571 else
1572 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1573 fi
1574 git -C $OSM_DEVOPS checkout $COMMIT_ID
1575 fi
1576 fi
1577
1578 . $OSM_DEVOPS/common/all_funcs
1579
1580 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1581 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1582 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1583 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1584 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1585 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1586
1587 #Installation starts here
1588 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null
1589 track start
1590
1591 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1592 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1593 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1594 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1595 fi
1596
1597 echo -e "Checking required packages: lxd"
1598 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1599 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1600
1601 # use local devops for containers
1602 export OSM_USE_LOCAL_DEVOPS=true
1603
1606 #Install vim-emu (optional)
1607 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1608
1609 wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
1610 track end
1611 echo -e "\nDONE"
1612
1613