Fix bug 1647: OpenEBS error in OSM installer
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's command-line help to stdout.
    # Fixes: "confifured" typo, missing verb in --nojuju text, missing space
    # after "--nodockerbuild:".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help:   print this help"
    echo -e "     -y:            do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>:     use specified repository name for osm packages"
    echo -e "     -R <release>:  use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>:  install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                    -b master          (main dev branch)"
    echo -e "                    -b v2.0            (v2.0 branch)"
    echo -e "                    -b tags/v1.1.0     (a specific tag)"
    echo -e "                    ..."
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e "     -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "     -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume:       create a VM volume when installing to OpenStack"
#    echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
#    echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     --charmed:                   Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]:    Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]:   Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]:              Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]:         Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]:    Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]:              Installs microstack as a vim. (--charmed option)"
    echo -e "     [--ha]:                      Installs High Availability bundle. (--charmed option)"
    echo -e "     [--tag]:                     Docker image tag. (--charmed option)"
    echo -e "     [--registry]:                Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}
84
85 # takes a juju/accounts.yaml file and returns the password specific
86 # for a controller. I wrote this using only bash tools to minimize
87 # additions of other packages
88 function parse_juju_password {
89 password_file="${HOME}/.local/share/juju/accounts.yaml"
90 local controller_name=$1
91 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
92 sed -ne "s|^\($s\):|\1|" \
93 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
94 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
95 awk -F$fs -v controller=$controller_name '{
96 indent = length($1)/2;
97 vname[indent] = $2;
98 for (i in vname) {if (i > indent) {delete vname[i]}}
99 if (length($3) > 0) {
100 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
101 if (match(vn,controller) && match($2,"password")) {
102 printf("%s",$3);
103 }
104 }
105 }'
106 }
107
function generate_secret() {
    # Produce a 32-character random alphanumeric string on stdout
    # (no trailing newline).
    local secret
    secret=$(head /dev/urandom | tr -cd 'A-Za-z0-9' | head -c 32)
    printf '%s' "$secret"
}
111
function remove_volumes() {
    # Delete OSM persistent storage.
    # Kubernetes: $1 is the host-path volume directory to delete.
    # Swarm:      $1 is the stack name whose named docker volumes are removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        # Quote the path so it cannot word-split into a wider rm -rf, and
        # abort (":?") instead of running rm -rf with an empty argument
        $WORKDIR_SUDO rm -rf "${k8_volume:?}"
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
125
function remove_network() {
    # Remove the docker overlay network ("net<stack>") created for a stack.
    local network_name="net$1"
    sg docker -c "docker network rm ${network_name}"
}
130
function remove_iptables() {
    # Delete the DNAT rule that forwards host port 17070 to the juju (VCA)
    # controller, then persist the iptables state.
    # Arguments: $1 - stack/controller name (used to look up the VCA host)
    # Reads/sets: OSM_VCA_HOST, DEFAULT_IP, DEFAULT_IF
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        # Fall back to route -n (as install_lxd does); the previous code
        # re-ran the exact same "ip route" command, so the fallback could
        # never succeed where the first attempt failed
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule when it is currently present (-C checks existence)
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
151
function remove_stack() {
    # Remove a docker swarm stack (if it is deployed) and wait up to 30
    # seconds for all of its containers to disappear; FATAL if they do not.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        local attempt=0
        local remaining=1
        while [ ${attempt} -lt 30 ]; do
            remaining=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $remaining"
            if [ "${remaining}" == "0" ]; then
                break
            fi
            attempt=$((attempt+1))
            sleep 1
        done
        if [ "${remaining}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
175
#removes osm deployments and services
function remove_k8s_namespace() {
    # Delete the given kubernetes namespace and, with it, every OSM
    # deployment and service inside it.
    # Arguments: $1 - namespace name (quoted to avoid word-splitting)
    kubectl delete ns "$1"
}
180
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Uninstall the helm client (tiller reset, binary and ~/.helm), but only
    # when "helm ls -q" reports no deployed releases.
    if [ -z "$(helm ls -q)" ] ; then
        sudo helm reset --force
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
189
function remove_crontab_job() {
    # Remove the weekly update-juju-lxc-images job installed by
    # update_juju_images(). The pattern must be double-quoted so
    # ${OSM_DEVOPS} expands to the real path present in the crontab line;
    # with the previous single quotes grep -v looked for the literal text
    # "${OSM_DEVOPS}/..." and therefore never removed the job.
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
193
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 flavours of the OSM client package.
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$pkg"
    done
}
199
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Remove an OSM deployment. With INSTALL_ONLY set, only the selected
    # addon (currently the ELK stack) is removed; otherwise the whole
    # stack/namespace plus images, volumes, network, NAT rules, the juju
    # controller, the cron job and (optionally) the osmclient are removed.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        # Registry/user/tag expand here (unquoted delimiter) while \${module}
        # is escaped so it is expanded by the newgrp shell inside the loop.
        # Previously it was unescaped, so the parent shell replaced it with
        # an empty string and the per-module image names were never formed.
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/\${module}:${OSM_DOCKER_TAG}
done
EONG

        # Remove whichever UI image this deployment used
        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Only kill the juju controller when the installer bootstrapped it
        # (an externally provided controller, -K, is left alone)
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
258
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Ensure iptables-persistent is installed; debconf answers are preseeded
    # so apt never prompts, keeping unattended installs safe.
    echo -e "\nChecking required packages: iptables-persistent"
    if dpkg -l iptables-persistent &>/dev/null; then
        return
    fi
    echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
    echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
    echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
    sudo apt-get -yq install iptables-persistent
}
269
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # Delegate NAT rule setup to the nat_osm helper script; needs root.
    check_install_iptables_persistent

    printf '\nConfiguring NAT rules\n Required root privileges\n'
    sudo $OSM_DEVOPS/installers/nat_osm
}
278
function FATAL(){
    # Report an unrecoverable installer error and abort with status 1.
    printf 'FATAL error: Cannot install OSM due to "%s"\n' "$1"
    exit 1
}
283
function update_juju_images(){
    # Keep the juju LXC base images fresh: add a weekly (Sat 04:00) crontab
    # job that refreshes them (the grep guard makes this idempotent), then
    # run one refresh immediately.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
288
function install_lxd() {
    # Install and configure LXD from snap (removing any deb-based LXC/LXD
    # first) and align the default profile/bridge MTU with the host's
    # default-route interface.
    # Reads: OSM_DEVOPS, DEFAULT_IP

    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Preseed "lxd init", exposing the LXD API on DEFAULT_IP:8443
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    # Match the LXD profile/bridge MTU to the interface carrying the default
    # route (route -n is the fallback when "ip route" yields nothing)
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
312
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty input falls back to the default action, when one is allowed
        if [ -z "$USER_CONFIRMATION" ]; then
            [ "$2" == 'y' ] && return 0
            [ "$2" == 'n' ] && return 1
        fi
        # Case-insensitive match via ${var,,} lowercasing
        case "${USER_CONFIRMATION,,}" in
            yes|y) return 0 ;;
            no|n)  return 1 ;;
        esac
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
326
function install_osmclient(){
    # Install the OSM client and information-model packages from the
    # configured apt repository, plus the python requirements they declare.
    # Reads: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
    # Strip the option prefixes in case the values still carry them
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    # Install the python requirements shipped inside the debian packages,
    # which apt does not pull in by itself
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight (LXC) installs: point the client at the SO/RO
    # containers reported by "lxc list"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
364
function install_prometheus_nodeexporter(){
    # Deploy prometheus node_exporter as a systemd service; no-op when the
    # service is already active.
    # Reads: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS
    if systemctl -q is-active node_exporter
    then
        echo "Node Exporter is already running."
        return 0
    fi
    echo "Node Exporter is not active, installing..."
    # Dedicated non-login system user for the service
    if getent passwd node_exporter > /dev/null 2>&1; then
        echo "node_exporter user exists"
    else
        echo "Creating user node_exporter"
        sudo useradd --no-create-home --shell /bin/false node_exporter
    fi
    # Fetch the release tarball, install the binary, clean the scratch files
    local tarball="node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64"
    wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/$tarball.tar.gz -P /tmp/
    sudo tar -C /tmp -xf /tmp/$tarball.tar.gz
    sudo cp /tmp/$tarball/node_exporter /usr/local/bin
    sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
    sudo rm -rf /tmp/$tarball*
    # Register and start the systemd unit
    sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo systemctl restart node_exporter
    sudo systemctl enable node_exporter
    echo "Node Exporter has been activated in this host."
    return 0
}
390
function uninstall_prometheus_nodeexporter(){
    # Reverse of install_prometheus_nodeexporter: stop and disable the
    # service, then remove its unit file, system user and binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
400
function install_docker_ce() {
    # installs and configures Docker CE
    # Installs Docker CE from the official apt repository, adds the current
    # user to the docker group and, when DOCKER_PROXY_URL is set, configures
    # it as a registry mirror in /etc/docker/daemon.json.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            # daemon.json exists: replace the registry-mirrors entry, or
            # insert one right after the opening brace
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            # No daemon.json yet: create one containing only the mirror
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    # Smoke-test that the docker daemon answers before continuing
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
439
function install_docker_compose() {
    # installs and configures docker-compose
    # Downloads the docker-compose 1.18.0 binary matching this platform and
    # makes it executable under /usr/local/bin.
    echo "Installing Docker Compose ..."
    sudo curl -L "https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
447
function install_juju() {
    # Install juju 2.8 from snap, make sure /snap/bin is on PATH and refresh
    # the juju LXC images.
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    if [[ ":$PATH:" != *":/snap/bin:"* ]]; then
        PATH="/snap/bin:${PATH}"
    fi
    update_juju_images
    echo "Finished installation of juju"
    return 0
}
456
function juju_createcontroller() {
    # Bootstrap a juju controller on LXD when one with the expected name is
    # not present, verify exactly one matching controller exists, and enable
    # k8s operator support.
    # Reads: OSM_STACK_NAME, JUJU_AGENT_VERSION, OSM_VCA_CLOUDNAME, USER
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 must reach awk: the previous unescaped "$1" was expanded by the
    # shell to an empty string, so awk printed whole lines (the line count
    # was only right by accident)
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    # Quoted so "[k8s-operators]" cannot be mangled by pathname expansion
    juju controller-config "features=[k8s-operators]"
}
466
function juju_addk8s() {
    # Register the local kubernetes (from ~/.kube/config) as a cloud on the
    # OSM juju controller, backed by the openebs-hostpath storage class.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < $HOME/.kube/config
}
470
function juju_createcontroller_k8s(){
    # Register the kubernetes cloud with the local juju client and bootstrap
    # an OSM controller on it, reachable through a LoadBalancer service.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client < $HOME/.kube/config
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION
}
477
478
function juju_addlxd_cloud(){
    # Register the local LXD daemon as a juju cloud ("lxd-cloud") on the OSM
    # controller, authenticated with a freshly generated self-signed client
    # certificate that is also added to LXD's trust store.
    # Reads: DEFAULT_IP, OSM_STACK_NAME; sets OSM_VCA_CLOUDNAME
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
    # Cloud definition; hostname verification is off because the server
    # certificate is self-signed
    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    # One-year self-signed client certificate for juju -> LXD auth
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    # Indent the PEM blocks so they nest under the YAML keys below.
    # NOTE(review): the indent added by sed must match the credentials
    # heredoc layout — confirm the exact number of spaces against the
    # original file, as whitespace was lost in this copy
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    # Trust our client certificate in LXD, then register cloud + credentials
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
518
519
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding host port 17070 to the juju
    # (VCA) controller, and persist the iptables state.
    check_install_iptables_persistent

    # One definition of the rule, shared by the existence check (-C) and
    # the append (-A) so they can never drift apart
    local match=(-p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST)
    if ! sudo iptables -t nat -C PREROUTING "${match[@]}"; then
        sudo iptables -t nat -A PREROUTING "${match[@]}"
        sudo netfilter-persistent save
    fi
}
528
function docker_login() {
    # Authenticate the docker client against the configured registry using
    # DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD.
    # NOTE(review): -p puts the password on the command line, where it is
    # visible in `ps` output; `docker login --password-stdin` would be safer.
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}
533
function generate_docker_images() {
    # Obtain every docker image OSM needs. Third-party images (kafka, mongo,
    # prometheus, grafana, mariadb, mysql, ...) are always pulled; OSM module
    # images are either pulled from the configured registry (PULL_IMAGES) or
    # built from the git sources checked out at COMMIT_ID.
    # Reads: TO_REBUILD, PULL_IMAGES, NGUI, INSTALL_PLA, MODULE_DOCKER_TAG,
    #        DOCKER_REGISTRY_URL, DOCKER_USER, OSM_DOCKER_TAG, LWTEMPDIR,
    #        COMMIT_ID, and the *_TAG version variables
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    # Third-party images: each pulled unless -m restricted the module list
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    # keystone-db (mariadb) is needed whenever NBI or KEYSTONE-DB is built
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            # Only one UI flavour is fetched: light-ui unless NGUI is set
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            # -T overrides the tag, but only for modules selected with -m
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        # Build each selected module image from its gerrit repository
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            # Repository settings are passed into the image build as args
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            # NOTE(review): -f is given $OSM_DEVOPS/docker/osmclient, which
            # looks like a directory rather than a Dockerfile path — confirm
            # this is what the build expects
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}
631
function cmp_overwrite() {
    # Copy file1 over file2 only when their contents differ, asking before
    # overwriting an existing file (cp -b keeps a backup of the target).
    # Arguments: $1 - source file, $2 - destination file
    file1="$1"
    file2="$2"
    # cmp -s replaces the previous "! $(cmp ... >/dev/null)" construct,
    # which executed cmp's (empty) output as a command (SC2091) and only
    # worked by accident
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
643
function generate_docker_compose_files() {
    # Stage the docker-compose manifests into the OSM work dir: the base
    # compose file, the selected UI flavour (NG-UI when NGUI is set,
    # light-ui otherwise) and, optionally, the PLA compose file.
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    local ui_compose=docker-compose-lightui.yaml
    [ -n "$NGUI" ] && ui_compose=docker-compose-ngui.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/${ui_compose} $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}
657
function generate_k8s_manifest_files() {
    #Kubernetes resources
    # Copy the osm_pods manifests into the work dir, then drop mongo.yaml
    # and the manifest of the UI flavour that was not selected.
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    local unwanted_ui=$OSM_K8S_WORK_DIR/ng-ui.yaml
    [ -n "$NGUI" ] && unwanted_ui=$OSM_K8S_WORK_DIR/light-ui.yaml
    $WORKDIR_SUDO rm -f $unwanted_ui
}
668
function generate_prometheus_grafana_files() {
    # Stage prometheus, grafana and prometheus-exporter config files for the
    # docker (swarm) deployment; no-op under kubernetes.
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    local f
    for f in dashboards-osm.yml datasource-prometheus.yml osm-sample-dashboard.json osm-system-dashboard.json; do
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/$f $OSM_DOCKER_WORK_DIR/grafana/$f
    done

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}
686
function generate_docker_env_files() {
    # Generate (or refresh) the per-module docker env files under
    # $OSM_DOCKER_WORK_DIR. Existing files are backed up with a '~' suffix.
    # Values that may change between runs (VCA data, host IP) are updated in
    # place; generated secrets are only written when a file is first created.

    # Helper: append "KEY=VALUE" ($3) to env file $1 if no line contains
    # pattern $2 yet, otherwise rewrite the matching line in place.
    function update_env_var() {
        if ! grep -Fq "$2" $OSM_DOCKER_WORK_DIR/$1; then
            echo "$3" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/$1
        else
            $WORKDIR_SUDO sed -i "s|$2.*|$3|g" $OSM_DOCKER_WORK_DIR/$1
        fi
    }

    echo "Doing a backup of existing env files"
    # NOTE: on a first install these cp calls print harmless errors because
    # the env files do not exist yet.
    local env_file
    for env_file in keystone-db keystone lcm lwui mon nbi pol ro-db ro; do
        $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/${env_file}.env{,~}
    done

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    update_env_var lcm.env "OSMLCM_VCA_HOST" "OSMLCM_VCA_HOST=${OSM_VCA_HOST}"
    update_env_var lcm.env "OSMLCM_VCA_SECRET" "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}"
    update_env_var lcm.env "OSMLCM_VCA_PUBKEY" "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}"
    update_env_var lcm.env "OSMLCM_VCA_CACERT" "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}"
    # The API proxy is only set for non-k8s installations (see caller).
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        update_env_var lcm.env "OSMLCM_VCA_APIPROXY" "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
    fi
    # The next two are documented (commented-out) defaults; never rewritten.
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    update_env_var lcm.env "OSMLCM_VCA_CLOUD" "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}"
    update_env_var lcm.env "OSMLCM_VCA_K8S_CLOUD" "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}"

    # RO
    # MYSQL_ROOT_PASSWORD is (re)generated on every run: it is only persisted
    # when the db env files are first created, but it is also reused below for
    # keystone/mon/pol first-time creation.
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    update_env_var mon.env "OS_NOTIFIER_URI" "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662"
    update_env_var mon.env "OSMMON_VCA_HOST" "OSMMON_VCA_HOST=${OSM_VCA_HOST}"
    update_env_var mon.env "OSMMON_VCA_SECRET" "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}"
    update_env_var mon.env "OSMMON_VCA_CACERT" "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}"

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
831
function generate_osmclient_script () {
    # Write a small wrapper that runs the osmclient sidecar container
    # attached to the OSM overlay network, and make it executable.
    local wrapper="$OSM_DOCKER_WORK_DIR/osm"
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $wrapper
    $WORKDIR_SUDO chmod +x "$wrapper"
    echo "osmclient sidecar container can be found at: $wrapper"
}
837
#installs kubernetes packages
function install_kube() {
    # Configure the upstream Kubernetes apt repository and install a pinned
    # kubelet/kubeadm/kubectl toolchain, holding the packages so unattended
    # upgrades cannot move the cluster version.
    local k8s_pkg_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${k8s_pkg_version} kubeadm=${k8s_pkg_version} kubectl=${k8s_pkg_version}
    sudo apt-mark hold kubelet kubeadm kubectl
}
848
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: kubeadm cluster configuration file.
    # Swap must be disabled or the kubelet refuses to start.
    local config_file="$1"
    sudo swapoff -a
    sudo kubeadm init --config "$config_file"
    sleep 5
}
855
function kube_config_dir() {
    # Copy the kubeadm admin kubeconfig to the invoking user's ~/.kube/config
    # so kubectl/helm work without sudo.
    # Fix: $K8S_MANIFEST_DIR was unquoted, so when the variable was unset or
    # empty the guard silently passed instead of aborting.
    [ ! -d "$K8S_MANIFEST_DIR" ] && FATAL "Cannot Install Kubernetes"
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    # Hand ownership to the current user (the file is root-owned after cp).
    sudo chown $(id -u):$(id -g) "$HOME/.kube/config"
}
862
function install_k8s_storageclass() {
    # Install the OpenEBS storage provisioner via Helm, wait until the
    # openebs-hostpath storageclass exists, then mark it as the cluster's
    # default storageclass.
    echo "Installing OpenEBS"
    kubectl create ns openebs
    helm repo add openebs https://openebs.github.io/charts
    helm repo update
    # Chart version is pinned so the installer never pulls an untested
    # OpenEBS release (bug 1647).
    helm install --namespace openebs openebs openebs/openebs --version 1.12.0
    helm ls -n openebs
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        # Test the command directly instead of inspecting $? afterwards (SC2181).
        if kubectl get storageclass openebs-hostpath &> /dev/null; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        fi
        counter=$((counter + 15))
        sleep 15
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
890
function install_k8s_metallb() {
    # Deploy MetalLB and configure a single-address layer2 pool so that
    # LoadBalancer services resolve to the host's default IP.
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE
EOF
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a temp dir and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    # NOTE(review): this EXIT trap replaces any trap installed by the caller
    # (e.g. install_lightweight's LWTEMPDIR cleanup) — confirm this is intended.
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Fix: "[ $? -ne 0 ] && FATAL" as the last statement made this function
    # return 1 on SUCCESS; check the apply directly instead (SC2181).
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
915
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic K8s secret per module env file.
    kubectl create ns $OSM_STACK_NAME
    local module
    for module in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${module}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${module}.env
    done
}
928
#taints K8s master node
function taint_master_node() {
    # Remove the NoSchedule taint from the master node so workloads can be
    # scheduled on this single-node cluster. The node name is taken from the
    # row whose ROLES column mentions "master".
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
935
#deploys osm pods and services
function deploy_osm_services() {
    # Apply every K8s manifest found in $OSM_K8S_WORK_DIR into the per-stack
    # namespace. The manifests have already been tag/volume-patched by
    # update_manifest_files and namespace_vol.
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
940
#deploy charmed services
function deploy_charmed_services() {
    # Create the Juju model on the K8s cloud and deploy the charmed MongoDB
    # (with sidecar) into the OSM stack namespace.
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    local mongodb_args=(
        --config enable-sidecar=true
        --config replica-set=rs0
        --config namespace=$namespace
        -m $namespace
    )
    juju deploy cs:~charmed-osm/mongodb-k8s "${mongodb_args[@]}"
}
952
function deploy_osm_pla_service() {
    # Deploy the optional Placement (PLA) module.
    # corresponding to namespace_vol: point the PLA hostPath volume at the
    # per-stack volume directory
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services: apply the PLA manifests
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
959
#Install Helm v3
function install_helm() {
    # Install Helm v3 if it is not already present on the host, and register
    # the stable chart repository on first installation.
    # Fixes: version string was repeated four times (now a single local), and
    # the presence check ran "helm" and inspected $? afterwards (SC2181).
    local helm_version="v3.6.3"
    if ! command -v helm > /dev/null 2>&1; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz --output helm-${helm_version}.tar.gz
        tar -zxvf helm-${helm_version}.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-${helm_version}.tar.gz
        helm repo add stable https://charts.helm.sh/stable
        helm repo update
    fi
}
975
function parse_yaml() {
    # Rewrite the docker image references in the K8s manifests so that each
    # module's image points at ${DOCKER_REGISTRY_URL}${DOCKER_USER} with the
    # given tag.
    # $1: docker tag to set; remaining args: module names to patch.
    # Fix: the two branches duplicated identical echo/sed logic differing only
    # in the manifest path; compute the path once instead.
    TAG=$1
    shift
    services=$@
    local module manifest
    for module in $services; do
        # PLA is optional: only patch its manifest when it is being installed.
        if [ "$module" == "pla" ] && [ -z "$INSTALL_PLA" ]; then
            continue
        fi
        manifest=${OSM_K8S_WORK_DIR}/${module}.yaml
        [ "$module" == "pla" ] && manifest=${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
        echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
        $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${manifest}
    done
}
992
function update_manifest_files() {
    # Decide which OSM services need their manifests re-tagged and delegate
    # the image substitution to parse_yaml. Services listed in TO_REBUILD get
    # MODULE_DOCKER_TAG; the rest get OSM_DOCKER_TAG (unless it is the
    # default tag "9", whose manifests already match).
    if [ -n "$NGUI" ]; then
        osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    else
        osm_services="nbi lcm ro pol mon light-ui keystone pla"
    fi
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        # TO_REBUILD uses the legacy LW-UI name for the light UI module.
        case "$module_upper" in
            LIGHT-UI) module_upper="LW-UI" ;;
        esac
        echo $TO_REBUILD | grep -q $module_upper || list_of_services="$list_of_services $module"
    done
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
    if [ "$OSM_DOCKER_TAG" != "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}
1017
function namespace_vol() {
    # Point each module's hostPath volume at the per-stack volume directory
    # ($OSM_NAMESPACE_VOL) instead of the default /var/lib/osm.
    local service
    for service in nbi lcm ro pol mon kafka mysql prometheus; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$service.yaml
    done
}
1024
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertised on the default IP.
    # When the host MTU differs from the standard 1500, pre-create the
    # docker_gwbridge network with a matching MTU so swarm overlay traffic
    # is not silently fragmented/dropped.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Collect the names of all existing docker networks (skipping the header row).
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Find the highest 172.x subnet already in use and take the next one
        # (second octet + 1) for the gateway bridge; prints -1 when exhausted.
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1034
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # propagating the MTU detected on the default interface.
    local netname="net${OSM_STACK_NAME}"
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} ${netname}"
    echo "creating network DONE"
}
1040
function deploy_lightweight() {
    # Deploy the OSM docker-swarm stack: compute the per-service port map,
    # persist the deployment environment to osm_ports.sh (sourced by the
    # compose files) and run "docker stack deploy".

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS the services are not published on the host, so only
    # container ports are recorded; otherwise each entry maps
    # host_port:container_port (Prometheus uses a distinct host port 9091 to
    # avoid clashing with RO, which also listens on 9090).
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # Persist the deployment environment; the first tee truncates, the rest append.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    # Deploy the stack from the work dir so the relative ". ./osm_ports.sh" resolves.
    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
1100
function deploy_elk() {
    # Deploy the optional ELK (Elasticsearch/Kibana/Beats/curator) monitoring
    # stack as a separate docker-swarm stack "osm_elk", then wait for Kibana
    # to come up and pre-create the default filebeat index pattern.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    # Remove a previous osm_elk stack before redeploying.
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # Poll Kibana's status endpoint every $step seconds, up to $timelength seconds.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        # Kibana never answered: print the manual recovery instructions.
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1150
function add_local_k8scluster() {
    # Register a dummy VIM account and attach this installation's own K8s
    # cluster to it, so OSM can deploy KNFs on the local cluster.
    local vim_args=(
        --name _system-osm-vim
        --account_type dummy
        --auth_url http://dummy
        --user osm --password osm --tenant osm
        --description "dummy"
        --config '{management_network_name: mgmt}'
    )
    /usr/bin/osm --all-projects vim-create "${vim_args[@]}"
    local cluster_args=(
        --creds ${HOME}/.kube/config
        --vim _system-osm-vim
        --k8s-nets '{"net1": null}'
        --version '1.15'
        --description "OSM Internal Cluster"
    )
    /usr/bin/osm --all-projects k8scluster-add "${cluster_args[@]}" _system-osm-k8s
}
1167
function install_lightweight() {
    # Main entry point for the lightweight (docker swarm or K8s) OSM install:
    # checks prerequisites, installs LXD/docker/K8s/juju as needed, generates
    # the deployment files and deploys the OSM services.
    # Fixes in this revision:
    #   - "[ -n \"$INSTALL_PLA\"]" (missing space before ]) errored whenever
    #     PLA was requested and evaluated TRUE when it was not, inverting the
    #     optional PLA deployment.
    #   - "[ -z \"OSM_DATABASE_COMMONKEY\" ]" tested a literal string, so a
    #     failed secret generation was never detected.
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine default interface, its IP and MTU; they drive docker/K8s networking.
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_helm
        track install_helm
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Determine/bootstrap the VCA (juju) controller and collect its
    # connection data (host, secret, pubkey, CA cert).
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # Generate an LXD cloud definition and a client certificate,
                # trust it in LXD and register cloud+credential in juju.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # Fix: previously tested the literal string "OSM_DATABASE_COMMONKEY".
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # Fix: missing space before "]" made this test misbehave.
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}
1411
function install_to_openstack() {
    # Provision a VM on OpenStack and install OSM in it via the Ansible
    # playbook under installers/openstack.
    # $1: path to an openrc file, or a cloud name from clouds.yaml
    # $2: external network name (mandatory)
    # $3: value passed to the playbook's setup_volume variable

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # $1 is an existing file: treat it as an openrc and source it.
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        # Otherwise $1 names a cloud entry in clouds.yaml.
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
1463
function install_vimemu() {
    # Build and start the vim-emu (emulated VIM) docker container.
    # Requires docker; in lightweight mode it attaches to the OSM network.
    echo -e "\nInstalling vim-emu"  # fixed: was plain echo, printed a literal \n
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    # NOTE: this replaces any previously installed EXIT trap for the whole script
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # fixed: added -y so an unattended install cannot hang on the apt prompt
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1496
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack on Kubernetes via the devops helper script.
    local helper_dir="$OSM_DEVOPS/installers/k8s"
    # Scripts from the cloned repo may not carry the execute bit
    $WORKDIR_SUDO chmod +x $helper_dir/*.sh
    $WORKDIR_SUDO $helper_dir/install_osm_k8s_monitoring.sh
}
1502
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    # Delegates to the devops helper script; $WORKDIR_SUDO is "sudo" unless the
    # user picked a custom workdir with -w (see the w) option handler).
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
1507
function dump_vars(){
    # Print the effective value of every installer option (used by --showopts).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    # normalized quoting on the next three lines (output is unchanged)
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fixed: previously printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1557
function track(){
    # Report an anonymized installer progress event ($1, e.g. "start", "end")
    # to the OSM telemetry endpoint. Best-effort: wget failures are ignored.
    ctime=$(date +%s)  # modernized from backticks
    # SESSION_ID was set to the epoch at script start, so this is elapsed seconds
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event prefix encodes the chosen install flavour
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # fixed: quote the URL — unquoted, its '?' made it a glob pattern subject
    # to pathname expansion against files in the current directory
    wget -q -O /dev/null "$url"
}
1571
function parse_docker_registry_url() {
    # Split an optional "user:password@" credential prefix off the global
    # DOCKER_REGISTRY_URL, leaving the bare registry URL in place and the
    # credentials in DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD.
    # fixed: a URL without credentials used to be wiped to the empty string
    # (awk's a[2] was empty); it is now preserved unchanged.
    if [[ "$DOCKER_REGISTRY_URL" == *"@"* ]]; then
        local creds="${DOCKER_REGISTRY_URL%%@*}"
        DOCKER_REGISTRY_USER="${creds%%:*}"
        DOCKER_REGISTRY_PASSWORD="${creds#*:}"
        # No colon in the credentials means no password was supplied
        [ "$DOCKER_REGISTRY_PASSWORD" = "$creds" ] && DOCKER_REGISTRY_PASSWORD=""
        DOCKER_REGISTRY_URL="${DOCKER_REGISTRY_URL#*@}"
    else
        DOCKER_REGISTRY_USER=""
        DOCKER_REGISTRY_PASSWORD=""
    fi
}
1577
1578 JUJU_AGENT_VERSION=2.8.8
1579 UNINSTALL=""
1580 DEVELOP=""
1581 UPDATE=""
1582 RECONFIGURE=""
1583 TEST_INSTALLER=""
1584 INSTALL_LXD=""
1585 SHOWOPTS=""
1586 COMMIT_ID=""
1587 ASSUME_YES=""
1588 INSTALL_FROM_SOURCE=""
1589 RELEASE="ReleaseNINE"
1590 REPOSITORY="stable"
1591 INSTALL_VIMEMU=""
1592 INSTALL_PLA=""
1593 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1594 LXD_REPOSITORY_PATH=""
1595 INSTALL_LIGHTWEIGHT="y"
1596 INSTALL_TO_OPENSTACK=""
1597 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1598 OPENSTACK_PUBLIC_NET_NAME=""
1599 OPENSTACK_ATTACH_VOLUME="false"
1600 OPENSTACK_SSH_KEY_FILE=""
1601 OPENSTACK_USERDATA_FILE=""
1602 OPENSTACK_VM_NAME="server-osm"
1603 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1604 INSTALL_ONLY=""
1605 INSTALL_ELK=""
1606 TO_REBUILD=""
1607 INSTALL_NOLXD=""
1608 INSTALL_NODOCKER=""
1609 INSTALL_NOJUJU=""
1610 KUBERNETES="y"
1611 NGUI="y"
1612 INSTALL_K8S_MONITOR=""
1613 INSTALL_NOHOSTCLIENT=""
1614 SESSION_ID=`date +%s`
1615 OSM_DEVOPS=
1616 OSM_VCA_HOST=
1617 OSM_VCA_SECRET=
1618 OSM_VCA_PUBKEY=
1619 OSM_VCA_CLOUDNAME="localhost"
1620 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1621 OSM_STACK_NAME=osm
1622 NO_HOST_PORTS=""
1623 DOCKER_NOBUILD=""
1624 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1625 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1626 WORKDIR_SUDO=sudo
1627 OSM_WORK_DIR="/etc/osm"
1628 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1629 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1630 OSM_HOST_VOL="/var/lib/osm"
1631 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1632 OSM_DOCKER_TAG=latest
1633 DOCKER_USER=opensourcemano
1634 PULL_IMAGES="y"
1635 KAFKA_TAG=2.11-1.0.2
1636 PROMETHEUS_TAG=v2.4.3
1637 GRAFANA_TAG=latest
1638 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1639 PROMETHEUS_CADVISOR_TAG=latest
1640 KEYSTONEDB_TAG=10
1641 OSM_DATABASE_COMMONKEY=
1642 ELASTIC_VERSION=6.4.2
1643 ELASTIC_CURATOR_VERSION=5.5.4
1644 POD_NETWORK_CIDR=10.244.0.0/16
1645 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1646 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1647 DOCKER_REGISTRY_URL=
1648 DOCKER_PROXY_URL=
1649 MODULE_DOCKER_TAG=
1650
# ---------------------------------------------------------------------------
# Command-line option parsing.
# Long options (--foo) arrive through the "-" case with the long name in
# OPTARG; each "continue" resumes the while loop once a value has matched,
# so the trailing error+usage lines only run for unrecognized values.
# ---------------------------------------------------------------------------
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # fixed: message used to say "-i" although this is the -c option
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # -o installs a single optional component and then exits
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # -m accumulates the list of modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # Stack name becomes a k8s namespace, so it must match RE_CHECK
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            # Long options; the bundle/k8s/lxd/... names are consumed here but
            # acted upon by the charmed installer scripts via "$@"
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1830
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
# -m NONE is exclusive of other -m values; -m PLA requires --pla
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# --showopts: dump the effective configuration and exit without installing
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi
1839
# --charmed: delegate the whole (un)install to the Juju-bundle scripts and exit
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        # NOTE(review): $DOCKER_TAG is not set anywhere visible in this script
        # (other code paths use $OSM_DOCKER_TAG) — confirm the intended variable
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    exit 0
fi
1849
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

# -O: the OpenStack install path short-circuits the rest of the script
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
# The "|| ! echo … || sudo …" chain only reaches apt when dpkg reports a
# missing package ("! echo" forces a failure so the chain keeps going)
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"
sudo snap install jq
# Locate (or clone) the devops repo that hosts the rest of the installer
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No explicit refspec: use the newest v* tag as the stable release
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

# Common helper functions (FATAL, ask_user, …) come from the devops repo
. $OSM_DEVOPS/common/all_funcs

# Per-stack working dir / k8s namespace adjustments, then the --uninstall
# and -o (install-only) short-circuit paths
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1902
#Installation starts here
# Fetching this README acts as an "install started" beacon for ETSI stats
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

# Default path: lightweight (container-based) install, then exit
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# "install finished" beacon, mirrored by track end
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"