1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -h / --help: print this help"
21 echo -e " -y: do not prompt for confirmation, assumes yes"
22 echo -e " -r <repo>: use specified repository name for osm packages"
23 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
24 echo -e " -u <repo base>: use specified repository url for osm packages"
25 echo -e " -k <repo key>: use specified repository public key url"
26 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
27 echo -e " -b master (main dev branch)"
28 echo -e " -b v2.0 (v2.0 branch)"
29 echo -e " -b tags/v1.1.0 (a specific tag)"
30 echo -e " ..."
31 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
32 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified, osm will be installed with light-ui. When used with --uninstall, osm along with the specified UI will be uninstalled"
33 echo -e " -s <stack name> or <namespace> user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
40 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
41 echo -e " --pla: install the PLA module for placement support"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
44 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
47 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
48 echo -e " -D <devops path> use local devops installation path"
49 echo -e " -w <work dir> Location to store runtime installation"
50 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
51 echo -e " -l: LXD cloud yaml file"
52 echo -e " -L: LXD credentials yaml file"
53 echo -e " -K: Specifies the name of the controller to use - The controller must already be bootstrapped"
54 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
55 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
56 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
57 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
58 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
59 echo -e " --nojuju: do not install juju, assumes it is already installed"
60 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
61 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
62 echo -e " --nohostclient: do not install the osmclient"
63 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
64 echo -e " --source: install OSM from source code using the latest stable tag"
65 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
66 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
67 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
68 echo -e " --volume: create a VM volume when installing to OpenStack"
69 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
70 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
71 echo -e " --showopts: print chosen options and exit (only for debugging)"
72 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
73 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
74 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
75 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must already be bootstrapped (--charmed option)"
76 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
77 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
78 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
79 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
80 echo -e " [--tag]: Docker image tag. (--charmed option)"
81 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
82
83 }
84
85 # takes a juju/accounts.yaml file and returns the password for a specific
86 # controller. Written using only bash tools to avoid adding dependencies
87 # on other packages
88 function parse_juju_password {
89 password_file="${HOME}/.local/share/juju/accounts.yaml"
90 local controller_name=$1
91 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
92 sed -ne "s|^\($s\):|\1|" \
93 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
94 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
95 awk -F$fs -v controller=$controller_name '{
96 indent = length($1)/2;
97 vname[indent] = $2;
98 for (i in vname) {if (i > indent) {delete vname[i]}}
99 if (length($3) > 0) {
100 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
101 if (match(vn,controller) && match($2,"password")) {
102 printf("%s",$3);
103 }
104 }
105 }'
106 }
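# Illustrative usage (not executed by the installer); assumes a controller
# named "osm" bootstrapped with the default accounts.yaml location:
#   OSM_VCA_SECRET=$(parse_juju_password osm)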
107
108 function generate_secret() {
109 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
110 }
111
112 function remove_volumes() {
113 if [ -n "$KUBERNETES" ]; then
114 k8_volume=$1
115 echo "Removing ${k8_volume}"
116 $WORKDIR_SUDO rm -rf ${k8_volume}
117 else
118 stack=$1
119 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
120 for volume in $volumes; do
121 sg docker -c "docker volume rm ${stack}_${volume}"
122 done
123 fi
124 }
125
126 function remove_network() {
127 stack=$1
128 sg docker -c "docker network rm net${stack}"
129 }
130
131 function remove_iptables() {
132 stack=$1
133 if [ -z "$OSM_VCA_HOST" ]; then
134 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
135 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
136 fi
137
138 if [ -z "$DEFAULT_IP" ]; then
139 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
140 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
141 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
142 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
143 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
144 fi
145
146 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
147 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
148 sudo netfilter-persistent save
149 fi
150 }
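# To check by hand whether the DNAT rule is still present after removal
# (illustrative, not part of the installer flow):
#   sudo iptables -t nat -nL PREROUTING | grep 17070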
151
152 function remove_stack() {
153 stack=$1
154 if sg docker -c "docker stack ps ${stack}" ; then
155 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
156 COUNTER=0
157 result=1
158 while [ ${COUNTER} -lt 30 ]; do
159 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
160 #echo "Dockers running: $result"
161 if [ "${result}" == "0" ]; then
162 break
163 fi
164 let COUNTER=COUNTER+1
165 sleep 1
166 done
167 if [ "${result}" == "0" ]; then
168 echo "All dockers of the stack ${stack} were removed"
169 else
170 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
171 fi
172 sleep 5
173 fi
174 }
175
176 #removes osm deployments and services
177 function remove_k8s_namespace() {
178 kubectl delete ns $1
179 }
180
181 #removes helm only if there is nothing deployed in helm
182 function remove_helm() {
183 if [ "$(helm ls -q)" == "" ] ; then
184 sudo helm reset --force
185 kubectl delete --namespace kube-system serviceaccount tiller
186 kubectl delete clusterrolebinding tiller-cluster-rule
187 sudo rm /usr/local/bin/helm
188 rm -rf $HOME/.helm
189 fi
190 }
191
192 function remove_crontab_job() {
193 crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
194 }
195
196 #Uninstall osmclient
197 function uninstall_osmclient() {
198 sudo apt-get remove --purge -y python-osmclient
199 sudo apt-get remove --purge -y python3-osmclient
200 }
201
202 #Uninstall lightweight OSM: remove dockers
203 function uninstall_lightweight() {
204 if [ -n "$INSTALL_ONLY" ]; then
205 if [ -n "$INSTALL_ELK" ]; then
206 echo -e "\nUninstalling OSM ELK stack"
207 remove_stack osm_elk
208 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
209 fi
210 else
211 echo -e "\nUninstalling OSM"
212 if [ -n "$KUBERNETES" ]; then
213 if [ -n "$INSTALL_K8S_MONITOR" ]; then
214 # uninstall OSM MONITORING
215 uninstall_k8s_monitoring
216 fi
217 remove_k8s_namespace $OSM_STACK_NAME
218 else
219 remove_stack $OSM_STACK_NAME
220 remove_stack osm_elk
221 fi
222 echo "Now osm docker images and volumes will be deleted"
223 # TODO: clean-up of images should take into account if other tags were used for specific modules
224 newgrp docker << EONG
225 for module in ro lcm keystone nbi mon pol pla osmclient; do
226 docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
227 done
228 EONG
229
230 if [ -n "$NGUI" ]; then
231 sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
232 else
233 sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
234 fi
235
236 if [ -n "$KUBERNETES" ]; then
237 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
238 remove_volumes $OSM_NAMESPACE_VOL
239 else
240 remove_volumes $OSM_STACK_NAME
241 remove_network $OSM_STACK_NAME
242 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
243 fi
244 echo "Removing $OSM_DOCKER_WORK_DIR"
245 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
246 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
247 fi
248 remove_crontab_job
249
250 # Cleanup Openstack installer venv
251 if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
252 rm -r $OPENSTACK_PYTHON_VENV
253 fi
254
255 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
256 echo "Some docker images will be kept in case they are used by other docker stacks"
257 echo "To remove them, just run 'docker image prune' in a terminal"
258 return 0
259 }
260
261 #Safe unattended install of iptables-persistent
262 function check_install_iptables_persistent(){
263 echo -e "\nChecking required packages: iptables-persistent"
264 if ! dpkg -l iptables-persistent &>/dev/null; then
265 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
266 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
267 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
268 sudo apt-get -yq install iptables-persistent
269 fi
270 }
271
272 #Configure NAT rules, based on the current IP addresses of containers
273 function nat(){
274 check_install_iptables_persistent
275
276 echo -e "\nConfiguring NAT rules"
277 echo -e " Required root privileges"
278 sudo $OSM_DEVOPS/installers/nat_osm
279 }
280
281 function FATAL(){
282 echo "FATAL error: Cannot install OSM due to \"$1\""
283 exit 1
284 }
285
286 function update_juju_images(){
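# The "(crontab -l; echo ...) | crontab -" idiom below rewrites the user
# crontab with the existing entries plus a new weekly job, and is only run
# when no update-juju-lxc-images entry is present yet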
287 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
288 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
289 }
290
291 function install_lxd() {
292 # Apply sysctl production values for optimal performance
293 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
294 sudo sysctl --system
295
296 # Install LXD snap
297 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
298 sudo snap install lxd
299
300 # Configure LXD
301 sudo usermod -a -G lxd `whoami`
302 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
303 sg lxd -c "lxd waitready"
304 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
305 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
306 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
307 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
308 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
309 #sudo systemctl stop lxd-bridge
310 #sudo systemctl --system daemon-reload
311 #sudo systemctl enable lxd-bridge
312 #sudo systemctl start lxd-bridge
313 }
314
315 function ask_user(){
316 # asks the user a question and parses the response among 'y', 'yes', 'n' or 'no'. Case insensitive
317 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
318 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
319 read -e -p "$1" USER_CONFIRMATION
320 while true ; do
321 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
322 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
323 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
324 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
325 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
326 done
327 }
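# Illustrative usage (defaulting to "yes" when the user just presses Enter):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1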
328
329 function install_osmclient(){
330 CLIENT_RELEASE=${RELEASE#"-R "}
331 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
332 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
333 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
334 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
335 curl $key_location | sudo apt-key add -
336 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
337 sudo apt-get update
338 sudo apt-get install -y python3-pip
339 sudo -H LC_ALL=C python3 -m pip install -U pip
340 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
341 sudo apt-get install -y python3-osm-im python3-osmclient
342 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
343 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
344 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
345 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
346 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
347 echo -e "\nOSM client installed"
348 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
349 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
350 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
351 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
352 else
353 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
354 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
355 echo " export OSM_HOSTNAME=<OSM_host>"
356 fi
357 return 0
358 }
359
360 function install_prometheus_nodeexporter(){
361 if (systemctl -q is-active node_exporter)
362 then
363 echo "Node Exporter is already running."
364 else
365 echo "Node Exporter is not active, installing..."
366 if getent passwd node_exporter > /dev/null 2>&1; then
367 echo "node_exporter user exists"
368 else
369 echo "Creating user node_exporter"
370 sudo useradd --no-create-home --shell /bin/false node_exporter
371 fi
372 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
373 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
374 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
375 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
376 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
377 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
378 sudo systemctl daemon-reload
379 sudo systemctl restart node_exporter
380 sudo systemctl enable node_exporter
381 echo "Node Exporter has been activated in this host."
382 fi
383 return 0
384 }
385
386 function uninstall_prometheus_nodeexporter(){
387 sudo systemctl stop node_exporter
388 sudo systemctl disable node_exporter
389 sudo rm /etc/systemd/system/node_exporter.service
390 sudo systemctl daemon-reload
391 sudo userdel node_exporter
392 sudo rm /usr/local/bin/node_exporter
393 return 0
394 }
395
396 function install_docker_ce() {
397 # installs and configures Docker CE
398 echo "Installing Docker CE ..."
399 sudo apt-get -qq update
400 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
401 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
402 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
403 sudo apt-get -qq update
404 sudo apt-get install -y docker-ce
405 echo "Adding user to group 'docker'"
406 sudo groupadd -f docker
407 sudo usermod -aG docker $USER
408 sleep 2
409 sudo service docker restart
410 echo "... restarted Docker service"
411 if [ -n "${DOCKER_PROXY_URL}" ]; then
412 echo "Configuring docker proxy ..."
413 if [ -f /etc/docker/daemon.json ]; then
414 if grep -q registry-mirrors /etc/docker/daemon.json; then
415 sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
416 else
417 sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
418 fi
419 else
420 sudo bash -c "cat << EOF > /etc/docker/daemon.json
421 {
422 \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
423 }
424 EOF"
425 fi
426 sudo systemctl daemon-reload
427 sudo service docker restart
428 echo "... restarted Docker service again"
429 fi
430 sg docker -c "docker version" || FATAL "Docker installation failed"
431 echo "... Docker CE installation done"
432 return 0
433 }
434
435 function install_docker_compose() {
436 # installs and configures docker-compose
437 echo "Installing Docker Compose ..."
438 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
439 sudo chmod +x /usr/local/bin/docker-compose
440 echo "... Docker Compose installation done"
441 }
442
443 function install_juju() {
444 echo "Installing juju"
445 sudo snap install juju --classic --channel=2.8/stable
446 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
447 update_juju_images
448 echo "Finished installation of juju"
449 return 0
450 }
451
452 function juju_createcontroller() {
453 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
454 # Controller not found, so create it
455 sudo usermod -a -G lxd ${USER}
456 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
457 fi
458 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
459 juju controller-config features=[k8s-operators]
460 }
461
462 function juju_addk8s() {
463 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
464 }
465
466 function juju_createcontroller_k8s(){
467 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
468 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
469 --config controller-service-type=loadbalancer \
470 --agent-version=$JUJU_AGENT_VERSION
471 }
472
473
474 function juju_addlxd_cloud(){
475 mkdir -p /tmp/.osm
476 OSM_VCA_CLOUDNAME="lxd-cloud"
477 LXDENDPOINT=$DEFAULT_IP
478 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
479 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
480
481 cat << EOF > $LXD_CLOUD
482 clouds:
483 $OSM_VCA_CLOUDNAME:
484 type: lxd
485 auth-types: [certificate]
486 endpoint: "https://$LXDENDPOINT:8443"
487 config:
488 ssl-hostname-verification: false
489 EOF
490 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
491 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
492 local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'`
493 local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'`
494
495 cat << EOF > $LXD_CREDENTIALS
496 credentials:
497 $OSM_VCA_CLOUDNAME:
498 lxd-cloud:
499 auth-type: certificate
500 server-cert: |
501 $server_cert
502 client-cert: |
503 $client_cert
504 client-key: |
505 $client_key
506 EOF
507 lxc config trust add local: /tmp/.osm/client.crt
508 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
509 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
510 sg lxd -c "lxd waitready"
511 juju controller-config features=[k8s-operators]
512 }
513
514
515 function juju_createproxy() {
516 check_install_iptables_persistent
517
518 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
519 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
520 sudo netfilter-persistent save
521 fi
522 }
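# The resulting rule can be verified manually (illustrative):
#   sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP \
#       --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST && echo "rule present"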
523
524 function docker_login() {
525 echo "Docker login"
526 sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
527 }
528
529 function generate_docker_images() {
530 echo "Pulling and generating docker images"
531 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
532
533 echo "Pulling docker images"
534
535 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
536 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
537 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
538 fi
539
540 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
541 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
542 fi
543
544 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
545 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
546 fi
547
548 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
549 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
550 fi
551
552 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
553 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
554 fi
555
556 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
557 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
558 fi
559
560 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
561 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
562 fi
563
564 if [ -n "$PULL_IMAGES" ]; then
565 echo "Pulling OSM docker images"
566 for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
567 module_lower=${module,,}
568 if [ $module == "LW-UI" ]; then
569 if [ -n "$NGUI" ]; then
570 continue
571 else
572 module_lower="light-ui"
573 fi
574 fi
575 if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
576 continue
577 fi
578 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
579 continue
580 fi
581 module_tag="${OSM_DOCKER_TAG}"
582 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
583 module_tag="${MODULE_DOCKER_TAG}"
584 fi
585 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
586 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
587 done
588 else
589 _build_from=$COMMIT_ID
590 [ -z "$_build_from" ] && _build_from="latest"
591 echo "OSM Docker images generated from $_build_from"
592
593 for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
594 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
595 module_lower=${module,,}
596 if [ $module == "LW-UI" ]; then
597 if [ -n "$NGUI" ]; then
598 continue
599 else
600 module_lower="light-ui"
601 fi
602 fi
603 if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
604 continue
605 fi
606 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
607 continue
608 fi
609 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
610 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
611 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
612 fi
613 done
614 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
615 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
616 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
617 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
618 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
619 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
620 fi
621 echo "Finished generation of docker images"
622 fi
623
624 echo "Finished pulling and generating docker images"
625 }
626
627 function cmp_overwrite() {
628 file1="$1"
629 file2="$2"
630 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
631 if [ -f "${file2}" ]; then
632 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
633 else
634 cp -b ${file1} ${file2}
635 fi
636 fi
637 }
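# Illustrative usage (file paths are hypothetical); prompts before
# overwriting an existing destination file:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml \
#       $OSM_DOCKER_WORK_DIR/docker-compose.yaml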
638
639 function generate_docker_compose_files() {
640 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
641 if [ -n "$NGUI" ]; then
642 # For NG-UI
643 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
644 else
645 # Docker-compose
646 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
647 fi
648 if [ -n "$INSTALL_PLA" ]; then
649 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
650 fi
651 }
652
653 function generate_k8s_manifest_files() {
654 #Kubernetes resources
655 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
656 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
657 if [ -n "$NGUI" ]; then
658 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
659 else
660 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
661 fi
662 }
663
664 function generate_prometheus_grafana_files() {
665 [ -n "$KUBERNETES" ] && return
666 # Prometheus files
667 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
668 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
669
670 # Grafana files
671 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
672 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
673 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
674 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
675 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
676
677 # Prometheus Exporters files
678 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
679 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
680 }
681
682 function generate_docker_env_files() {
683 echo "Doing a backup of existing env files"
684 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
685 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
686 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
687 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
688 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
689 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
690 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
691 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
692 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
693
694 echo "Generating docker env files"
695 # LCM
696 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
697 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
698 fi
699
700 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
701 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
702 else
703 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
704 fi
705
706 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
707 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
708 else
709 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
710 fi
711
712 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
713 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
714 else
715 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
716 fi
717
718 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
719 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
720 else
721 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
722 fi
723
724 if [ -n "$OSM_VCA_APIPROXY" ]; then
725 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
726 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
727 else
728 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
729 fi
730 fi
731
732 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
733 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
734 fi
735
736 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
737 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
738 fi
739
740 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
741 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
742 else
743 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
744 fi
745
746 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
747 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
748 else
749 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
750 fi
751
752 # RO
753 MYSQL_ROOT_PASSWORD=$(generate_secret)
754 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
755 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
756 fi
757 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
758 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
759 fi
760 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
761 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
762 fi
763
764 # Keystone
765 KEYSTONE_DB_PASSWORD=$(generate_secret)
766 SERVICE_PASSWORD=$(generate_secret)
767 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
768 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
769 fi
770 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
771 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
772 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
773 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
774 fi
775
776 # NBI
777 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
778 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
779 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
780 fi
781
782 # MON
783 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
784 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
785 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
786 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
787 fi
788
789 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
790 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
791 else
792 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
793 fi
794
795 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
796 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
797 else
798 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
799 fi
800
801 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
802 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
803 else
804 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
805 fi
806
807 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
808 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
809 else
810 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
811 fi
812
813
814 # POL
815 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
816 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
817 fi
818
819 # LW-UI
820 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
821 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
822 fi
823
824 echo "Finished generation of docker env files"
825 }
826
827 function generate_osmclient_script () {
828 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
829 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
830 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
831 }
832
833 #installs kubernetes packages
834 function install_kube() {
835 sudo apt-get update && sudo apt-get install -y apt-transport-https
836 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
837 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
838 sudo apt-get update
839 echo "Installing Kubernetes Packages ..."
840 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
841 }
842
843 #initializes kubernetes control plane
844 function init_kubeadm() {
845 sudo swapoff -a
846 sudo kubeadm init --config $1
847 sleep 5
848 }
849
850 function kube_config_dir() {
851 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
852 mkdir -p $HOME/.kube
853 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
854 sudo chown $(id -u):$(id -g) $HOME/.kube/config
855 }
856
857 function install_k8s_storageclass() {
858 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
859 local storageclass_timeout=300
860 local counter=0
861 echo "Waiting for storageclass"
862 while (( counter < storageclass_timeout ))
863 do
864 kubectl get storageclass openebs-hostpath &> /dev/null
865
866 if [ $? -eq 0 ] ; then
867 echo "Storageclass available"
868 break
869 else
870 counter=$((counter + 15))
871 sleep 15
872 fi
873 done
874 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
875 }
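# The default storageclass annotation can be checked manually (illustrative):
#   kubectl get storageclass openebs-hostpath \
#       -o jsonpath='{.metadata.annotations.storageclass\.kubernetes\.io/is-default-class}'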
876
877 function install_k8s_metallb() {
878 METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
879 cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
880 echo "apiVersion: v1
881 kind: ConfigMap
882 metadata:
883 namespace: metallb-system
884 name: config
885 data:
886 config: |
887 address-pools:
888 - name: default
889 protocol: layer2
890 addresses:
891 - $METALLB_IP_RANGE" | kubectl apply -f -
892 }
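# The applied MetalLB address pool can be inspected afterwards (illustrative):
#   kubectl -n metallb-system get configmap config -o yaml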
893 #deploys flannel as a daemonset
894 function deploy_cni_provider() {
895 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
896 trap 'rm -rf "${CNI_DIR}"' EXIT
897 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
898 kubectl apply -f $CNI_DIR
899 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
900 }
901
902 #creates secrets from env files which will be used by containers
903 function kube_secrets(){
904 kubectl create ns $OSM_STACK_NAME
905 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
906 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
907 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
908 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
909 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
910 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
911 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
912 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
913 }
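# The generated secrets can be listed afterwards (illustrative):
#   kubectl -n $OSM_STACK_NAME get secrets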
914
915 #removes the NoSchedule taint from the K8s master node so pods can be scheduled on it
916 function taint_master_node() {
917 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
918 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
919 sleep 5
920 }
921
922 #deploys osm pods and services
923 function deploy_osm_services() {
924 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
925 }
926
927 #deploy charmed services
928 function deploy_charmed_services() {
929 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
930 # deploy mongodb charm
931 namespace=$OSM_STACK_NAME
932 juju deploy cs:~charmed-osm/mongodb-k8s \
933 --config enable-sidecar=true \
934 --config replica-set=rs0 \
935 --config namespace=$namespace \
936 -m $namespace
937 }
938
939 function deploy_osm_pla_service() {
940 # corresponding to namespace_vol
941 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
942 # corresponding to deploy_osm_services
943 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
944 }
945
946 #Install helm and tiller
947 function install_helm() {
948 helm > /dev/null 2>&1
949 if [ $? != 0 ] ; then
950 # Helm is not installed. Install helm
951 echo "Helm is not installed, installing ..."
952 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
953 tar -zxvf helm-v2.15.2.tar.gz
954 sudo mv linux-amd64/helm /usr/local/bin/helm
955 rm -r linux-amd64
956 rm helm-v2.15.2.tar.gz
957 fi
958
959 # Checking if tiller has been configured
960 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
961 if [ $? == 1 ] ; then
962 # tiller account for kubernetes
963 kubectl --namespace kube-system create serviceaccount tiller
964 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
965 # HELM initialization
966 helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller
967
968 # Wait for Tiller to be up and running. If timeout expires, continue installing
969 tiller_timeout=120;
970 counter=0;
971 tiller_status=""
972 while (( counter < tiller_timeout ))
973 do
974 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
975 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
976 counter=$((counter + 5))
977 sleep 5
978 done
979 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
980 fi
981 }
982
983 function parse_yaml() {
984 TAG=$1
985 shift
986 services=$@
987 for module in $services; do
988 if [ "$module" == "pla" ]; then
989 if [ -n "$INSTALL_PLA" ]; then
990 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
991 $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
992 fi
993 else
994 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
995 $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
996 fi
997 done
998 }
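# Despite its name, parse_yaml rewrites the image references in the K8s
# manifests. Illustrative usage (tag and module list are hypothetical):
#   parse_yaml 9.1.0 "nbi lcm mon"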
999
1000 function update_manifest_files() {
1001 if [ -n "$NGUI" ]; then
1002 osm_services="nbi lcm ro pol mon ng-ui keystone pla"
1003 else
1004 osm_services="nbi lcm ro pol mon light-ui keystone pla"
1005 fi
1006 list_of_services=""
1007 for module in $osm_services; do
1008 module_upper="${module^^}"
1009 if [ "$module_upper" == "LIGHT-UI" ]; then
1010 module_upper="LW-UI"
1011 fi
1012 if ! echo $TO_REBUILD | grep -q $module_upper ; then
1013 list_of_services="$list_of_services $module"
1014 fi
1015 done
1016 list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
1017 if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
1018 parse_yaml $OSM_DOCKER_TAG $list_of_services
1019 fi
1020 if [ -n "$MODULE_DOCKER_TAG" ]; then
1021 parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
1022 fi
1023 }
1024
1025 function namespace_vol() {
1026 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
1027 for osm in $osm_services; do
1028 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
1029 done
1030 }
1031
1032 function init_docker_swarm() {
1033 if [ "${DEFAULT_MTU}" != "1500" ]; then
1034 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
1035 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
1036 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
1037 fi
1038 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
1039 return 0
1040 }
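# The MTU applied to the recreated gateway bridge can be checked with
# (illustrative):
#   docker network inspect docker_gwbridge \
#       --format '{{index .Options "com.docker.network.driver.mtu"}}'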
1041
1042 function create_docker_network() {
1043 echo "creating network"
1044 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1045 echo "creating network DONE"
1046 }
1047
1048 function deploy_lightweight() {
1049
1050 echo "Deploying lightweight build"
1051 OSM_NBI_PORT=9999
1052 OSM_RO_PORT=9090
1053 OSM_KEYSTONE_PORT=5000
1054 OSM_UI_PORT=80
1055 OSM_MON_PORT=8662
1056 OSM_PROM_PORT=9090
1057 OSM_PROM_CADVISOR_PORT=8080
1058 OSM_PROM_HOSTPORT=9091
1059 OSM_GRAFANA_PORT=3000
1060 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1061 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1062
1063 if [ -n "$NO_HOST_PORTS" ]; then
1064 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1065 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1066 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1067 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1068 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1069 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1070 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1071 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1072 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1073 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1074 else
1075 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1076 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1077 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1078 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1079 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1080 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1081 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1082 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1083 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1084 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1085 fi
1086 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1087 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1088 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1089 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1090 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1091 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1092 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1093 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1094 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1095
1096 pushd $OSM_DOCKER_WORK_DIR
1097 if [ -n "$INSTALL_PLA" ]; then
1098 track deploy_osm_pla
1099 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1100 else
1101 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1102 fi
1103 popd
1104
1105 echo "Finished deployment of lightweight build"
1106 }
1107
1108 function deploy_elk() {
1109 echo "Pulling docker images for ELK"
1110 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1111 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1112 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1113 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1114 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1115 echo "Finished pulling elk docker images"
1116 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1117 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1118 remove_stack osm_elk
1119 echo "Deploying ELK stack"
1120 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1121 echo "Waiting for ELK stack to be up and running"
1122 time=0
1123 step=5
1124 timelength=40
1125 elk_is_up=1
1126 while [ $time -le $timelength ]; do
1127 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1128 elk_is_up=0
1129 break
1130 fi
1131 sleep $step
1132 time=$((time+step))
1133 done
1134 if [ $elk_is_up -eq 0 ]; then
1135 echo "ELK is up and running. Trying to create index pattern..."
1136 #Create index pattern
1137 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1138 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1139 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1140 #Make it the default index
1141 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1142 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1143 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1144 else
1145 echo "Cannot connect to Kibana to create index pattern."
1146 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1147 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1148 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1149 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1150 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1151 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1152 -d"{\"value\":\"filebeat-*\"}"'
1153 fi
1154 echo "Finished deployment of ELK stack"
1155 return 0
1156 }
1157
1158 function add_local_k8scluster() {
1159 /usr/bin/osm --all-projects vim-create \
1160 --name _system-osm-vim \
1161 --account_type dummy \
1162 --auth_url http://dummy \
1163 --user osm --password osm --tenant osm \
1164 --description "dummy" \
1165 --config '{management_network_name: mgmt}'
1166 /usr/bin/osm --all-projects k8scluster-add \
1167 --creds ${HOME}/.kube/config \
1168 --vim _system-osm-vim \
1169 --k8s-nets '{"net1": null}' \
1170 --version '1.15' \
1171 --description "OSM Internal Cluster" \
1172 _system-osm-k8s
1173 }
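# The registration can be verified afterwards (illustrative; assumes the
# osmclient is installed on the host):
#   osm --all-projects k8scluster-list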
1174
1175 function install_lightweight() {
1176 track checkingroot
1177 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1178 track noroot
1179
1180 if [ -n "$KUBERNETES" ]; then
1181 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1182 1. Install and configure LXD
1183 2. Install juju
1184 3. Install docker CE
1185 4. Disable swap space
1186 5. Install and initialize Kubernetes
1187 as pre-requirements.
1188 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1189
1190 else
1191 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1192 fi
1193 track proceed
1194
1195 echo "Installing lightweight build of OSM"
1196 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1197 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1198 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1199 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1200 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1201 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1202 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1203 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1204
1205 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1206 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1207 need_packages_lw="snapd"
1208 echo -e "Checking required packages: $need_packages_lw"
1209 dpkg -l $need_packages_lw &>/dev/null \
1210 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1211 || sudo apt-get update \
1212 || FATAL "failed to run apt-get update"
1213 dpkg -l $need_packages_lw &>/dev/null \
1214 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1215 || sudo apt-get install -y $need_packages_lw \
1216 || FATAL "failed to install $need_packages_lw"
1217 install_lxd
1218 fi
1219
1220 track prereqok
1221
1222 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1223
1224 echo "Creating folders for installation"
1225 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1226 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1227 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1228
1229 #Installs Kubernetes
1230 if [ -n "$KUBERNETES" ]; then
1231 install_kube
1232 track install_k8s
1233 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1234 kube_config_dir
1235 track init_k8s
1236 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1237 # uninstall OSM MONITORING
1238 uninstall_k8s_monitoring
1239 track uninstall_k8s_monitoring
1240 fi
1241 #remove old namespace
1242 remove_k8s_namespace $OSM_STACK_NAME
1243 deploy_cni_provider
1244 taint_master_node
1245 install_k8s_storageclass
1246 track k8s_storageclass
1247 install_k8s_metallb
1248 track k8s_metallb
1249 else
1250 #install_docker_compose
1251 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1252 track docker_swarm
1253 fi
1254
1255 [ -z "$INSTALL_NOJUJU" ] && install_juju
1256 track juju_install
1257
1258 if [ -z "$OSM_VCA_HOST" ]; then
1259 if [ -z "$CONTROLLER_NAME" ]; then
1260
1261 if [ -n "$KUBERNETES" ]; then
1262 juju_createcontroller_k8s
1263 juju_addlxd_cloud
1264 else
1265 if [ -n "$LXD_CLOUD_FILE" ]; then
1266 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1267 OSM_VCA_CLOUDNAME="lxd-cloud"
1268 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1269 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1270 fi
1271 juju_createcontroller
1272 juju_createproxy
1273 fi
1274 else
1275 OSM_VCA_CLOUDNAME="lxd-cloud"
1276 if [ -n "$LXD_CLOUD_FILE" ]; then
1277 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1278 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1279 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1280 else
1281 mkdir -p ~/.osm
1282 cat << EOF > ~/.osm/lxd-cloud.yaml
1283 clouds:
1284   lxd-cloud:
1285     type: lxd
1286     auth-types: [certificate]
1287     endpoint: "https://$DEFAULT_IP:8443"
1288     config:
1289       ssl-hostname-verification: false
1290 EOF
1291 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1292 local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
1293 local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
1294 local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')
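# The eight-space sed prefix indents every line of the certificates so they nest
# correctly under the "server-cert: |" / "client-cert: |" / "client-key: |"
# block scalars in the credentials file written below.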
1295 cat << EOF > ~/.osm/lxd-credentials.yaml
1296 credentials:
1297   lxd-cloud:
1298     lxd-cloud:
1299       auth-type: certificate
1300       server-cert: |
1301 $server_cert
1302       client-cert: |
1303 $client_cert
1304       client-key: |
1305 $client_key
1306 EOF
1307 lxc config trust add local: ~/.osm/client.crt
1308 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1309 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1310 fi
1311 fi
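# "juju show-controller" prints an api-endpoints field such as
# api-endpoints: ['10.0.2.15:17070'] (example value); the pipeline below keeps
# only the host part of the first endpoint.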
1312 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1313 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1314 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1315 fi
1316 track juju_controller
1317
1318 if [ -z "$OSM_VCA_SECRET" ]; then
1319 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1320 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1321 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1322 fi
1323 if [ -z "$OSM_VCA_PUBKEY" ]; then
1324 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1325 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1326 fi
1327 if [ -z "$OSM_VCA_CACERT" ]; then
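# The controller CA certificate is read from "juju controllers --format json"
# and base64-encoded with newlines stripped, so it can be handed around as a
# single-line value.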
1328 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1329 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1330 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1331 fi
1332
1333 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1334 if [ -z "$KUBERNETES" ]; then
1335 if [ -z "$OSM_VCA_APIPROXY" ]; then
1336 OSM_VCA_APIPROXY=$DEFAULT_IP
1337 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1338 fi
1339 juju_createproxy
1340 fi
1341 track juju
1342
1343 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1344 OSM_DATABASE_COMMONKEY=$(generate_secret)
1345 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1346 fi
1347
1348 # Deploy OSM services
1349 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1350 track docker_build
1351
1352 if [ -n "$KUBERNETES" ]; then
1353 generate_k8s_manifest_files
1354 else
1355 generate_docker_compose_files
1356 fi
1357 track manifest_files
1358 generate_prometheus_grafana_files
1359 generate_docker_env_files
1360 track env_files
1361
1362 if [ -n "$KUBERNETES" ]; then
1363 deploy_charmed_services
1364 kube_secrets
1365 update_manifest_files
1366 namespace_vol
1367 deploy_osm_services
1368 if [ -n "$INSTALL_PLA" ]; then
1369 # optional PLA install
1370 deploy_osm_pla_service
1371 track deploy_osm_pla
1372 fi
1373 track deploy_osm_services_k8s
1374 install_helm
1375 track install_helm
1376 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1377 # install OSM MONITORING
1378 install_k8s_monitoring
1379 track install_k8s_monitoring
1380 fi
1381 else
1382 # remove old stack
1383 remove_stack $OSM_STACK_NAME
1384 create_docker_network
1385 deploy_lightweight
1386 generate_osmclient_script
1387 track docker_deploy
1388 install_prometheus_nodeexporter
1389 track nodeexporter
1390 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1391 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1392 fi
1393
1394 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1395 track osmclient
1396
1397 echo -e "Checking OSM health state..."
1398 if [ -n "$KUBERNETES" ]; then
1399 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1400 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1401 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"; \
1402 track osm_unhealthy; }
1403 else
1404 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1405 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1406 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"; \
1407 track osm_unhealthy; }
1408 fi
1409 track after_healthcheck
1410
1411 [ -n "$KUBERNETES" ] && add_local_k8scluster
1412 track add_local_k8scluster
1413
1414 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
1415 track end
1416 return 0
1417 }
1418
1419 function install_to_openstack() {
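# Arguments, as passed from the main flow at the bottom of this script:
#   $1 - openrc file path, or a cloud name defined in clouds.yaml
#   $2 - external (public) network name or ID
#   $3 - "true"/"false", whether to set up a volume (passed to Ansible as setup_volume)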
1420
1421 if [ -z "$2" ]; then
1422 FATAL "OpenStack installer requires a valid external network name"
1423 fi
1424
1425 # Install Pip for Python3
1426 $WORKDIR_SUDO apt install -y python3-pip python3-venv
1427 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1428
1429 # Create a venv to avoid conflicts with the host installation
1430 python3 -m venv $OPENSTACK_PYTHON_VENV
1431
1432 source $OPENSTACK_PYTHON_VENV/bin/activate
1433
1434 # Install Ansible and the OpenStack client and SDK; the latest OpenStack version supported is Train
1435 python -m pip install -U wheel
1436 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
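# The pins above keep the OpenStack client and SDK at Train-compatible releases
# and hold Ansible on the 2.10 series, which is recent enough to install the
# openstack.cloud collection used below.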
1437
1438 # Install the OpenStack cloud collection (requires ansible>=2.10)
1439 ansible-galaxy collection install openstack.cloud
1440
1441 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1442
1443 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1444
1445 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
1446
1447 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
1448 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
1449 fi
1450
1451 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
1452 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
1453 fi
1454
1455 # Execute the Ansible playbook based on openrc or clouds.yaml
1456 if [ -e "$1" ]; then
1457 . $1
1458 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1459 $OSM_DEVOPS/installers/openstack/site.yml
1460 else
1461 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1462 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1463 fi
1464
1465 # Exit from venv
1466 deactivate
1467
1468 return 0
1469 }
1470
1471 function install_vimemu() {
1472 echo -e "\nInstalling vim-emu"
1473 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1474 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1475 # install prerequisites (OVS is a must for the emulator to work)
1476 sudo apt-get install -y openvswitch-switch
1477 # clone vim-emu repository (attention: branch is currently master only)
1478 echo "Cloning vim-emu repository ..."
1479 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1480 # build vim-emu docker
1481 echo "Building vim-emu Docker container..."
1482
1483 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1484 # start vim-emu container as daemon
1485 echo "Starting vim-emu Docker container 'vim-emu' ..."
1486 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1487 # in lightweight mode, the emulator needs to be attached to netOSM
1488 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1489 else
1490 # classic build mode
1491 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1492 fi
1493 echo "Waiting for 'vim-emu' container to start ..."
1494 sleep 5
1495 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1496 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1497 # print vim-emu connection info
1498 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1499 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1500 echo -e "To add the emulated VIM to OSM, run:"
1501 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1502 }
1503
1504 function install_k8s_monitoring() {
1505 # install OSM monitoring
1506 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1507 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1508 }
1509
1510 function uninstall_k8s_monitoring() {
1511 # uninstall OSM monitoring
1512 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1513 }
1514
1515 function dump_vars(){
1516 echo "DEVELOP=$DEVELOP"
1517 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1518 echo "UNINSTALL=$UNINSTALL"
1519 echo "UPDATE=$UPDATE"
1520 echo "RECONFIGURE=$RECONFIGURE"
1521 echo "TEST_INSTALLER=$TEST_INSTALLER"
1522 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1523 echo "INSTALL_PLA=$INSTALL_PLA"
1524 echo "INSTALL_LXD=$INSTALL_LXD"
1525 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1526 echo "INSTALL_ONLY=$INSTALL_ONLY"
1527 echo "INSTALL_ELK=$INSTALL_ELK"
1528 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1529 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1530 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1531 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1532 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1533 echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
1534 echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
1535 echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
1536 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1537 echo "TO_REBUILD=$TO_REBUILD"
1538 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1539 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1540 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1541 echo "RELEASE=$RELEASE"
1542 echo "REPOSITORY=$REPOSITORY"
1543 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1544 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1545 echo "OSM_DEVOPS=$OSM_DEVOPS"
1546 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1547 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1548 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1549 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1550 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1551 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1552 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1553 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1554 echo "DOCKER_USER=$DOCKER_USER"
1555 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1556 echo "PULL_IMAGES=$PULL_IMAGES"
1557 echo "KUBERNETES=$KUBERNETES"
1558 echo "NGUI=$NGUI"
1559 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
1560 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
1561 echo "SHOWOPTS=$SHOWOPTS"
1562 echo "Install from specific refspec (-b): $COMMIT_ID"
1563 }
1564
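# Reports an installer milestone to the OSM telemetry endpoint. For example,
# "track start" during a lightweight install emits the event "lw_start", with
# ce_duration set to the seconds elapsed since the installer started.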
1565 function track(){
1566 ctime=`date +%s`
1567 duration=$((ctime - SESSION_ID))
1568 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1569 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1570 event_name="bin"
1571 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1572 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1573 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1574 event_name="${event_name}_$1"
1575 url="${url}&event=${event_name}&ce_duration=${duration}"
1576 wget -q -O /dev/null $url
1577 }
1578
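# Splits DOCKER_REGISTRY_URL, which is assumed to have the form user:pass@host.
# Example (hypothetical): "user:secret@registry.example.com:5000" yields
# DOCKER_REGISTRY_USER=user, DOCKER_REGISTRY_PASSWORD=secret and
# DOCKER_REGISTRY_URL=registry.example.com:5000.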
1579 function parse_docker_registry_url() {
1580 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
1581 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
1582 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
1583 }
1584
1585 JUJU_AGENT_VERSION=2.8.6
1586 UNINSTALL=""
1587 DEVELOP=""
1588 UPDATE=""
1589 RECONFIGURE=""
1590 TEST_INSTALLER=""
1591 INSTALL_LXD=""
1592 SHOWOPTS=""
1593 COMMIT_ID=""
1594 ASSUME_YES=""
1595 INSTALL_FROM_SOURCE=""
1596 RELEASE="ReleaseNINE"
1597 REPOSITORY="stable"
1598 INSTALL_VIMEMU=""
1599 INSTALL_PLA=""
1600 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1601 LXD_REPOSITORY_PATH=""
1602 INSTALL_LIGHTWEIGHT="y"
1603 INSTALL_TO_OPENSTACK=""
1604 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1605 OPENSTACK_PUBLIC_NET_NAME=""
1606 OPENSTACK_ATTACH_VOLUME="false"
1607 OPENSTACK_SSH_KEY_FILE=""
1608 OPENSTACK_USERDATA_FILE=""
1609 OPENSTACK_VM_NAME="server-osm"
1610 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1611 INSTALL_ONLY=""
1612 INSTALL_ELK=""
1613 TO_REBUILD=""
1614 INSTALL_NOLXD=""
1615 INSTALL_NODOCKER=""
1616 INSTALL_NOJUJU=""
1617 KUBERNETES="y"
1618 NGUI="y"
1619 INSTALL_K8S_MONITOR=""
1620 INSTALL_NOHOSTCLIENT=""
1621 SESSION_ID=`date +%s`
1622 OSM_DEVOPS=
1623 OSM_VCA_HOST=
1624 OSM_VCA_SECRET=
1625 OSM_VCA_PUBKEY=
1626 OSM_VCA_CLOUDNAME="localhost"
1627 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1628 OSM_STACK_NAME=osm
1629 NO_HOST_PORTS=""
1630 DOCKER_NOBUILD=""
1631 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1632 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1633 WORKDIR_SUDO=sudo
1634 OSM_WORK_DIR="/etc/osm"
1635 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1636 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1637 OSM_HOST_VOL="/var/lib/osm"
1638 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1639 OSM_DOCKER_TAG=latest
1640 DOCKER_USER=opensourcemano
1641 PULL_IMAGES="y"
1642 KAFKA_TAG=2.11-1.0.2
1643 PROMETHEUS_TAG=v2.4.3
1644 GRAFANA_TAG=latest
1645 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1646 PROMETHEUS_CADVISOR_TAG=latest
1647 KEYSTONEDB_TAG=10
1648 OSM_DATABASE_COMMONKEY=
1649 ELASTIC_VERSION=6.4.2
1650 ELASTIC_CURATOR_VERSION=5.5.4
1651 POD_NETWORK_CIDR=10.244.0.0/16
1652 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1653 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
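# RE_CHECK accepts lowercase alphanumerics with inner hyphens (e.g. "osm",
# "osm-ns1") and rejects names that start or end with a hyphen, in line with
# Kubernetes namespace naming rules.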
1654 DOCKER_REGISTRY_URL=
1655 DOCKER_PROXY_URL=
1656 MODULE_DOCKER_TAG=
1657
1658 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
1659 case "${o}" in
1660 b)
1661 COMMIT_ID=${OPTARG}
1662 PULL_IMAGES=""
1663 ;;
1664 r)
1665 REPOSITORY="${OPTARG}"
1666 REPO_ARGS+=(-r "$REPOSITORY")
1667 ;;
1668 c)
1669 [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
1670 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1671 echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
1672 usage && exit 1
1673 ;;
1674 n)
1675 [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
1676 [ "${OPTARG}" == "ngui" ] && continue
1677 echo -e "Invalid argument for -n : '$OPTARG'\n" >&2
1678 usage && exit 1
1679 ;;
1680 k)
1681 REPOSITORY_KEY="${OPTARG}"
1682 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1683 ;;
1684 u)
1685 REPOSITORY_BASE="${OPTARG}"
1686 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1687 ;;
1688 R)
1689 RELEASE="${OPTARG}"
1690 REPO_ARGS+=(-R "$RELEASE")
1691 ;;
1692 D)
1693 OSM_DEVOPS="${OPTARG}"
1694 ;;
1695 o)
1696 INSTALL_ONLY="y"
1697 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1698 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1699 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1700 ;;
1701 O)
1702 INSTALL_TO_OPENSTACK="y"
1703 if [ -n "${OPTARG}" ]; then
1704 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1705 else
1706 echo -e "Invalid argument for -O : '$OPTARG'\n" >&2
1707 usage && exit 1
1708 fi
1709 ;;
1710 f)
1711 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
1712 ;;
1713 F)
1714 OPENSTACK_USERDATA_FILE="${OPTARG}"
1715 ;;
1716 N)
1717 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1718 ;;
1719 m)
1720 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
1721 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
1722 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1723 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1724 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1725 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1726 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1727 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1728 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
1729 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1730 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1731 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1732 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1733 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1734 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1735 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1736 ;;
1737 H)
1738 OSM_VCA_HOST="${OPTARG}"
1739 ;;
1740 S)
1741 OSM_VCA_SECRET="${OPTARG}"
1742 ;;
1743 s)
1744 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" >&2 && exit 1
1745 ;;
1746 w)
1747 # when specifying workdir, do not use sudo for access
1748 WORKDIR_SUDO=
1749 OSM_WORK_DIR="${OPTARG}"
1750 ;;
1751 t)
1752 OSM_DOCKER_TAG="${OPTARG}"
1753 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1754 ;;
1755 U)
1756 DOCKER_USER="${OPTARG}"
1757 ;;
1758 P)
1759 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1760 ;;
1761 A)
1762 OSM_VCA_APIPROXY="${OPTARG}"
1763 ;;
1764 l)
1765 LXD_CLOUD_FILE="${OPTARG}"
1766 ;;
1767 L)
1768 LXD_CRED_FILE="${OPTARG}"
1769 ;;
1770 K)
1771 CONTROLLER_NAME="${OPTARG}"
1772 ;;
1773 d)
1774 DOCKER_REGISTRY_URL="${OPTARG}"
1775 ;;
1776 p)
1777 DOCKER_PROXY_URL="${OPTARG}"
1778 ;;
1779 T)
1780 MODULE_DOCKER_TAG="${OPTARG}"
1781 ;;
1782 -)
1783 [ "${OPTARG}" == "help" ] && usage && exit 0
1784 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1785 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1786 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1787 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1788 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1789 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1790 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1791 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1792 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1793 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1794 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1795 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1796 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1797 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1798 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1799 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1800 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1801 [ "${OPTARG}" == "pullimages" ] && continue
1802 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1803 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1804 [ "${OPTARG}" == "bundle" ] && continue
1805 [ "${OPTARG}" == "k8s" ] && continue
1806 [ "${OPTARG}" == "lxd" ] && continue
1807 [ "${OPTARG}" == "lxd-cred" ] && continue
1808 [ "${OPTARG}" == "microstack" ] && continue
1809 [ "${OPTARG}" == "vca" ] && continue
1810 [ "${OPTARG}" == "ha" ] && continue
1811 [ "${OPTARG}" == "tag" ] && continue
1812 [ "${OPTARG}" == "registry" ] && continue
1813 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1814 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1815 echo -e "Invalid option: '--$OPTARG'\n" >&2
1816 usage && exit 1
1817 ;;
1818 :)
1819 echo "Option -$OPTARG requires an argument" >&2
1820 usage && exit 1
1821 ;;
1822 \?)
1823 echo -e "Invalid option: '-$OPTARG'\n" >&2
1824 usage && exit 1
1825 ;;
1826 h)
1827 usage && exit 0
1828 ;;
1829 y)
1830 ASSUME_YES="y"
1831 ;;
1832 *)
1833 usage && exit 1
1834 ;;
1835 esac
1836 done
1837
1838 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1839 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1840 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1841
1842 if [ -n "$SHOWOPTS" ]; then
1843 dump_vars
1844 exit 0
1845 fi
1846
1847 if [ -n "$CHARMED" ]; then
1848 if [ -n "$UNINSTALL" ]; then
1849 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1850 else
1851 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1852 fi
1853
1854 exit 0
1855 fi
1856
1857 # if develop, we force master
1858 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1859
1860 need_packages="git wget curl tar"
1861
1862 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack "$OPENSTACK_OPENRC_FILE_OR_CLOUD" "$OPENSTACK_PUBLIC_NET_NAME" "$OPENSTACK_ATTACH_VOLUME" && echo -e "\nDONE" && exit 0
1863
1864 echo -e "Checking required packages: $need_packages"
1865 dpkg -l $need_packages &>/dev/null \
1866 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1867 || sudo apt-get update \
1868 || FATAL "failed to run apt-get update"
1869 dpkg -l $need_packages &>/dev/null \
1870 || ! echo -e "Installing $need_packages requires root privileges." \
1871 || sudo apt-get install -y $need_packages \
1872 || FATAL "failed to install $need_packages"
1873 sudo snap install jq
1874 if [ -z "$OSM_DEVOPS" ]; then
1875 if [ -n "$TEST_INSTALLER" ]; then
1876 echo -e "\nUsing local devops repo for OSM installation"
1877 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1878 else
1879 echo -e "\nCreating temporary dir for OSM installation"
1880 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1881 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1882
1883 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1884
1885 if [ -z "$COMMIT_ID" ]; then
1886 echo -e "\nGuessing the current stable release"
1887 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1888 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" >&2 && exit 1
1889
1890 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1891 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1892 else
1893 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1894 fi
1895 git -C $OSM_DEVOPS checkout $COMMIT_ID
1896 fi
1897 fi
1898
1899 . $OSM_DEVOPS/common/all_funcs
1900
1901 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1902 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1903 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1904 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1905 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1906 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1907 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1908 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1909
1910 #Installation starts here
1911 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
1912 track start
1913
1914 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1915 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1916 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1917 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1918 fi
1919
1920 echo -e "Checking required packages: lxd"
1921 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1922 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1923
1924 # use local devops for containers
1925 export OSM_USE_LOCAL_DEVOPS=true
1926
1927 #Install osmclient
1928
1929 #Install vim-emu (optional)
1930 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1931
1932 wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
1933 track end
1934 echo -e "\nDONE"
1935
1936