Bug 1841: LTS Support update helm version in community installer
osm/devops.git: installers/full_install_osm.sh
#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help:    print this help"
    echo -e "     -y:             do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>:      use specified repository name for osm packages"
    echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>:  use specified repository public key url"
    echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "                     -b master          (main dev branch)"
    echo -e "                     -b v2.0            (v2.0 branch)"
    echo -e "                     -b tags/v1.1.0     (a specific tag)"
    echo -e "                     ..."
    echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>   use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla:          install the PLA module for placement support"
    echo -e "     -m <MODULE>:    install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "     -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir>   Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l:             LXD cloud yaml file"
    echo -e "     -L:             LXD credentials yaml file"
    echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e "     --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju:       do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source:       install OSM from source code using the latest stable tag"
    echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume:       create a VM volume when installing to OpenStack"
    # echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts:     print chosen options and exit (only for debugging)"
    echo -e "     --charmed:      Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: Installs microstack as a VIM (--charmed option)"
    echo -e "     [--overlay]:    Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e "     [--ha]:         Installs the High Availability bundle (--charmed option)"
    echo -e "     [--tag]:        Docker image tag (--charmed option)"
    echo -e "     [--registry]:   Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}

# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
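
# Example (illustrative): fetch the password stored for the controller named
# in $OSM_STACK_NAME, assuming it was bootstrapped on this host:
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)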

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
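
# Illustrative check: the DNAT rule handled above (installed by juju_createproxy)
# forwards the Juju API port to the controller and can be listed with
#   sudo iptables -t nat -S PREROUTING | grep 17070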

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
done
EONG

        sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel $LXD_VERSION/stable

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true (0) if user types 'yes'; false (1) if user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
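
# Example (illustrative): proceed only on an explicit or default "yes":
#   ask_user "Continue with the installation (Y/n)? " y || exit 1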

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
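
# For reference, with DOCKER_PROXY_URL set the resulting /etc/docker/daemon.json
# looks like this (illustrative, assuming a mirror at https://registry.example:5000):
#   {
#       "registry-mirrors": ["https://registry.example:5000"]
#   }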

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=$JUJU_VERSION/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath \
    || FATAL "Failed to add K8s endpoint and credential for controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client \
    || FATAL "Failed to add K8s endpoint and credential for client in cloud $OSM_VCA_K8S_CLOUDNAME"
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION \
    || FATAL "Failed to bootstrap controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
}

function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}

function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    K8S_VERSION=1.23.3-00
    sudo apt-get install -y kubelet=${K8S_VERSION} kubeadm=${K8S_VERSION} kubectl=${K8S_VERSION}
    cat << EOF | sudo tee -a /etc/default/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs"
EOF
    sudo apt-mark hold kubelet kubeadm kubectl
}
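
# Illustrative check after install_kube runs: the three packages should be
# pinned at ${K8S_VERSION} and excluded from upgrades:
#   apt-mark showhold          # expected: kubeadm kubectl kubelet
#   kubeadm version -o short   # expected: v1.23.3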

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    echo "Installing open-iscsi"
    sudo apt-get update
    sudo apt-get install -y open-iscsi
    sudo systemctl enable --now iscsid
    echo "Installing OpenEBS"
    helm repo add openebs https://openebs.github.io/charts
    helm repo update
    helm install --create-namespace --namespace openebs openebs openebs/openebs --version 3.1.0
    helm ls -n openebs
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null

        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
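
# Illustrative verification once the patch above is applied:
#   kubectl get storageclass
# should list "openebs-hostpath (default)" among the storage classes.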

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP/32
    kubectl apply -f ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml \
    || FATAL "Cannot install MetalLB"
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f - \
    || FATAL "Cannot apply MetalLB ConfigMap"
}

#installs metallb from helm
function install_helm_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP/32
    echo "configInline:
  address-pools:
   - name: default
     protocol: layer2
     addresses:
     - $METALLB_IP_RANGE" | sudo tee -a $OSM_DOCKER_WORK_DIR/metallb-config.yaml
    helm repo add metallb https://metallb.github.io/metallb
    helm repo update
    helm install --create-namespace --namespace metallb-system metallb metallb/metallb -f $OSM_DOCKER_WORK_DIR/metallb-config.yaml
}
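
# Note: the /32 pool above means MetalLB can only hand out the host's own IP,
# which is enough for the single-node community installer. An illustrative
# multi-IP pool (assuming 192.168.1.240-250 is free on your network) would be:
#     addresses:
#     - 192.168.1.240-192.168.1.250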

#checks openebs and metallb readiness
function check_for_readiness() {
    # Default input values
    sampling_period=2       # seconds
    time_for_readiness=20   # seconds ready
    time_for_failure=200    # seconds broken
    OPENEBS_NAMESPACE=openebs
    METALLB_NAMESPACE=metallb-system
    # STACK_NAME=osm        # By default, "osm"

    # Equivalent number of samples
    oks_threshold=$((time_for_readiness/${sampling_period}))     # No. ok samples to declare the system ready
    failures_threshold=$((time_for_failure/${sampling_period}))  # No. nok samples to declare the system broken
    failures_in_a_row=0
    oks_in_a_row=0

    ####################################################################################
    # Loop to check system readiness
    ####################################################################################
    while [[ (${failures_in_a_row} -lt ${failures_threshold}) && (${oks_in_a_row} -lt ${oks_threshold}) ]]
    do
        # State of OpenEBS
        OPENEBS_STATE=$(kubectl get pod -n ${OPENEBS_NAMESPACE} --no-headers 2>&1)
        OPENEBS_READY=$(echo "${OPENEBS_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        OPENEBS_NOT_READY=$(echo "${OPENEBS_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        COUNT_OPENEBS_READY=$(echo "${OPENEBS_READY}"| grep -v -e '^$' | wc -l)
        COUNT_OPENEBS_NOT_READY=$(echo "${OPENEBS_NOT_READY}" | grep -v -e '^$' | wc -l)

        # State of MetalLB
        METALLB_STATE=$(kubectl get pod -n ${METALLB_NAMESPACE} --no-headers 2>&1)
        METALLB_READY=$(echo "${METALLB_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        METALLB_NOT_READY=$(echo "${METALLB_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        COUNT_METALLB_READY=$(echo "${METALLB_READY}" | grep -v -e '^$' | wc -l)
        COUNT_METALLB_NOT_READY=$(echo "${METALLB_NOT_READY}" | grep -v -e '^$' | wc -l)

        # OK sample
        if [[ $((${COUNT_OPENEBS_NOT_READY}+${COUNT_METALLB_NOT_READY})) -eq 0 ]]
        then
            ((++oks_in_a_row))
            failures_in_a_row=0
            echo -ne ===\> Successful checks: "${oks_in_a_row}"/${oks_threshold}\\r
        # NOK sample
        else
            ((++failures_in_a_row))
            oks_in_a_row=0
            echo
            echo Bootstrapping... "${failures_in_a_row}" checks of ${failures_threshold}

            # Reports failed pods in OpenEBS
            if [[ "${COUNT_OPENEBS_NOT_READY}" -ne 0 ]]
            then
                echo "OpenEBS: Waiting for ${COUNT_OPENEBS_NOT_READY} of $((${COUNT_OPENEBS_NOT_READY}+${COUNT_OPENEBS_READY})) pods to be ready:"
                echo "${OPENEBS_NOT_READY}"
                echo
            fi

            # Reports failed statefulsets
            if [[ "${COUNT_METALLB_NOT_READY}" -ne 0 ]]
            then
                echo "MetalLB: Waiting for ${COUNT_METALLB_NOT_READY} of $((${COUNT_METALLB_NOT_READY}+${COUNT_METALLB_READY})) pods to be ready:"
                echo "${METALLB_NOT_READY}"
                echo
            fi
        fi

        #------------ NEXT SAMPLE
        sleep ${sampling_period}
    done

    ####################################################################################
    # OUTCOME
    ####################################################################################
    if [[ (${failures_in_a_row} -ge ${failures_threshold}) ]]
    then
        echo
        FATAL "K8S CLUSTER IS BROKEN"
    else
        echo
        echo "K8S CLUSTER IS READY"
    fi
}
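
# Worked example of the thresholds above: with sampling_period=2, the cluster
# is declared ready after 20/2 = 10 consecutive OK samples, and broken after
# 200/2 = 100 consecutive failed samples.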

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
}

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install Helm v3
#Helm releases can be found here: https://github.com/helm/helm/releases
function install_helm() {
    HELM_VERSION="v3.7.2"
    if ! [[ "$(helm version --short 2>/dev/null)" =~ ^v3.* ]]; then
        # Helm is not installed. Install helm
        echo "Helm3 is not installed, installing ..."
        curl https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz --output helm-${HELM_VERSION}.tar.gz
        tar -zxvf helm-${HELM_VERSION}.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-${HELM_VERSION}.tar.gz
    else
        echo "Helm3 is already installed. Skipping installation..."
    fi
    helm repo add stable https://charts.helm.sh/stable
    helm repo update
}
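
# Illustrative check: after install_helm runs, the client should report the
# pinned version:
#   helm version --short   # expected: v3.7.2+g<commit>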

function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
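
# Example of the rewrite performed above (illustrative, assuming the defaults
# DOCKER_USER=opensourcemano and an empty DOCKER_REGISTRY_URL): with TAG=10.1.0,
# an image reference in lcm.yaml such as
#   image: opensourcemano/lcm:10
# becomes
#   image: opensourcemano/lcm:10.1.0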

function update_manifest_files() {
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "10" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}
1223
1224 function install_lightweight() {
1225 track checkingroot
1226 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1227 track noroot
1228
1229 if [ -n "$KUBERNETES" ]; then
1230 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1231 1. Install and configure LXD
1232 2. Install juju
1233 3. Install docker CE
1234 4. Disable swap space
1235 5. Install and initialize Kubernetes
1236 as prerequisites.
1237 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1238
1239 else
1240 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and initialize a docker swarm as prerequisites. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1241 fi
1242 track proceed
1243
1244 echo "Installing lightweight build of OSM"
1245 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1246 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1247 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1248 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1249 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1250 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1251 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1252 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
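# Illustrative example of the detection above (sample output, not executed):
#   $ ip route list
#   default via 172.21.248.1 dev ens3 proto dhcp metric 100
# awk picks field 5 ("ens3") as DEFAULT_IF; DEFAULT_IP and DEFAULT_MTU are then
# read from "ip -o -4 a s ens3" and "ip addr show ens3" respectively.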
1253
1254 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1255 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1256 need_packages_lw="snapd"
1257 echo -e "Checking required packages: $need_packages_lw"
1258 dpkg -l $need_packages_lw &>/dev/null \
1259 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1260 || sudo apt-get update \
1261 || FATAL "failed to run apt-get update"
1262 dpkg -l $need_packages_lw &>/dev/null \
1263 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1264 || sudo apt-get install -y $need_packages_lw \
1265 || FATAL "failed to install $need_packages_lw"
1266 install_lxd
1267 fi
1268
1269 track prereqok
1270
1271 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1272
1273 echo "Creating folders for installation"
1274 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1275 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1276 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1277
1278 #Installs Kubernetes
1279 if [ -n "$KUBERNETES" ]; then
1280 install_kube
1281 track install_k8s
1282 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1283 kube_config_dir
1284 track init_k8s
1285 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1286 # uninstall OSM MONITORING
1287 uninstall_k8s_monitoring
1288 track uninstall_k8s_monitoring
1289 fi
1290 #remove old namespace
1291 remove_k8s_namespace $OSM_STACK_NAME
1292 deploy_cni_provider
1293 taint_master_node
1294 install_helm
1295 track install_helm
1296 install_k8s_storageclass
1297 track k8s_storageclass
1298 install_helm_metallb
1299 track k8s_metallb
1300 check_for_readiness
1301 else
1302 #install_docker_compose
1303 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1304 track docker_swarm
1305 fi
1306
1307 [ -z "$INSTALL_NOJUJU" ] && install_juju
1308 track juju_install
1309
1310 if [ -z "$OSM_VCA_HOST" ]; then
1311 if [ -z "$CONTROLLER_NAME" ]; then
1312
1313 if [ -n "$KUBERNETES" ]; then
1314 juju_createcontroller_k8s
1315 juju_addlxd_cloud
1316 else
1317 if [ -n "$LXD_CLOUD_FILE" ]; then
1318 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credentials YAML file if the LXD is external"
1319 OSM_VCA_CLOUDNAME="lxd-cloud"
1320 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1321 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1322 fi
1323 juju_createcontroller
1324 juju_createproxy
1325 fi
1326 else
1327 OSM_VCA_CLOUDNAME="lxd-cloud"
1328 if [ -n "$LXD_CLOUD_FILE" ]; then
1329 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credentials YAML file if the LXD is external"
1330 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1331 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1332 else
1333 mkdir -p ~/.osm
1334 cat << EOF > ~/.osm/lxd-cloud.yaml
1335 clouds:
1336 lxd-cloud:
1337 type: lxd
1338 auth-types: [certificate]
1339 endpoint: "https://$DEFAULT_IP:8443"
1340 config:
1341 ssl-hostname-verification: false
1342 EOF
1343 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1344 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1345 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1346 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1347 cat << EOF > ~/.osm/lxd-credentials.yaml
1348 credentials:
1349 lxd-cloud:
1350 lxd-cloud:
1351 auth-type: certificate
1352 server-cert: |
1353 $server_cert
1354 client-cert: |
1355 $client_cert
1356 client-key: |
1357 $client_key
1358 EOF
1359 lxc config trust add local: ~/.osm/client.crt
1360 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1361 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1362 fi
1363 fi
1364 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1365 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1366 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1367 fi
1368 track juju_controller
1369
1370 if [ -z "$OSM_VCA_SECRET" ]; then
1371 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1372 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1373 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1374 fi
1375 if [ -z "$OSM_VCA_PUBKEY" ]; then
1376 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1377 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1378 fi
1379 if [ -z "$OSM_VCA_CACERT" ]; then
1380 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1381 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1382 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1383 fi
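# Illustrative structure of the data parsed above (sample, not executed):
# "juju controllers --format json" returns something like
#   {"controllers": {"osm": {"ca-cert": "-----BEGIN CERTIFICATE-----\n..."}}}
# so the jq filter '.controllers[$controller]["ca-cert"]' extracts the PEM,
# which is then base64-encoded onto a single line for use in the OSM env files.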
1384
1385 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1386 if [ -z "$KUBERNETES" ]; then
1387 if [ -z "$OSM_VCA_APIPROXY" ]; then
1388 OSM_VCA_APIPROXY=$DEFAULT_IP
1389 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1390 fi
1391 juju_createproxy
1392 fi
1393 track juju
1394
1395 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1396 OSM_DATABASE_COMMONKEY=$(generate_secret)
1397 [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1398 fi
1399
1400 # Deploy OSM services
1401 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1402 track docker_build
1403
1404 if [ -n "$KUBERNETES" ]; then
1405 generate_k8s_manifest_files
1406 else
1407 generate_docker_compose_files
1408 fi
1409 track manifest_files
1410 generate_prometheus_grafana_files
1411 generate_docker_env_files
1412 track env_files
1413
1414 if [ -n "$KUBERNETES" ]; then
1415 deploy_charmed_services
1416 kube_secrets
1417 update_manifest_files
1418 namespace_vol
1419 deploy_osm_services
1420 if [ -n "$INSTALL_PLA" ]; then
1421 # optional PLA install
1422 deploy_osm_pla_service
1423 track deploy_osm_pla
1424 fi
1425 track deploy_osm_services_k8s
1426 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1427 # install OSM MONITORING
1428 install_k8s_monitoring
1429 track install_k8s_monitoring
1430 fi
1431 else
1432 # remove old stack
1433 remove_stack $OSM_STACK_NAME
1434 create_docker_network
1435 deploy_lightweight
1436 generate_osmclient_script
1437 track docker_deploy
1438 install_prometheus_nodeexporter
1439 track nodeexporter
1440 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1441 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1442 fi
1443
1444 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1445 track osmclient
1446
1447 echo -e "Checking OSM health state..."
1448 if [ -n "$KUBERNETES" ]; then
1449 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1450 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1451 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"; \
1452 track osm_unhealthy; }
1453 else
1454 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1455 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1456 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"; \
1457 track osm_unhealthy; }
1458 fi
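# The same health script can be re-run by hand after the installer finishes;
# a minimal sketch with the flags used above (stack/namespace "osm" assumed):
#   $OSM_DEVOPS/installers/osm_health.sh -s osm -k    # Kubernetes deployment
#   $OSM_DEVOPS/installers/osm_health.sh -s osm       # docker swarm deployment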
1459 track after_healthcheck
1460
1461 [ -n "$KUBERNETES" ] && add_local_k8scluster
1462 track add_local_k8scluster
1463
1464 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
1465 track end
1466 return 0
1467 }
1468
1469 function install_to_openstack() {
1470
1471 if [ -z "$2" ]; then
1472 FATAL "OpenStack installer requires a valid external network name"
1473 fi
1474
1475 # Install Pip for Python3
1476 $WORKDIR_SUDO apt install -y python3-pip python3-venv
1477 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1478
1479 # Create a venv to avoid conflicts with the host installation
1480 python3 -m venv $OPENSTACK_PYTHON_VENV
1481
1482 source $OPENSTACK_PYTHON_VENV/bin/activate
1483
1484 # Install Ansible and the OpenStack client/SDK; the latest OpenStack version supported is Train
1485 python -m pip install -U wheel
1486 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
1487
1488 # Install the OpenStack cloud collection (ansible>=2.10)
1489 ansible-galaxy collection install openstack.cloud
1490
1491 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1492
1493 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1494
1495 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
1496
1497 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
1498 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
1499 fi
1500
1501 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
1502 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
1503 fi
1504
1505 # Execute the Ansible playbook based on openrc or clouds.yaml
1506 if [ -e "$1" ]; then
1507 . $1
1508 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1509 $OSM_DEVOPS/installers/openstack/site.yml
1510 else
1511 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1512 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1513 fi
1514
1515 # Exit from venv
1516 deactivate
1517
1518 return 0
1519 }
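# Illustrative invocations of the OpenStack path (values are examples only):
#   ./full_install_osm.sh -O ~/openrc.sh -N public          # openrc file
#   ./full_install_osm.sh -O mycloud -N public --volume     # clouds.yaml entry, attach a volume
# $1 is the openrc file or cloud name, $2 the external network name, and $3
# whether to attach a volume (OPENSTACK_ATTACH_VOLUME).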
1520
1521 function install_vimemu() {
1522 echo -e "\nInstalling vim-emu"
1523 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1524 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1525 # install prerequisites (OVS is a must for the emulator to work)
1526 sudo apt-get install -y openvswitch-switch
1527 # clone vim-emu repository (attention: branch is currently master only)
1528 echo "Cloning vim-emu repository ..."
1529 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1530 # build vim-emu docker
1531 echo "Building vim-emu Docker container..."
1532
1533 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1534 # start vim-emu container as daemon
1535 echo "Starting vim-emu Docker container 'vim-emu' ..."
1536 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1537 # in lightweight mode, the emulator needs to be attached to netOSM
1538 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1539 else
1540 # classic build mode
1541 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1542 fi
1543 echo "Waiting for 'vim-emu' container to start ..."
1544 sleep 5
1545 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1546 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1547 # print vim-emu connection info
1548 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1549 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1550 echo -e "To add the emulated VIM to OSM, run:"
1551 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1552 }
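# Illustrative follow-up checks (not run by the installer): verify the emulator
# container and inspect its logs if the emulated VIM does not answer on :6001.
#   docker ps --filter name=vim-emu
#   docker logs vim-emu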
1553
1554 function install_k8s_monitoring() {
1555 # install OSM monitoring
1556 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1557 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1558 }
1559
1560 function uninstall_k8s_monitoring() {
1561 # uninstall OSM monitoring
1562 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1563 }
1564
1565 function dump_vars(){
1566 echo "DEVELOP=$DEVELOP"
1567 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1568 echo "UNINSTALL=$UNINSTALL"
1569 echo "UPDATE=$UPDATE"
1570 echo "RECONFIGURE=$RECONFIGURE"
1571 echo "TEST_INSTALLER=$TEST_INSTALLER"
1572 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1573 echo "INSTALL_PLA=$INSTALL_PLA"
1574 echo "INSTALL_LXD=$INSTALL_LXD"
1575 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1576 echo "INSTALL_ONLY=$INSTALL_ONLY"
1577 echo "INSTALL_ELK=$INSTALL_ELK"
1578 echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
1579 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1580 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1581 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1582 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1583 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1584 echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
1585 echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
1586 echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
1587 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1588 echo "TO_REBUILD=$TO_REBUILD"
1589 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1590 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1591 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1592 echo "RELEASE=$RELEASE"
1593 echo "REPOSITORY=$REPOSITORY"
1594 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1595 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1596 echo "OSM_DEVOPS=$OSM_DEVOPS"
1597 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1598 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1599 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1600 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1601 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1602 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1603 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1604 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1605 echo "DOCKER_USER=$DOCKER_USER"
1606 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1607 echo "PULL_IMAGES=$PULL_IMAGES"
1608 echo "KUBERNETES=$KUBERNETES"
1609 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
1610 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
1611 echo "SHOWOPTS=$SHOWOPTS"
1612 echo "Install from specific refspec (-b): $COMMIT_ID"
1613 }
1614
1615 function track(){
1616 ctime=`date +%s`
1617 duration=$((ctime - SESSION_ID))
1618 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1619 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1620 event_name="bin"
1621 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1622 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1623 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1624 event_name="${event_name}_$1"
1625 url="${url}&event=${event_name}&ce_duration=${duration}"
1626 wget -q -O /dev/null $url
1627 }
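# Example of a URL built by track (values illustrative): "track start" in a
# lightweight install, with SESSION_ID=1650000000 and 2 seconds elapsed, reports to
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=1650000000&event=lw_start&ce_duration=2
# (duration is seconds since the installer started, as SESSION_ID is the start epoch).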
1628
1629 function parse_docker_registry_url() {
1630 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
1631 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
1632 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
1633 }
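# Worked example (illustrative values): with
#   DOCKER_REGISTRY_URL="myuser:mypass@registry.example.com:5000"
# the awk splits above produce
#   DOCKER_REGISTRY_USER=myuser
#   DOCKER_REGISTRY_PASSWORD=mypass
#   DOCKER_REGISTRY_URL=registry.example.com:5000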
1634
1635 LXD_VERSION=4.0
1636 JUJU_VERSION=2.9
1637 JUJU_AGENT_VERSION=2.9.22
1638 UNINSTALL=""
1639 DEVELOP=""
1640 UPDATE=""
1641 RECONFIGURE=""
1642 TEST_INSTALLER=""
1643 INSTALL_LXD=""
1644 SHOWOPTS=""
1645 COMMIT_ID=""
1646 ASSUME_YES=""
1647 INSTALL_FROM_SOURCE=""
1648 RELEASE="ReleaseTEN"
1649 REPOSITORY="stable"
1650 INSTALL_VIMEMU=""
1651 INSTALL_PLA=""
1652 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1653 LXD_REPOSITORY_PATH=""
1654 INSTALL_LIGHTWEIGHT="y"
1655 INSTALL_TO_OPENSTACK=""
1656 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1657 OPENSTACK_PUBLIC_NET_NAME=""
1658 OPENSTACK_ATTACH_VOLUME="false"
1659 OPENSTACK_SSH_KEY_FILE=""
1660 OPENSTACK_USERDATA_FILE=""
1661 OPENSTACK_VM_NAME="server-osm"
1662 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1663 INSTALL_ONLY=""
1664 INSTALL_ELK=""
1665 TO_REBUILD=""
1666 INSTALL_NOLXD=""
1667 INSTALL_NODOCKER=""
1668 INSTALL_NOJUJU=""
1669 KUBERNETES="y"
1670 INSTALL_K8S_MONITOR=""
1671 INSTALL_NOHOSTCLIENT=""
1672 INSTALL_NOCACHELXDIMAGES=""
1673 SESSION_ID=`date +%s`
1674 OSM_DEVOPS=
1675 OSM_VCA_HOST=
1676 OSM_VCA_SECRET=
1677 OSM_VCA_PUBKEY=
1678 OSM_VCA_CLOUDNAME="localhost"
1679 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1680 OSM_STACK_NAME=osm
1681 NO_HOST_PORTS=""
1682 DOCKER_NOBUILD=""
1683 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1684 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1685 WORKDIR_SUDO=sudo
1686 OSM_WORK_DIR="/etc/osm"
1687 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1688 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1689 OSM_HOST_VOL="/var/lib/osm"
1690 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1691 OSM_DOCKER_TAG=latest
1692 DOCKER_USER=opensourcemano
1693 PULL_IMAGES="y"
1694 KAFKA_TAG=2.11-1.0.2
1695 PROMETHEUS_TAG=v2.4.3
1696 GRAFANA_TAG=latest
1697 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1698 PROMETHEUS_CADVISOR_TAG=latest
1699 KEYSTONEDB_TAG=10
1700 OSM_DATABASE_COMMONKEY=
1701 ELASTIC_VERSION=6.4.2
1702 ELASTIC_CURATOR_VERSION=5.5.4
1703 POD_NETWORK_CIDR=10.244.0.0/16
1704 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1705 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
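# RE_CHECK enforces RFC 1123 label syntax for the namespace given with -s:
# e.g. "osm" and "osm-dev2" match, while "Osm", "-osm" and "osm_dev" do not.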
1706 DOCKER_REGISTRY_URL=
1707 DOCKER_PROXY_URL=
1708 MODULE_DOCKER_TAG=
1709
1710 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
1711 case "${o}" in
1712 b)
1713 COMMIT_ID=${OPTARG}
1714 PULL_IMAGES=""
1715 ;;
1716 r)
1717 REPOSITORY="${OPTARG}"
1718 REPO_ARGS+=(-r "$REPOSITORY")
1719 ;;
1720 c)
1721 [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
1722 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1723 echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
1724 usage && exit 1
1725 ;;
1726 k)
1727 REPOSITORY_KEY="${OPTARG}"
1728 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1729 ;;
1730 u)
1731 REPOSITORY_BASE="${OPTARG}"
1732 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1733 ;;
1734 R)
1735 RELEASE="${OPTARG}"
1736 REPO_ARGS+=(-R "$RELEASE")
1737 ;;
1738 D)
1739 OSM_DEVOPS="${OPTARG}"
1740 ;;
1741 o)
1742 INSTALL_ONLY="y"
1743 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1744 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1745 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1746 ;;
1747 O)
1748 INSTALL_TO_OPENSTACK="y"
1749 if [ -n "${OPTARG}" ]; then
1750 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1751 else
1752 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1753 usage && exit 1
1754 fi
1755 ;;
1756 f)
1757 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
1758 ;;
1759 F)
1760 OPENSTACK_USERDATA_FILE="${OPTARG}"
1761 ;;
1762 N)
1763 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1764 ;;
1765 m)
1766 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
1767 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1768 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1769 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1770 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1771 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1772 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1773 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
1774 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1775 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1776 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1777 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1778 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1779 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1780 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1781 ;;
1782 H)
1783 OSM_VCA_HOST="${OPTARG}"
1784 ;;
1785 S)
1786 OSM_VCA_SECRET="${OPTARG}"
1787 ;;
1788 s)
1789 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 1
1790 ;;
1791 w)
1792 # when specifying workdir, do not use sudo for access
1793 WORKDIR_SUDO=
1794 OSM_WORK_DIR="${OPTARG}"
1795 ;;
1796 t)
1797 OSM_DOCKER_TAG="${OPTARG}"
1798 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1799 ;;
1800 U)
1801 DOCKER_USER="${OPTARG}"
1802 ;;
1803 P)
1804 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1805 ;;
1806 A)
1807 OSM_VCA_APIPROXY="${OPTARG}"
1808 ;;
1809 l)
1810 LXD_CLOUD_FILE="${OPTARG}"
1811 ;;
1812 L)
1813 LXD_CRED_FILE="${OPTARG}"
1814 ;;
1815 K)
1816 CONTROLLER_NAME="${OPTARG}"
1817 ;;
1818 d)
1819 DOCKER_REGISTRY_URL="${OPTARG}"
1820 ;;
1821 p)
1822 DOCKER_PROXY_URL="${OPTARG}"
1823 ;;
1824 T)
1825 MODULE_DOCKER_TAG="${OPTARG}"
1826 ;;
1827 -)
1828 [ "${OPTARG}" == "help" ] && usage && exit 0
1829 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1830 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1831 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1832 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1833 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1834 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1835 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1836 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1837 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1838 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1839 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1840 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1841 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1842 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1843 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1844 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1845 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1846 [ "${OPTARG}" == "pullimages" ] && continue
1847 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1848 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1849 [ "${OPTARG}" == "bundle" ] && continue
1850 [ "${OPTARG}" == "k8s" ] && continue
1851 [ "${OPTARG}" == "lxd" ] && continue
1852 [ "${OPTARG}" == "lxd-cred" ] && continue
1853 [ "${OPTARG}" == "microstack" ] && continue
1854 [ "${OPTARG}" == "overlay" ] && continue
1855 [ "${OPTARG}" == "only-vca" ] && continue
1856 [ "${OPTARG}" == "vca" ] && continue
1857 [ "${OPTARG}" == "ha" ] && continue
1858 [ "${OPTARG}" == "tag" ] && continue
1859 [ "${OPTARG}" == "registry" ] && continue
1860 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1861 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1862 [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
1863 echo -e "Invalid option: '--$OPTARG'\n" >&2
1864 usage && exit 1
1865 ;;
1866 :)
1867 echo "Option -$OPTARG requires an argument" >&2
1868 usage && exit 1
1869 ;;
1870 \?)
1871 echo -e "Invalid option: '-$OPTARG'\n" >&2
1872 usage && exit 1
1873 ;;
1874 h)
1875 usage && exit 0
1876 ;;
1877 y)
1878 ASSUME_YES="y"
1879 ;;
1880 *)
1881 usage && exit 1
1882 ;;
1883 esac
1884 done
1885
1886 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1887 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1888 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
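# Illustrative -m usage consistent with the checks above (invocations are
# examples, not run by this script):
#   ./full_install_osm.sh -m LCM          # rebuild/pull only the LCM image
#   ./full_install_osm.sh --pla -m PLA    # -m PLA requires --pla
#   ./full_install_osm.sh -m NONE         # skip rebuilding/pulling any image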
1889
1890 if [ -n "$SHOWOPTS" ]; then
1891 dump_vars
1892 exit 0
1893 fi
1894
1895 if [ -n "$CHARMED" ]; then
1896 if [ -n "$UNINSTALL" ]; then
1897 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1898 else
1899 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1900 fi
1901
1902 exit 0
1903 fi
1904
1905 # if develop, we force master
1906 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1907
1908 need_packages="git wget curl tar"
1909
1910 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1911
1912 echo -e "Checking required packages: $need_packages"
1913 dpkg -l $need_packages &>/dev/null \
1914 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1915 || sudo apt-get update \
1916 || FATAL "failed to run apt-get update"
1917 dpkg -l $need_packages &>/dev/null \
1918 || ! echo -e "Installing $need_packages requires root privileges." \
1919 || sudo apt-get install -y $need_packages \
1920 || FATAL "failed to install $need_packages"
1921 sudo snap install jq
1922 if [ -z "$OSM_DEVOPS" ]; then
1923 if [ -n "$TEST_INSTALLER" ]; then
1924 echo -e "\nUsing local devops repo for OSM installation"
1925 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1926 else
1927 echo -e "\nCreating temporary dir for OSM installation"
1928 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1929 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1930
1931 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1932
1933 if [ -z "$COMMIT_ID" ]; then
1934 echo -e "\nGuessing the current stable release"
1935 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1936 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 1
1937
1938 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1939 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1940 else
1941 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1942 fi
1943 git -C $OSM_DEVOPS checkout $COMMIT_ID
1944 fi
1945 fi
1946
1947 . $OSM_DEVOPS/common/all_funcs
1948
1949 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1950 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1951 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1952 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1953 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1954 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1955 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1956 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1957
1958 #Installation starts here
1959 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null
1960 track start
1961
1962 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1963 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1964 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1965 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1966 fi
1967
1968 echo -e "Checking required packages: lxd"
1969 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1970 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1971
1972 # use local devops for containers
1973 export OSM_USE_LOCAL_DEVOPS=true
1974
1975 #Install osmclient
1976
1977 #Install vim-emu (optional)
1978 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1979
1980 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
1981 track end
1982 echo -e "\nDONE"