Fix bug 2059: Update lxd-credentials file
osm/devops.git: installers/full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -h / --help: print this help"
21 echo -e " -y: do not prompt for confirmation, assumes yes"
22 echo -e " -r <repo>: use specified repository name for osm packages"
23 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
24 echo -e " -u <repo base>: use specified repository url for osm packages"
25 echo -e " -k <repo key>: use specified repository public key url"
26 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
27 echo -e " -b master (main dev branch)"
28 echo -e " -b v2.0 (v2.0 branch)"
29 echo -e " -b tags/v1.1.0 (a specific tag)"
30 echo -e " ..."
31 echo -e " -c <orchestrator>: deploy OSM services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, OSM will be deployed using the default orchestrator. When used with --uninstall, OSM services deployed by the orchestrator will be uninstalled"
32 echo -e " -s <stack name> or <namespace> user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
33 echo -e " -H <VCA host> use specific juju host controller IP"
34 echo -e " -S <VCA secret> use VCA/juju secret key"
35 echo -e " -P <VCA pubkey> use VCA/juju public key file"
36 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
37 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
38 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
39 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
40 echo -e " --pla: install the PLA module for placement support"
41 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
42 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
43 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
44 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
45 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
46 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
47 echo -e " -D <devops path> use local devops installation path"
48 echo -e " -w <work dir> Location to store runtime installation"
49 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
50 echo -e " -l: LXD cloud yaml file"
51 echo -e " -L: LXD credentials yaml file"
52 echo -e " -K: Specifies the name of the controller to use - the controller must already be bootstrapped"
53 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
54 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
55 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
58 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
59 echo -e " --nojuju: do not install juju, assumes it is already installed"
60 echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
61 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
62 echo -e " --nohostclient: do not install the osmclient"
63 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
64 echo -e " --source: install OSM from source code using the latest stable tag"
65 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
66 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
67 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
68 echo -e " --volume: create a VM volume when installing to OpenStack"
69 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
70 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
71 echo -e " --showopts: print chosen options and exit (only for debugging)"
72 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
73 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
74 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
75 echo -e " [--vca <name>]: Specifies the name of the controller to use - the controller must already be bootstrapped (--charmed option)"
76 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
77 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
78 echo -e " [--microstack]: Installs microstack as a VIM (--charmed option)"
79 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
80 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
81 echo -e " [--tag]: Docker image tag. (--charmed option)"
82 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
83
84 }
85
86 # takes a juju/accounts.yaml file and returns the password for a
87 # specific controller. Written using only bash tools to avoid adding
88 # dependencies on other packages
89 function parse_juju_password {
90 password_file="${HOME}/.local/share/juju/accounts.yaml"
91 local controller_name=$1
92 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
93 sed -ne "s|^\($s\):|\1|" \
94 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
95 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
96 awk -F$fs -v controller=$controller_name '{
97 indent = length($1)/2;
98 vname[indent] = $2;
99 for (i in vname) {if (i > indent) {delete vname[i]}}
100 if (length($3) > 0) {
101 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
102 if (match(vn,controller) && match($2,"password")) {
103 printf("%s",$3);
104 }
105 }
106 }'
107 }
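# Illustrative usage (e.g. when reusing an already-bootstrapped controller;
# the assignment below is a sketch, not the only call site):
#   OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)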
108
109 function generate_secret() {
110 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
111 }
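# Example: OSM_DATABASE_COMMONKEY=$(generate_secret) produces a random
# 32-character alphanumeric string (e.g. "kQ9zP3...", hypothetical value).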
112
113 function remove_volumes() {
114 if [ -n "$KUBERNETES" ]; then
115 k8_volume=$1
116 echo "Removing ${k8_volume}"
117 $WORKDIR_SUDO rm -rf ${k8_volume}
118 else
119 stack=$1
120 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
121 for volume in $volumes; do
122 sg docker -c "docker volume rm ${stack}_${volume}"
123 done
124 fi
125 }
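# With the default stack name, the swarm branch above removes the volumes
# osm_mongo_db, osm_mon_db, osm_osm_packages, osm_ro_db, osm_pol_db,
# osm_prom_db and osm_ro (pattern: ${stack}_${volume}).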
126
127 function remove_network() {
128 stack=$1
129 sg docker -c "docker network rm net${stack}"
130 }
131
132 function remove_iptables() {
133 stack=$1
134 if [ -z "$OSM_VCA_HOST" ]; then
135 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
136 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
137 fi
138
139 if [ -z "$DEFAULT_IP" ]; then
140 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
141 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
142 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
143 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
144 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
145 fi
146
147 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
148 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
149 sudo netfilter-persistent save
150 fi
151 }
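# iptables -C succeeds only if the exact rule exists, so the DNAT rule is
# deleted (and the ruleset persisted) only when it had previously been added.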
152
153 function remove_stack() {
154 stack=$1
155 if sg docker -c "docker stack ps ${stack}" ; then
156 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
157 COUNTER=0
158 result=1
159 while [ ${COUNTER} -lt 30 ]; do
160 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
161 #echo "Dockers running: $result"
162 if [ "${result}" == "0" ]; then
163 break
164 fi
165 let COUNTER=COUNTER+1
166 sleep 1
167 done
168 if [ "${result}" == "0" ]; then
169 echo "All dockers of the stack ${stack} were removed"
170 else
171 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
172 fi
173 sleep 5
174 fi
175 }
176
177 #removes osm deployments and services
178 function remove_k8s_namespace() {
179 kubectl delete ns $1
180 }
181
182 #removes helm only if there is nothing deployed in helm
183 function remove_helm() {
184 if [ "$(helm ls -q)" == "" ] ; then
185 sudo helm reset --force
186 sudo rm /usr/local/bin/helm
187 rm -rf $HOME/.helm
188 fi
189 }
190
191 function remove_crontab_job() {
192 crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
193 }
194
195 #Uninstall osmclient
196 function uninstall_osmclient() {
197 sudo apt-get remove --purge -y python-osmclient
198 sudo apt-get remove --purge -y python3-osmclient
199 }
200
201 #Uninstall lightweight OSM: remove dockers
202 function uninstall_lightweight() {
203 if [ -n "$INSTALL_ONLY" ]; then
204 if [ -n "$INSTALL_ELK" ]; then
205 echo -e "\nUninstalling OSM ELK stack"
206 remove_stack osm_elk
207 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
208 fi
209 else
210 echo -e "\nUninstalling OSM"
211 if [ -n "$KUBERNETES" ]; then
212 if [ -n "$INSTALL_K8S_MONITOR" ]; then
213 # uninstall OSM MONITORING
214 uninstall_k8s_monitoring
215 fi
216 remove_k8s_namespace $OSM_STACK_NAME
217 else
218 remove_stack $OSM_STACK_NAME
219 remove_stack osm_elk
220 fi
221 echo "Now osm docker images and volumes will be deleted"
222 # TODO: clean-up of images should take into account if other tags were used for specific modules
223 newgrp docker << EONG
224 for module in ro lcm keystone nbi mon pol pla osmclient; do
225 docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
226 done
227 EONG
228
229 sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
230
231 if [ -n "$KUBERNETES" ]; then
232 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
233 remove_volumes $OSM_NAMESPACE_VOL
234 else
235 remove_volumes $OSM_STACK_NAME
236 remove_network $OSM_STACK_NAME
237 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
238 fi
239 echo "Removing $OSM_DOCKER_WORK_DIR"
240 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
241 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
242 fi
243 remove_crontab_job
244
245 # Cleanup Openstack installer venv
246 if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
247 rm -r $OPENSTACK_PYTHON_VENV
248 fi
249
250 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
251 echo "Some docker images will be kept in case they are used by other docker stacks"
252 echo "To remove them, just run 'docker image prune' in a terminal"
253 return 0
254 }
255
256 #Safe unattended install of iptables-persistent
257 function check_install_iptables_persistent(){
258 echo -e "\nChecking required packages: iptables-persistent"
259 if ! dpkg -l iptables-persistent &>/dev/null; then
260 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
261 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
262 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
263 sudo apt-get -yq install iptables-persistent
264 fi
265 }
266
267 #Configure NAT rules, based on the current IP addresses of containers
268 function nat(){
269 check_install_iptables_persistent
270
271 echo -e "\nConfiguring NAT rules"
272 echo -e " Required root privileges"
273 sudo $OSM_DEVOPS/installers/nat_osm
274 }
275
276 function FATAL(){
277 echo "FATAL error: Cannot install OSM due to \"$1\""
278 exit 1
279 }
280
281 function update_juju_images(){
282 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
283 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
284 }
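# The schedule expression "0 4 * * 6" means 04:00 every Saturday; the second
# line above refreshes the cached juju LXC images immediately.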
285
286 function install_lxd() {
287 # Apply sysctl production values for optimal performance
288 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
289 sudo sysctl --system
290
291 # Install LXD snap
292 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
293 sudo snap install lxd --channel $LXD_VERSION/stable
294
295 # Configure LXD
296 sudo usermod -a -G lxd `whoami`
297 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
298 sg lxd -c "lxd waitready"
299 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
300 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
301 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
302 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
303 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
304 #sudo systemctl stop lxd-bridge
305 #sudo systemctl --system daemon-reload
306 #sudo systemctl enable lxd-bridge
307 #sudo systemctl start lxd-bridge
308 }
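# For reference, the sed above rewrites the preseed's empty config into, e.g.:
#   config:
#     core.https_address: 192.168.1.10:8443   # illustrative DEFAULT_IP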
309
310 function ask_user(){
311 # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
312 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for no default
313 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
314 read -e -p "$1" USER_CONFIRMATION
315 while true ; do
316 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
317 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
318 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
319 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
320 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
321 done
322 }
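# Illustrative call (an empty answer defaults to yes):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1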
323
324 function install_osmclient(){
325 CLIENT_RELEASE=${RELEASE#"-R "}
326 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
327 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
328 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
329 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
330 curl $key_location | sudo apt-key add -
331 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
332 sudo apt-get update
333 sudo apt-get install -y python3-pip
334 sudo -H LC_ALL=C python3 -m pip install -U pip
335 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
336 sudo apt-get install -y python3-osm-im python3-osmclient
337 if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
338 python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
339 fi
340 if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
341 sudo apt-get install -y libcurl4-openssl-dev libssl-dev
342 python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
343 fi
344 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
345 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
346 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
347 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
348 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
349 echo -e "\nOSM client installed"
350 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
351 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
352 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
353 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
354 else
355 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
356 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
357 echo " export OSM_HOSTNAME=<OSM_host>"
358 fi
359 return 0
360 }
361
362 function install_prometheus_nodeexporter(){
363 if (systemctl -q is-active node_exporter)
364 then
365 echo "Node Exporter is already running."
366 else
367 echo "Node Exporter is not active, installing..."
368 if getent passwd node_exporter > /dev/null 2>&1; then
369 echo "node_exporter user exists"
370 else
371 echo "Creating user node_exporter"
372 sudo useradd --no-create-home --shell /bin/false node_exporter
373 fi
374 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
375 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
376 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
377 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
378 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
379 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
380 sudo systemctl daemon-reload
381 sudo systemctl restart node_exporter
382 sudo systemctl enable node_exporter
383 echo "Node Exporter has been activated in this host."
384 fi
385 return 0
386 }
387
388 function uninstall_prometheus_nodeexporter(){
389 sudo systemctl stop node_exporter
390 sudo systemctl disable node_exporter
391 sudo rm /etc/systemd/system/node_exporter.service
392 sudo systemctl daemon-reload
393 sudo userdel node_exporter
394 sudo rm /usr/local/bin/node_exporter
395 return 0
396 }
397
398 function install_docker_ce() {
399 # installs and configures Docker CE
400 echo "Installing Docker CE ..."
401 sudo apt-get -qq update
402 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
403 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
404 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
405 sudo apt-get -qq update
406 sudo apt-get install -y docker-ce
407 echo "Adding user to group 'docker'"
408 sudo groupadd -f docker
409 sudo usermod -aG docker $USER
410 sleep 2
411 sudo service docker restart
412 echo "... restarted Docker service"
413 if [ -n "${DOCKER_PROXY_URL}" ]; then
414 echo "Configuring docker proxy ..."
415 if [ -f /etc/docker/daemon.json ]; then
416 if grep -q registry-mirrors /etc/docker/daemon.json; then
417 sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
418 else
419 sudo sed -i "s|{|{\n \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
420 fi
421 else
422 sudo bash -c "cat << EOF > /etc/docker/daemon.json
423 {
424 \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
425 }
426 EOF"
427 fi
428 sudo systemctl daemon-reload
429 sudo service docker restart
430 echo "... restarted Docker service again"
431 fi
432 sg docker -c "docker version" || FATAL "Docker installation failed"
433 echo "... Docker CE installation done"
434 return 0
435 }
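# When DOCKER_PROXY_URL is set, the resulting /etc/docker/daemon.json looks
# like the following (illustrative proxy URL):
#   {
#       "registry-mirrors": ["http://my-proxy.example:5000"]
#   }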
436
437 function install_docker_compose() {
438 # installs and configures docker-compose
439 echo "Installing Docker Compose ..."
440 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
441 sudo chmod +x /usr/local/bin/docker-compose
442 echo "... Docker Compose installation done"
443 }
444
445 function install_juju() {
446 echo "Installing juju"
447 sudo snap install juju --classic --channel=$JUJU_VERSION/stable
448 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
449 [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
450 echo "Finished installation of juju"
451 return 0
452 }
453
454 function juju_createcontroller() {
455 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
456 # Controller not found, so create it
457 sudo usermod -a -G lxd ${USER}
458 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
459 fi
460 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
461 juju controller-config features=[k8s-operators]
462 }
463
464 function juju_addk8s() {
465 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath \
466 || FATAL "Failed to add K8s endpoint and credential for controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
467 }
468
469 function juju_createcontroller_k8s(){
470 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client \
471 || FATAL "Failed to add K8s endpoint and credential for client in cloud $OSM_VCA_K8S_CLOUDNAME"
472 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
473 --config controller-service-type=loadbalancer \
474 --agent-version=$JUJU_AGENT_VERSION \
475 || FATAL "Failed to bootstrap controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
476 }
477
478 function juju_addlxd_cloud(){
479 mkdir -p /tmp/.osm
480 OSM_VCA_CLOUDNAME="lxd-cloud"
481 LXDENDPOINT=$DEFAULT_IP
482 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
483 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
484
485 cat << EOF > $LXD_CLOUD
486 clouds:
487 $OSM_VCA_CLOUDNAME:
488 type: lxd
489 auth-types: [certificate]
490 endpoint: "https://$LXDENDPOINT:8443"
491 config:
492 ssl-hostname-verification: false
493 EOF
494 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
495 cat << EOF > $LXD_CREDENTIALS
496 credentials:
497 $OSM_VCA_CLOUDNAME:
498 lxd-cloud:
499 auth-type: certificate
500 server-cert: /var/snap/lxd/common/lxd/server.crt
501 client-cert: /tmp/.osm/client.crt
502 client-key: /tmp/.osm/client.key
503 EOF
504 lxc config trust add local: /tmp/.osm/client.crt
505 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
506 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
507 sg lxd -c "lxd waitready"
508 juju controller-config features=[k8s-operators]
509 }
510
511 function juju_createproxy() {
512 check_install_iptables_persistent
513
514 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
515 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
516 sudo netfilter-persistent save
517 fi
518 }
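# The rule added above is equivalent to (illustrative addresses):
#   iptables -t nat -A PREROUTING -p tcp -d <host IP> --dport 17070 \
#       -j DNAT --to-destination <juju controller IP>
# so Juju API traffic reaching the host on port 17070 is forwarded to the VCA.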
519
520 function docker_login() {
521 echo "Docker login"
522 sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
523 }
524
525 function generate_docker_images() {
526 echo "Pulling and generating docker images"
527 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
528
529 echo "Pulling docker images"
530
531 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
532 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
533 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
534 fi
535
536 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
537 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
538 fi
539
540 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
541 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
542 fi
543
544 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
545 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
546 fi
547
548 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
549 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
550 sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
551 fi
552
553 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
554 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
555 fi
556
557 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
558 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
559 fi
560
561 if [ -n "$PULL_IMAGES" ]; then
562 echo "Pulling OSM docker images"
563 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
564 module_lower=${module,,}
565 if [ "$module" == "PLA" -a -z "$INSTALL_PLA" ]; then
566 continue
567 fi
568 module_tag="${OSM_DOCKER_TAG}"
569 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
570 module_tag="${MODULE_DOCKER_TAG}"
571 fi
572 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
573 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
574 done
575 else
576 _build_from=$COMMIT_ID
577 [ -z "$_build_from" ] && _build_from="latest"
578 echo "OSM Docker images generated from $_build_from"
579
580 for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
581 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
582 module_lower=${module,,}
583 if [ "$module" == "PLA" -a -z "$INSTALL_PLA" ]; then
584 continue
585 fi
586 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
587 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
588 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
589 fi
590 done
591 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
592 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
593 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
594 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
595 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
596 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
597 fi
598 echo "Finished generation of docker images"
599 fi
600
601 echo "Finished pulling and generating docker images"
602 }
603
604 function cmp_overwrite() {
605 file1="$1"
606 file2="$2"
607 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
608 if [ -f "${file2}" ]; then
609 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
610 else
611 cp -b ${file1} ${file2}
612 fi
613 fi
614 }
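# Illustrative usage: install a template, prompting before overwriting an
# existing destination file:
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml \
#       $OSM_DOCKER_WORK_DIR/docker-compose.yaml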
615
616 function generate_docker_compose_files() {
617 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
618 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
619 if [ -n "$INSTALL_PLA" ]; then
620 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
621 fi
622 }
623
624 function generate_k8s_manifest_files() {
625 #Kubernetes resources
626 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
627 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
628 }
629
630 function generate_prometheus_grafana_files() {
631 [ -n "$KUBERNETES" ] && return
632 # Prometheus files
633 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
634 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
635
636 # Grafana files
637 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
638 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
639 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
640 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
641 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
642
643 # Prometheus Exporters files
644 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
645 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
646 }
647
648 function generate_docker_env_files() {
649 echo "Doing a backup of existing env files"
650 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
651 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
652 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
653 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
654 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
655 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
656 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
657 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
658
659 echo "Generating docker env files"
660 # LCM
661 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
662 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
663 fi
664
665 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
666 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
667 else
668 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
669 fi
670
671 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
672 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
673 else
674 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
675 fi
676
677 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
678 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
679 else
680 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
681 fi
682
683 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
684 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
685 else
686 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
687 fi
688
689 if [ -n "$OSM_VCA_APIPROXY" ]; then
690 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
691 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
692 else
693 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
694 fi
695 fi
696
697 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
698 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
699 fi
700
701 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
702 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
703 fi
704
705 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
706 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
707 else
708 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
709 fi
710
711 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
712 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
713 else
714 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
715 fi
716
717 # RO
718 MYSQL_ROOT_PASSWORD=$(generate_secret)
719 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
720 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
721 fi
722 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
723 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
724 fi
725 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
726 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
727 fi
728
729 # Keystone
730 KEYSTONE_DB_PASSWORD=$(generate_secret)
731 SERVICE_PASSWORD=$(generate_secret)
732 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
733 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
734 fi
735 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
736 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
737 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
738 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
739 fi
740
741 # NBI
742 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
743 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
744 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
745 fi
746
747 # MON
748 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
749 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
750 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
751 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
752 fi
753
754 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
755 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
756 else
757 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
758 fi
759
760 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
761 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
762 else
763 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
764 fi
765
766 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
767 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
768 else
769 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
770 fi
771
772 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
773 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
774 else
775 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
776 fi
777
778
779 # POL
780 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
781 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
782 fi
783
784 echo "Finished generation of docker env files"
785 }
786
787 function generate_osmclient_script () {
788 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
789 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
790 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
791 }
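# The generated wrapper can then be invoked as, e.g.:
#   $OSM_DOCKER_WORK_DIR/osm   # opens an interactive osmclient container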
792
793 #installs kubernetes packages
794 function install_kube() {
795 sudo apt-get update && sudo apt-get install -y apt-transport-https
796 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
797 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
798 sudo apt-get update
799 echo "Installing Kubernetes Packages ..."
800 K8S_VERSION=1.23.3-00
801 sudo apt-get install -y kubelet=${K8S_VERSION} kubeadm=${K8S_VERSION} kubectl=${K8S_VERSION}
802 cat << EOF | sudo tee -a /etc/default/kubelet
803 KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs"
804 EOF
805 sudo apt-mark hold kubelet kubeadm kubectl
806 }
807
808 #initializes kubernetes control plane
809 function init_kubeadm() {
810 sudo swapoff -a
811 sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
812 sudo kubeadm init --config $1
813 sleep 5
814 }
815
816 function kube_config_dir() {
817 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
818 mkdir -p $HOME/.kube
819 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
820 sudo chown $(id -u):$(id -g) $HOME/.kube/config
821 }
822
823 function install_k8s_storageclass() {
824 echo "Installing open-iscsi"
825 sudo apt-get update
826 sudo apt-get install -y open-iscsi
827 sudo systemctl enable --now iscsid
828 echo "Installing OpenEBS"
829 helm repo add openebs https://openebs.github.io/charts
830 helm repo update
831 helm install --create-namespace --namespace openebs openebs openebs/openebs --version 3.1.0
832 helm ls -n openebs
833 local storageclass_timeout=400
834 local counter=0
835 local storageclass_ready=""
836 echo "Waiting for storageclass"
837 while (( counter < storageclass_timeout ))
838 do
839 kubectl get storageclass openebs-hostpath &> /dev/null
840
841 if [ $? -eq 0 ] ; then
842 echo "Storageclass available"
843 storageclass_ready="y"
844 break
845 else
846 counter=$((counter + 15))
847 sleep 15
848 fi
849 done
850 [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
851 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
852 }
853
854 function install_k8s_metallb() {
855 METALLB_IP_RANGE=$DEFAULT_IP/32
856 kubectl apply -f ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml \
857 || FATAL "Cannot install MetalLB"
858 echo "apiVersion: v1
859 kind: ConfigMap
860 metadata:
861 namespace: metallb-system
862 name: config
863 data:
864 config: |
865 address-pools:
866 - name: default
867 protocol: layer2
868 addresses:
869 - $METALLB_IP_RANGE" | kubectl apply -f - \
870 || FATAL "Cannot apply MetalLB ConfigMap"
871 }
872
873 #installs metallb from helm
874 function install_helm_metallb() {
875 METALLB_IP_RANGE=$DEFAULT_IP/32
876 echo "configInline:
877 address-pools:
878 - name: default
879 protocol: layer2
880 addresses:
881 - $METALLB_IP_RANGE" | sudo tee -a $OSM_DOCKER_WORK_DIR/metallb-config.yaml
882 helm repo add metallb https://metallb.github.io/metallb
883 helm repo update
884 helm install --create-namespace --namespace metallb-system metallb metallb/metallb -f $OSM_DOCKER_WORK_DIR/metallb-config.yaml
885 }
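# Note: a /32 range restricts MetalLB to the single host IP, so LoadBalancer
# services are exposed on DEFAULT_IP itself (e.g. 192.168.1.10/32 serves only
# 192.168.1.10).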
886
887 #checks openebs and metallb readiness
888 function check_for_readiness() {
889 # Default input values
890 sampling_period=2 # seconds
891 time_for_readiness=20 # seconds ready
892 time_for_failure=200 # seconds broken
893 OPENEBS_NAMESPACE=openebs
894 METALLB_NAMESPACE=metallb-system
895 # STACK_NAME=osm # By default, "osm"
896
897 # Equivalent number of samples
898 oks_threshold=$((time_for_readiness/${sampling_period})) # No. ok samples to declare the system ready
899 failures_threshold=$((time_for_failure/${sampling_period})) # No. nok samples to declare the system broken
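# With the defaults above: 20/2 = 10 consecutive OK samples declare the
# system ready, and 200/2 = 100 consecutive failed samples declare it broken.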
900 failures_in_a_row=0
901 oks_in_a_row=0
902
903 ####################################################################################
904 # Loop to check system readiness
905 ####################################################################################
906 while [[ (${failures_in_a_row} -lt ${failures_threshold}) && (${oks_in_a_row} -lt ${oks_threshold}) ]]
907 do
908 # State of OpenEBS
909 OPENEBS_STATE=$(kubectl get pod -n ${OPENEBS_NAMESPACE} --no-headers 2>&1)
910 OPENEBS_READY=$(echo "${OPENEBS_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
911 OPENEBS_NOT_READY=$(echo "${OPENEBS_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
912 COUNT_OPENEBS_READY=$(echo "${OPENEBS_READY}"| grep -v -e '^$' | wc -l)
913 COUNT_OPENEBS_NOT_READY=$(echo "${OPENEBS_NOT_READY}" | grep -v -e '^$' | wc -l)
914
915 # State of MetalLB
916 METALLB_STATE=$(kubectl get pod -n ${METALLB_NAMESPACE} --no-headers 2>&1)
917 METALLB_READY=$(echo "${METALLB_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
918 METALLB_NOT_READY=$(echo "${METALLB_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
919 COUNT_METALLB_READY=$(echo "${METALLB_READY}" | grep -v -e '^$' | wc -l)
920 COUNT_METALLB_NOT_READY=$(echo "${METALLB_NOT_READY}" | grep -v -e '^$' | wc -l)
921
922 # OK sample
923 if [[ $((${COUNT_OPENEBS_NOT_READY}+${COUNT_METALLB_NOT_READY})) -eq 0 ]]
924 then
925 ((++oks_in_a_row))
926 failures_in_a_row=0
927 echo -ne ===\> Successful checks: "${oks_in_a_row}"/${oks_threshold}\\r
928 # NOK sample
929 else
930 ((++failures_in_a_row))
931 oks_in_a_row=0
932 echo
933 echo Bootstrapping... "${failures_in_a_row}" checks of ${failures_threshold}
934
935 # Reports failed pods in OpenEBS
936 if [[ "${COUNT_OPENEBS_NOT_READY}" -ne 0 ]]
937 then
938 echo "OpenEBS: Waiting for ${COUNT_OPENEBS_NOT_READY} of $((${COUNT_OPENEBS_NOT_READY}+${COUNT_OPENEBS_READY})) pods to be ready:"
939 echo "${OPENEBS_NOT_READY}"
940 echo
941 fi
942
943 # Reports failed statefulsets
944 if [[ "${COUNT_METALLB_NOT_READY}" -ne 0 ]]
945 then
946 echo "MetalLB: Waiting for ${COUNT_METALLB_NOT_READY} of $((${COUNT_METALLB_NOT_READY}+${COUNT_METALLB_READY})) pods to be ready:"
947 echo "${METALLB_NOT_READY}"
948 echo
949 fi
950 fi
951
952 #------------ NEXT SAMPLE
953 sleep ${sampling_period}
954 done
955
956 ####################################################################################
957 # OUTCOME
958 ####################################################################################
959 if [[ (${failures_in_a_row} -ge ${failures_threshold}) ]]
960 then
961 echo
962 FATAL "K8S CLUSTER IS BROKEN"
963 else
964 echo
965 echo "K8S CLUSTER IS READY"
966 fi
967 }
968
969 #deploys flannel as daemonsets
970 function deploy_cni_provider() {
971 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
972 trap 'rm -rf "${CNI_DIR}"' EXIT
973 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
974 kubectl apply -f $CNI_DIR
975 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
976 }
977
978 #creates secrets from env files which will be used by containers
979 function kube_secrets(){
980 kubectl create ns $OSM_STACK_NAME
981 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
982 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
983 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
984 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
985 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
986 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
987 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
988 }
989
990 #taints K8s master node
991 function taint_master_node() {
992 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
993 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
994 sleep 5
995 }
996
997 #deploys osm pods and services
998 function deploy_osm_services() {
999 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
1000 }
1001
1002 #deploy charmed services
1003 function deploy_charmed_services() {
1004 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
1005 juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
1006 }
1007
1008 function deploy_osm_pla_service() {
1009 # corresponding to namespace_vol
1010 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
1011 # corresponding to deploy_osm_services
1012 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
1013 }
1014
1015 #Install Helm v3
1016 #Helm releases can be found here: https://github.com/helm/helm/releases
1017 function install_helm() {
1018 HELM_VERSION="v3.7.2"
1019 if ! [[ "$(helm version --short 2>/dev/null)" =~ ^v3.* ]]; then
1020 # Helm is not installed. Install helm
1021 echo "Helm3 is not installed, installing ..."
1022 curl https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz --output helm-${HELM_VERSION}.tar.gz
1023 tar -zxvf helm-${HELM_VERSION}.tar.gz
1024 sudo mv linux-amd64/helm /usr/local/bin/helm
1025 rm -r linux-amd64
1026 rm helm-${HELM_VERSION}.tar.gz
1027 else
1028 echo "Helm3 is already installed. Skipping installation..."
1029 fi
1030 helm repo add stable https://charts.helm.sh/stable
1031 helm repo update
1032 }
1033
1034 function parse_yaml() {
1035 TAG=$1
1036 shift
1037 services=$@
1038 for module in $services; do
1039 if [ "$module" == "pla" ]; then
1040 if [ -n "$INSTALL_PLA" ]; then
1041 echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
1042 $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
1043 fi
1044 else
1045 echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
1046 $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
1047 fi
1048 done
1049 }
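# Illustrative call, retagging the standard service manifests (see
# update_manifest_files below for the real invocation):
#   parse_yaml ${OSM_DOCKER_TAG} "nbi lcm ro pol mon ng-ui keystone pla"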
1050
1051 function update_manifest_files() {
1052 osm_services="nbi lcm ro pol mon ng-ui keystone pla"
1053 list_of_services=""
1054 for module in $osm_services; do
1055 module_upper="${module^^}"
1056 if ! echo $TO_REBUILD | grep -q $module_upper ; then
1057 list_of_services="$list_of_services $module"
1058 fi
1059 done
1060 if [ ! "$OSM_DOCKER_TAG" == "10" ]; then
1061 parse_yaml $OSM_DOCKER_TAG $list_of_services
1062 fi
1063 if [ -n "$MODULE_DOCKER_TAG" ]; then
1064 parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
1065 fi
1066 }
1067
1068 function namespace_vol() {
1069 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
1070 for osm in $osm_services; do
1071 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
1072 done
1073 }
1074
1075 function init_docker_swarm() {
1076 if [ "${DEFAULT_MTU}" != "1500" ]; then
1077 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
1078 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
1079 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
1080 fi
1081 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
1082 return 0
1083 }
1084
1085 function create_docker_network() {
1086 echo "creating network"
1087 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1088 echo "creating network DONE"
1089 }
1090
1091 function deploy_lightweight() {
1092
1093 echo "Deploying lightweight build"
1094 OSM_NBI_PORT=9999
1095 OSM_RO_PORT=9090
1096 OSM_KEYSTONE_PORT=5000
1097 OSM_UI_PORT=80
1098 OSM_MON_PORT=8662
1099 OSM_PROM_PORT=9090
1100 OSM_PROM_CADVISOR_PORT=8080
1101 OSM_PROM_HOSTPORT=9091
1102 OSM_GRAFANA_PORT=3000
1103 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1104 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1105
1106 if [ -n "$NO_HOST_PORTS" ]; then
1107 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1108 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1109 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1110 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1111 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1112 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1113 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1114 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1115 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1116 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1117 else
1118 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1119 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1120 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1121 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1122 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1123 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1124 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1125 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1126 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1127 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1128 fi
1129 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1130 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1131 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1132 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1133 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1134 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1135 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1136 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1137 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1138
1139 pushd $OSM_DOCKER_WORK_DIR
1140 if [ -n "$INSTALL_PLA" ]; then
1141 track deploy_osm_pla
1142 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1143 else
1144 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1145 fi
1146 popd
1147
1148 echo "Finished deployment of lightweight build"
1149 }
1150
1151 function deploy_elk() {
1152 echo "Pulling docker images for ELK"
1153 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1154 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1155 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1156 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1157 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1158 echo "Finished pulling elk docker images"
1159 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1160 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1161 remove_stack osm_elk
1162 echo "Deploying ELK stack"
1163 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1164 echo "Waiting for ELK stack to be up and running"
1165 time=0
1166 step=5
1167 timelength=40
1168 elk_is_up=1
1169 while [ $time -le $timelength ]; do
1170 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1171 elk_is_up=0
1172 break
1173 fi
1174 sleep $step
1175 time=$((time+step))
1176 done
1177 if [ $elk_is_up -eq 0 ]; then
1178 echo "ELK is up and running. Trying to create index pattern..."
1179 #Create index pattern
1180 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1181 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1182 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1183 #Make it the default index
1184 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1185 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1186 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1187 else
1188 echo "Cannot connect to Kibana to create index pattern."
1189 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1190 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1191 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1192 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1193 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1194 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1195 -d"{\"value\":\"filebeat-*\"}"'
1196 fi
1197 echo "Finished deployment of ELK stack"
1198 return 0
1199 }
1200
1201 function add_local_k8scluster() {
1202 /usr/bin/osm --all-projects vim-create \
1203 --name _system-osm-vim \
1204 --account_type dummy \
1205 --auth_url http://dummy \
1206 --user osm --password osm --tenant osm \
1207 --description "dummy" \
1208 --config '{management_network_name: mgmt}'
1209 /usr/bin/osm --all-projects k8scluster-add \
1210 --creds ${HOME}/.kube/config \
1211 --vim _system-osm-vim \
1212 --k8s-nets '{"net1": null}' \
1213 --version '1.15' \
1214 --description "OSM Internal Cluster" \
1215 _system-osm-k8s
1216 }

function install_lightweight() {
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
            || sudo apt-get update \
            || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
            || ! echo -e "Installing $need_packages_lw requires root privileges." \
            || sudo apt-get install -y $need_packages_lw \
            || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    # Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        # remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_helm
        track install_helm
        install_k8s_storageclass
        track k8s_storageclass
        install_helm_metallb
        track k8s_metallb
        check_for_readiness
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
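            # Sketch (assumption, not executed by the installer): at this point the
            # external controller should know about both the cloud and its credential:
            #
            #   juju clouds -c $CONTROLLER_NAME   # should list lxd-cloud
            #   juju credentials                  # should list a credential for lxd-cloud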
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
            { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"; \
            track osm_unhealthy; }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
            { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"; \
            track osm_unhealthy; }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK; the latest OpenStack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the OpenStack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
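
# Example invocation (assumption, values are illustrative): deploy OSM on an
# OpenStack VM using an openrc file, a public network called "public" and an
# attached volume:
#
#   ./full_install_osm.sh -O ~/openrc.sh -N public --volume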

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
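
# Sketch (assumption, not executed by the installer): the emulator's
# OpenStack-like API can be probed once the container reports an IP, matching
# the auth_url printed above:
#
#   curl http://${VIMEMU_HOSTNAME}:6001/v2.0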

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
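
# For illustration (assumption, values are made up): a default lightweight
# install that reaches the "end" milestone 600 seconds after SESSION_ID was
# taken would request:
#
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=<SESSION_ID>&event=lw_end&ce_duration=600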

function parse_docker_registry_url() {
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}
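
# Worked example (assumption, values are illustrative): with
# DOCKER_REGISTRY_URL="myuser:mypass@registry.example.com", the awk splits
# above yield:
#
#   DOCKER_REGISTRY_USER=myuser
#   DOCKER_REGISTRY_PASSWORD=mypass
#   DOCKER_REGISTRY_URL=registry.example.com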

LXD_VERSION=4.0
JUJU_VERSION=2.9
JUJU_AGENT_VERSION=2.9.29
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseTEN"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
GRAFANA_TAG=8.1.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

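# Example invocations (assumption, values are illustrative):
#
#   ./full_install_osm.sh                        # default k8s-based install
#   ./full_install_osm.sh -y -c swarm            # unattended, docker swarm based
#   ./full_install_osm.sh -t 10.0.3 -s mystack   # specific docker tag and namespace
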
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
    if [ -n "$CHARMED" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $OSM_DOCKER_TAG "$@" || \
            FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
    else
        ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
            FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
    fi
    echo -e "\nDONE"
    exit 0
fi

if [ -n "$CHARMED" ]; then
    export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
    ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
        FATAL_TRACK charmed_install "charmed_install.sh failed"
    wget -q -O- https://osm-download.etsi.org/ftp/osm-11.0-eleven/README2.txt &> /dev/null
    track end installation_type $OSM_INSTALLATION_TYPE
    echo -e "\nDONE"
    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
track end
echo -e "\nDONE"