Charmed OSM preparation for 10.1.0
osm/devops.git: installers/full_install_osm.sh
#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "  -h / --help: print this help"
    echo -e "  -y: do not prompt for confirmation, assumes yes"
    echo -e "  -r <repo>: use specified repository name for osm packages"
    echo -e "  -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "  -u <repo base>: use specified repository url for osm packages"
    echo -e "  -k <repo key>: use specified repository public key url"
    echo -e "  -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "     -b master          (main dev branch)"
    echo -e "     -b v2.0            (v2.0 branch)"
    echo -e "     -b tags/v1.1.0     (a specific tag)"
    echo -e "     ..."
    echo -e "  -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "  -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "  -H <VCA host>   use specific juju host controller IP"
    echo -e "  -S <VCA secret> use VCA/juju secret key"
    echo -e "  -P <VCA pubkey> use VCA/juju public key file"
    echo -e "  -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "  -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "  --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "  --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "  --pla: install the PLA module for placement support"
    echo -e "  -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "  -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "  -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "  -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e "  -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e "  -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e "  -D <devops path> use local devops installation path"
    echo -e "  -w <work dir> Location to store runtime installation"
    echo -e "  -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "  -l: LXD cloud yaml file"
    echo -e "  -L: LXD credentials yaml file"
    echo -e "  -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "  -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e "  -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e "  -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e "  --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e "  --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "  --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "  --nojuju: do not install juju, assumes it is already installed"
    echo -e "  --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "  --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "  --nohostclient: do not install the osmclient"
    echo -e "  --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "  --source: install OSM from source code using the latest stable tag"
    echo -e "  --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "  --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "  --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "  --volume: create a VM volume when installing to OpenStack"
    # echo -e "  --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "  --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "  --showopts: print chosen options and exit (only for debugging)"
    echo -e "  --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e "  [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "  [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "  [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "  [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "  [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "  [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e "  [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e "  [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e "  [--tag]: Docker image tag. (--charmed option)"
    echo -e "  [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}

# takes a juju/accounts.yaml file and returns the password for a specific
# controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
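
# Illustrative usage, not part of the original script: retrieve the password of
# an already bootstrapped controller, e.g.
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)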

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
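
# Illustrative usage, not part of the original script: the 32-character secrets
# used below (e.g. OSM_DATABASE_COMMONKEY) can be seeded like this:
#   OSM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY:-$(generate_secret)}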

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
done
EONG

        sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "    Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel $LXD_VERSION/stable

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user types 'yes'; false (1) if user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
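
# Illustrative usage, not part of the original script, defaulting to "yes" when
# the user just presses Enter:
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1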

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
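
# Illustrative check, not part of the original script: when DOCKER_PROXY_URL is
# set, the configured mirror should appear under "Registry Mirrors" in:
#   sg docker -c "docker info"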

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=$JUJU_VERSION/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath \
    || FATAL "Failed to add K8s endpoint and credential for controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client \
    || FATAL "Failed to add K8s endpoint and credential for client in cloud $OSM_VCA_K8S_CLOUDNAME"
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
        --config controller-service-type=loadbalancer \
        --agent-version=$JUJU_AGENT_VERSION \
    || FATAL "Failed to bootstrap controller $OSM_STACK_NAME in cloud $OSM_VCA_K8S_CLOUDNAME"
}

function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}

function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
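
# Illustrative check, not part of the original script: the DNAT rule added
# above can be inspected with:
#   sudo iptables -t nat -L PREROUTING -n | grep 17070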

function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
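
# Illustrative usage, not part of the original script: install a generated file
# without silently clobbering local edits (hypothetical paths):
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml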

function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    K8S_VERSION=1.23.3-00
    sudo apt-get install -y kubelet=${K8S_VERSION} kubeadm=${K8S_VERSION} kubectl=${K8S_VERSION}
    cat << EOF | sudo tee -a /etc/default/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=cgroupfs"
EOF
    sudo apt-mark hold kubelet kubeadm kubectl
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    echo "Installing open-iscsi"
    sudo apt-get update
    sudo apt-get install -y open-iscsi
    sudo systemctl enable --now iscsid
    echo "Installing OpenEBS"
    helm repo add openebs https://openebs.github.io/charts
    helm repo update
    helm install --create-namespace --namespace openebs openebs openebs/openebs --version 3.1.0
    helm ls -n openebs
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null
        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
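
# Illustrative check, not part of the original script: after the patch above,
# openebs-hostpath should be listed as "(default)" by:
#   kubectl get storageclass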

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP/32
    kubectl apply -f ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml \
        || FATAL "Cannot install MetalLB"
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f - \
        || FATAL "Cannot apply MetalLB ConfigMap"
}

#installs metallb from helm
function install_helm_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP/32
    echo "configInline:
  address-pools:
  - name: default
    protocol: layer2
    addresses:
    - $METALLB_IP_RANGE" | sudo tee -a $OSM_DOCKER_WORK_DIR/metallb-config.yaml
    helm repo add metallb https://metallb.github.io/metallb
    helm repo update
    helm install --create-namespace --namespace metallb-system metallb metallb/metallb -f $OSM_DOCKER_WORK_DIR/metallb-config.yaml
}
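
# Illustrative check, not part of the original script: MetalLB pods should
# reach Running state in the metallb-system namespace (check_for_readiness
# below polls exactly this):
#   kubectl get pods -n metallb-system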

#checks openebs and metallb readiness
function check_for_readiness() {
    # Default input values
    sampling_period=2       # seconds
    time_for_readiness=20   # seconds ready
    time_for_failure=200    # seconds broken
    OPENEBS_NAMESPACE=openebs
    METALLB_NAMESPACE=metallb-system
    # STACK_NAME=osm        # By default, "osm"

    # Equivalent number of samples
    oks_threshold=$((time_for_readiness/${sampling_period}))     # No. ok samples to declare the system ready
    failures_threshold=$((time_for_failure/${sampling_period}))  # No. nok samples to declare the system broken
    failures_in_a_row=0
    oks_in_a_row=0

    ####################################################################################
    # Loop to check system readiness
    ####################################################################################
    while [[ (${failures_in_a_row} -lt ${failures_threshold}) && (${oks_in_a_row} -lt ${oks_threshold}) ]]
    do
        # State of OpenEBS
        OPENEBS_STATE=$(kubectl get pod -n ${OPENEBS_NAMESPACE} --no-headers 2>&1)
        OPENEBS_READY=$(echo "${OPENEBS_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        OPENEBS_NOT_READY=$(echo "${OPENEBS_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        COUNT_OPENEBS_READY=$(echo "${OPENEBS_READY}"| grep -v -e '^$' | wc -l)
        COUNT_OPENEBS_NOT_READY=$(echo "${OPENEBS_NOT_READY}" | grep -v -e '^$' | wc -l)

        # State of MetalLB
        METALLB_STATE=$(kubectl get pod -n ${METALLB_NAMESPACE} --no-headers 2>&1)
        METALLB_READY=$(echo "${METALLB_STATE}" | awk '$2=="1/1" || $2=="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        METALLB_NOT_READY=$(echo "${METALLB_STATE}" | awk '$2!="1/1" && $2!="2/2" {printf ("%s\t%s\t\n", $1, $2)}')
        COUNT_METALLB_READY=$(echo "${METALLB_READY}" | grep -v -e '^$' | wc -l)
        COUNT_METALLB_NOT_READY=$(echo "${METALLB_NOT_READY}" | grep -v -e '^$' | wc -l)

        # OK sample
        if [[ $((${COUNT_OPENEBS_NOT_READY}+${COUNT_METALLB_NOT_READY})) -eq 0 ]]
        then
            ((++oks_in_a_row))
            failures_in_a_row=0
            echo -ne ===\> Successful checks: "${oks_in_a_row}"/${oks_threshold}\\r
        # NOK sample
        else
            ((++failures_in_a_row))
            oks_in_a_row=0
            echo
            echo Bootstrapping... "${failures_in_a_row}" checks of ${failures_threshold}

            # Reports failed pods in OpenEBS
            if [[ "${COUNT_OPENEBS_NOT_READY}" -ne 0 ]]
            then
                echo "OpenEBS: Waiting for ${COUNT_OPENEBS_NOT_READY} of $((${COUNT_OPENEBS_NOT_READY}+${COUNT_OPENEBS_READY})) pods to be ready:"
                echo "${OPENEBS_NOT_READY}"
                echo
            fi

            # Reports failed statefulsets
            if [[ "${COUNT_METALLB_NOT_READY}" -ne 0 ]]
            then
                echo "MetalLB: Waiting for ${COUNT_METALLB_NOT_READY} of $((${COUNT_METALLB_NOT_READY}+${COUNT_METALLB_READY})) pods to be ready:"
                echo "${METALLB_NOT_READY}"
                echo
            fi
        fi

        #------------ NEXT SAMPLE
        sleep ${sampling_period}
    done

    ####################################################################################
    # OUTCOME
    ####################################################################################
    if [[ (${failures_in_a_row} -ge ${failures_threshold}) ]]
    then
        echo
        FATAL "K8S CLUSTER IS BROKEN"
    else
        echo
        echo "K8S CLUSTER IS READY"
    fi
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}

#taints K8s master node
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
}
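
# Illustrative check, not part of the original script: the state of the
# mongodb-k8s charm deployed above can be followed with:
#   juju status -m $OSM_STACK_NAME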

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install Helm v3
#Helm releases can be found here: https://github.com/helm/helm/releases
function install_helm() {
    HELM_VERSION="v3.7.2"
    if ! [[ "$(helm version --short 2>/dev/null)" =~ ^v3.* ]]; then
        # Helm is not installed. Install helm
        echo "Helm3 is not installed, installing ..."
        curl https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz --output helm-${HELM_VERSION}.tar.gz
        tar -zxvf helm-${HELM_VERSION}.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-${HELM_VERSION}.tar.gz
    else
        echo "Helm3 is already installed. Skipping installation..."
    fi
    helm repo add stable https://charts.helm.sh/stable
    helm repo update
}

function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
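
# Illustrative usage, not part of the original script: retag all non-rebuilt
# module manifests to a hypothetical tag value:
#   parse_yaml 10.1.0 "nbi lcm ro pol mon ng-ui keystone"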

function update_manifest_files() {
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "10" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
}
1224
1225 function install_lightweight() {
1226 track checkingroot
1227 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1228 track noroot
1229
1230 if [ -n "$KUBERNETES" ]; then
1231 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1232 1. Install and configure LXD
1233 2. Install juju
1234 3. Install docker CE
1235 4. Disable swap space
1236 5. Install and initialize Kubernetes
1237 as prerequisites.
1238 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1239
1240 else
1241 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1242 fi
1243 track proceed
1244
1245 echo "Installing lightweight build of OSM"
1246 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1247 trap 'rm -rf "${LWTEMPDIR}"' EXIT
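# Determine the interface holding the default route, plus its IP address and
# MTU; these values are used later for the LXD endpoint and the VCA API proxy.
# Illustration (hypothetical addresses): if "ip route list" prints
#   default via 192.168.1.1 dev ens3 proto dhcp src 192.168.1.10
# then DEFAULT_IF=ens3, and DEFAULT_IP is taken from "ip -o -4 a s ens3".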
1248 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1249 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1250 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1251 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1252 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1253 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1254
1255 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1256 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1257 need_packages_lw="snapd"
1258 echo -e "Checking required packages: $need_packages_lw"
1259 dpkg -l $need_packages_lw &>/dev/null \
1260 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1261 || sudo apt-get update \
1262 || FATAL "failed to run apt-get update"
1263 dpkg -l $need_packages_lw &>/dev/null \
1264 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1265 || sudo apt-get install -y $need_packages_lw \
1266 || FATAL "failed to install $need_packages_lw"
1267 install_lxd
1268 fi
1269
1270 track prereqok
1271
1272 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1273
1274 echo "Creating folders for installation"
1275 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1276 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1277 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1278
1279 #Installs Kubernetes
1280 if [ -n "$KUBERNETES" ]; then
1281 install_kube
1282 track install_k8s
1283 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1284 kube_config_dir
1285 track init_k8s
1286 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1287 # uninstall OSM MONITORING
1288 uninstall_k8s_monitoring
1289 track uninstall_k8s_monitoring
1290 fi
1291 #remove old namespace
1292 remove_k8s_namespace $OSM_STACK_NAME
1293 deploy_cni_provider
1294 taint_master_node
1295 install_helm
1296 track install_helm
1297 install_k8s_storageclass
1298 track k8s_storageclass
1299 install_helm_metallb
1300 track k8s_metallb
1301 check_for_readiness
1302 else
1303 #install_docker_compose
1304 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1305 track docker_swarm
1306 fi
1307
1308 [ -z "$INSTALL_NOJUJU" ] && install_juju
1309 track juju_install
1310
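# VCA (juju) controller setup. Three cases are handled below:
#   - no -H/-K given: bootstrap a new controller, on k8s or on LXD;
#   - external LXD (-l/-L): register it as a juju cloud with its credentials;
#   - external controller (-K): reuse it, registering the local LXD as a
#     cloud and generating a client certificate if no files were supplied.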
1311 if [ -z "$OSM_VCA_HOST" ]; then
1312 if [ -z "$CONTROLLER_NAME" ]; then
1313
1314 if [ -n "$KUBERNETES" ]; then
1315 juju_createcontroller_k8s
1316 juju_addlxd_cloud
1317 else
1318 if [ -n "$LXD_CLOUD_FILE" ]; then
1319 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1320 OSM_VCA_CLOUDNAME="lxd-cloud"
1321 juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1322 juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
1323 fi
1324 juju_createcontroller
1325 juju_createproxy
1326 fi
1327 else
1328 OSM_VCA_CLOUDNAME="lxd-cloud"
1329 if [ -n "$LXD_CLOUD_FILE" ]; then
1330 [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
1331 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1332 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
1333 else
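# No cloud/credential files were given: describe the local LXD daemon as a
# juju cloud on its HTTPS endpoint, generate a self-signed client certificate
# and make LXD trust it (see "lxc config trust add" below).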
1334 mkdir -p ~/.osm
1335 cat << EOF > ~/.osm/lxd-cloud.yaml
1336 clouds:
1337 lxd-cloud:
1338 type: lxd
1339 auth-types: [certificate]
1340 endpoint: "https://$DEFAULT_IP:8443"
1341 config:
1342 ssl-hostname-verification: false
1343 EOF
1344 openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1345 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1346 local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
1347 local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
1348 cat << EOF > ~/.osm/lxd-credentials.yaml
1349 credentials:
1350 lxd-cloud:
1351 lxd-cloud:
1352 auth-type: certificate
1353 server-cert: |
1354 $server_cert
1355 client-cert: |
1356 $client_cert
1357 client-key: |
1358 $client_key
1359 EOF
1360 lxc config trust add local: ~/.osm/client.crt
1361 juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
1362 juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
1363 fi
1364 fi
1365 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1366 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1367 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
1368 fi
1369 track juju_controller
1370
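# Derive the remaining VCA parameters from the local juju client unless they
# were passed explicitly with -S/-P/-C. parse_juju_password is a helper
# presumably defined earlier in this file; the CA certificate is base64-encoded
# for later use when the service environment files are generated.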
1371 if [ -z "$OSM_VCA_SECRET" ]; then
1372 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
1373 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
1374 [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
1375 fi
1376 if [ -z "$OSM_VCA_PUBKEY" ]; then
1377 OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
1378 [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
1379 fi
1380 if [ -z "$OSM_VCA_CACERT" ]; then
1381 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1382 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
1383 [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
1384 fi
1385
1386 # Set OSM_VCA_APIPROXY only when it is not a k8s installation
1387 if [ -z "$KUBERNETES" ]; then
1388 if [ -z "$OSM_VCA_APIPROXY" ]; then
1389 OSM_VCA_APIPROXY=$DEFAULT_IP
1390 [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
1391 fi
1392 juju_createproxy
1393 fi
1394 track juju
1395
1396 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1397 OSM_DATABASE_COMMONKEY=$(generate_secret)
1398 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
1399 fi
1400
1401 # Deploy OSM services
1402 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1403 track docker_build
1404
1405 if [ -n "$KUBERNETES" ]; then
1406 generate_k8s_manifest_files
1407 else
1408 generate_docker_compose_files
1409 fi
1410 track manifest_files
1411 generate_prometheus_grafana_files
1412 generate_docker_env_files
1413 track env_files
1414
1415 if [ -n "$KUBERNETES" ]; then
1416 deploy_charmed_services
1417 kube_secrets
1418 update_manifest_files
1419 namespace_vol
1420 deploy_osm_services
1421 if [ -n "$INSTALL_PLA"]; then
1422 # optional PLA install
1423 deploy_osm_pla_service
1424 track deploy_osm_pla
1425 fi
1426 track deploy_osm_services_k8s
1427 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1428 # install OSM MONITORING
1429 install_k8s_monitoring
1430 track install_k8s_monitoring
1431 fi
1432 else
1433 # remove old stack
1434 remove_stack $OSM_STACK_NAME
1435 create_docker_network
1436 deploy_lightweight
1437 generate_osmclient_script
1438 track docker_deploy
1439 install_prometheus_nodeexporter
1440 track nodeexporter
1441 [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
1442 [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
1443 fi
1444
1445 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1446 track osmclient
1447
1448 echo -e "Checking OSM health state..."
1449 if [ -n "$KUBERNETES" ]; then
1450 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
1451 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1452 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"; \
1453 track osm_unhealthy; }
1454 else
1455 $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
1456 { echo -e "OSM is not healthy, but will probably converge to a healthy state soon."; \
1457 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"; \
1458 track osm_unhealthy; }
1459 fi
1460 track after_healthcheck
1461
1462 [ -n "$KUBERNETES" ] && add_local_k8scluster
1463 track add_local_k8scluster
1464
1465 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
1466 track end
1467 return 0
1468 }
1469
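# install_to_openstack: deploys OSM on an OpenStack VM through Ansible.
# Arguments: $1 = openrc file path or cloud name, $2 = external network
# name/ID, $3 = whether to attach a volume ("true"/"false").
# Illustrative call (hypothetical values):
#   install_to_openstack ~/openrc.sh external-net false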
1470 function install_to_openstack() {
1471
1472 if [ -z "$2" ]; then
1473 FATAL "OpenStack installer requires a valid external network name"
1474 fi
1475
1476 # Install Pip for Python3
1477 $WORKDIR_SUDO apt install -y python3-pip python3-venv
1478 $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip
1479
1480 # Create a venv to avoid conflicts with the host installation
1481 python3 -m venv $OPENSTACK_PYTHON_VENV
1482
1483 source $OPENSTACK_PYTHON_VENV/bin/activate
1484
1485 # Install Ansible, OpenStack client and SDK; the latest OpenStack version supported is Train
1486 python -m pip install -U wheel
1487 python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
1488
1489 # Install the Openstack cloud module (ansible>=2.10)
1490 ansible-galaxy collection install openstack.cloud
1491
1492 export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
1493
1494 OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
1495
1496 ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
1497
1498 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
1499 ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
1500 fi
1501
1502 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
1503 ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
1504 fi
1505
1506 # Execute the Ansible playbook based on openrc or clouds.yaml
1507 if [ -e "$1" ]; then
1508 . $1
1509 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1510 $OSM_DEVOPS/installers/openstack/site.yml
1511 else
1512 ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
1513 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
1514 fi
1515
1516 # Exit from venv
1517 deactivate
1518
1519 return 0
1520 }
1521
1522 function install_vimemu() {
1523 echo "\nInstalling vim-emu"
1524 EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
1525 trap 'rm -rf "${EMUTEMPDIR}"' EXIT
1526 # install prerequisites (OVS is a must for the emulator to work)
1527 sudo apt-get install -y openvswitch-switch
1528 # clone vim-emu repository (attention: branch is currently master only)
1529 echo "Cloning vim-emu repository ..."
1530 git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
1531 # build vim-emu docker
1532 echo "Building vim-emu Docker container..."
1533
1534 sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
1535 # start vim-emu container as daemon
1536 echo "Starting vim-emu Docker container 'vim-emu' ..."
1537 if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
1538 # in lightweight mode, the emulator needs to be attached to netOSM
1539 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1540 else
1541 # classic build mode
1542 sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
1543 fi
1544 echo "Waiting for 'vim-emu' container to start ..."
1545 sleep 5
1546 export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
1547 echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
1548 # print vim-emu connection info
1549 echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
1550 echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
1551 echo -e "To add the emulated VIM to OSM you should do:"
1552 echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
1553 }
1554
1555 function install_k8s_monitoring() {
1556 # install OSM monitoring
1557 $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
1558 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
1559 }
1560
1561 function uninstall_k8s_monitoring() {
1562 # uninstall OSM monitoring
1563 $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
1564 }
1565
1566 function dump_vars(){
1567 echo "DEVELOP=$DEVELOP"
1568 echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
1569 echo "UNINSTALL=$UNINSTALL"
1570 echo "UPDATE=$UPDATE"
1571 echo "RECONFIGURE=$RECONFIGURE"
1572 echo "TEST_INSTALLER=$TEST_INSTALLER"
1573 echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
1574 echo "INSTALL_PLA=$INSTALL_PLA"
1575 echo "INSTALL_LXD=$INSTALL_LXD"
1576 echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
1577 echo "INSTALL_ONLY=$INSTALL_ONLY"
1578 echo "INSTALL_ELK=$INSTALL_ELK"
1579 echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
1580 #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
1581 echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
1582 echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
1583 echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
1584 echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
1585 echo "OPENSTACK_SSH_KEY_FILE"="$OPENSTACK_SSH_KEY_FILE"
1586 echo "OPENSTACK_USERDATA_FILE"="$OPENSTACK_USERDATA_FILE"
1587 echo "OPENSTACK_VM_NAME"="$OPENSTACK_VM_NAME"
1588 echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
1589 echo "TO_REBUILD=$TO_REBUILD"
1590 echo "INSTALL_NOLXD=$INSTALL_NOLXD"
1591 echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
1592 echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
1593 echo "RELEASE=$RELEASE"
1594 echo "REPOSITORY=$REPOSITORY"
1595 echo "REPOSITORY_BASE=$REPOSITORY_BASE"
1596 echo "REPOSITORY_KEY=$REPOSITORY_KEY"
1597 echo "OSM_DEVOPS=$OSM_DEVOPS"
1598 echo "OSM_VCA_HOST=$OSM_VCA_HOST"
1599 echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
1600 echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
1601 echo "NO_HOST_PORTS=$NO_HOST_PORTS"
1602 echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
1603 echo "WORKDIR_SUDO=$WORKDIR_SUDO"
1604 echo "OSM_WORK_DIR=$OSM_WORK_DIR"
1605 echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
1606 echo "DOCKER_USER=$DOCKER_USER"
1607 echo "OSM_STACK_NAME=$OSM_STACK_NAME"
1608 echo "PULL_IMAGES=$PULL_IMAGES"
1609 echo "KUBERNETES=$KUBERNETES"
1610 echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
1611 echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
1612 echo "SHOWOPTS=$SHOWOPTS"
1613 echo "Install from specific refspec (-b): $COMMIT_ID"
1614 }
1615
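# track: sends an anonymous progress ping for each installation stage to the
# OSM analytics endpoint. SESSION_ID holds the installer start time in epoch
# seconds, so duration is the elapsed time; the event name encodes the install
# flavour (bin/binsrc/lxd/lw) plus the stage name passed as $1.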
1616 function track(){
1617 ctime=`date +%s`
1618 duration=$((ctime - SESSION_ID))
1619 url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1620 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1621 event_name="bin"
1622 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
1623 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
1624 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
1625 event_name="${event_name}_$1"
1626 url="${url}&event=${event_name}&ce_duration=${duration}"
1627 wget -q -O /dev/null $url
1628 }
1629
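# parse_docker_registry_url: splits a -d argument of the form
# "user:password@registry[:port]" into its three components. Illustration
# (hypothetical values): "foo:bar@myregistry.local:5000" yields
# DOCKER_REGISTRY_USER=foo, DOCKER_REGISTRY_PASSWORD=bar and
# DOCKER_REGISTRY_URL=myregistry.local:5000. The credentials part is assumed
# to be present.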
1630 function parse_docker_registry_url() {
1631 DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
1632 DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
1633 DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
1634 }
1635
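# Default values for the configuration variables; most of them can be
# overridden through the command-line options parsed further below.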
1636 LXD_VERSION=4.0
1637 JUJU_VERSION=2.9
1638 JUJU_AGENT_VERSION=2.9.25
1639 UNINSTALL=""
1640 DEVELOP=""
1641 UPDATE=""
1642 RECONFIGURE=""
1643 TEST_INSTALLER=""
1644 INSTALL_LXD=""
1645 SHOWOPTS=""
1646 COMMIT_ID=""
1647 ASSUME_YES=""
1648 INSTALL_FROM_SOURCE=""
1649 RELEASE="ReleaseTEN"
1650 REPOSITORY="stable"
1651 INSTALL_VIMEMU=""
1652 INSTALL_PLA=""
1653 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1654 LXD_REPOSITORY_PATH=""
1655 INSTALL_LIGHTWEIGHT="y"
1656 INSTALL_TO_OPENSTACK=""
1657 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1658 OPENSTACK_PUBLIC_NET_NAME=""
1659 OPENSTACK_ATTACH_VOLUME="false"
1660 OPENSTACK_SSH_KEY_FILE=""
1661 OPENSTACK_USERDATA_FILE=""
1662 OPENSTACK_VM_NAME="server-osm"
1663 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1664 INSTALL_ONLY=""
1665 INSTALL_ELK=""
1666 TO_REBUILD=""
1667 INSTALL_NOLXD=""
1668 INSTALL_NODOCKER=""
1669 INSTALL_NOJUJU=""
1670 KUBERNETES="y"
1671 INSTALL_K8S_MONITOR=""
1672 INSTALL_NOHOSTCLIENT=""
1673 INSTALL_NOCACHELXDIMAGES=""
1674 SESSION_ID=`date +%s`
1675 OSM_DEVOPS=
1676 OSM_VCA_HOST=
1677 OSM_VCA_SECRET=
1678 OSM_VCA_PUBKEY=
1679 OSM_VCA_CLOUDNAME="localhost"
1680 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1681 OSM_STACK_NAME=osm
1682 NO_HOST_PORTS=""
1683 DOCKER_NOBUILD=""
1684 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1685 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1686 WORKDIR_SUDO=sudo
1687 OSM_WORK_DIR="/etc/osm"
1688 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1689 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1690 OSM_HOST_VOL="/var/lib/osm"
1691 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1692 OSM_DOCKER_TAG=latest
1693 DOCKER_USER=opensourcemano
1694 PULL_IMAGES="y"
1695 KAFKA_TAG=2.11-1.0.2
1696 KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
1697 PROMETHEUS_TAG=v2.28.1
1698 GRAFANA_TAG=8.1.1
1699 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1700 PROMETHEUS_CADVISOR_TAG=latest
1701 KEYSTONEDB_TAG=10
1702 OSM_DATABASE_COMMONKEY=
1703 ELASTIC_VERSION=6.4.2
1704 ELASTIC_CURATOR_VERSION=5.5.4
1705 POD_NETWORK_CIDR=10.244.0.0/16
1706 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1707 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1708 DOCKER_REGISTRY_URL=
1709 DOCKER_PROXY_URL=
1710 MODULE_DOCKER_TAG=
1711
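# Command-line parsing. Short options are handled natively by getopts; long
# options arrive through the "-:" case, with the option name in OPTARG (for
# "--foo", OPTARG is "foo"). Several long options (bundle, k8s, lxd, ...) are
# accepted here only so that they can be forwarded verbatim to the charmed
# installer via "$@".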
1712 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
1713 case "${o}" in
1714 b)
1715 COMMIT_ID=${OPTARG}
1716 PULL_IMAGES=""
1717 ;;
1718 r)
1719 REPOSITORY="${OPTARG}"
1720 REPO_ARGS+=(-r "$REPOSITORY")
1721 ;;
1722 c)
1723 [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
1724 [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
1725 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
1726 usage && exit 1
1727 ;;
1728 k)
1729 REPOSITORY_KEY="${OPTARG}"
1730 REPO_ARGS+=(-k "$REPOSITORY_KEY")
1731 ;;
1732 u)
1733 REPOSITORY_BASE="${OPTARG}"
1734 REPO_ARGS+=(-u "$REPOSITORY_BASE")
1735 ;;
1736 R)
1737 RELEASE="${OPTARG}"
1738 REPO_ARGS+=(-R "$RELEASE")
1739 ;;
1740 D)
1741 OSM_DEVOPS="${OPTARG}"
1742 ;;
1743 o)
1744 INSTALL_ONLY="y"
1745 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1746 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1747 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1748 ;;
1749 O)
1750 INSTALL_TO_OPENSTACK="y"
1751 if [ -n "${OPTARG}" ]; then
1752 OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
1753 else
1754 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1755 usage && exit 1
1756 fi
1757 ;;
1758 f)
1759 OPENSTACK_SSH_KEY_FILE="${OPTARG}"
1760 ;;
1761 F)
1762 OPENSTACK_USERDATA_FILE="${OPTARG}"
1763 ;;
1764 N)
1765 OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
1766 ;;
1767 m)
1768 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
1769 [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
1770 [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
1771 [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
1772 [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
1773 [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
1774 [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
1775 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
1776 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
1777 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
1778 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
1779 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1780 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
1781 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
1782 [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
1783 ;;
1784 H)
1785 OSM_VCA_HOST="${OPTARG}"
1786 ;;
1787 S)
1788 OSM_VCA_SECRET="${OPTARG}"
1789 ;;
1790 s)
1791 OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1792 ;;
1793 w)
1794 # when specifying workdir, do not use sudo for access
1795 WORKDIR_SUDO=
1796 OSM_WORK_DIR="${OPTARG}"
1797 ;;
1798 t)
1799 OSM_DOCKER_TAG="${OPTARG}"
1800 REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
1801 ;;
1802 U)
1803 DOCKER_USER="${OPTARG}"
1804 ;;
1805 P)
1806 OSM_VCA_PUBKEY=$(cat ${OPTARG})
1807 ;;
1808 A)
1809 OSM_VCA_APIPROXY="${OPTARG}"
1810 ;;
1811 l)
1812 LXD_CLOUD_FILE="${OPTARG}"
1813 ;;
1814 L)
1815 LXD_CRED_FILE="${OPTARG}"
1816 ;;
1817 K)
1818 CONTROLLER_NAME="${OPTARG}"
1819 ;;
1820 d)
1821 DOCKER_REGISTRY_URL="${OPTARG}"
1822 ;;
1823 p)
1824 DOCKER_PROXY_URL="${OPTARG}"
1825 ;;
1826 T)
1827 MODULE_DOCKER_TAG="${OPTARG}"
1828 ;;
1829 -)
1830 [ "${OPTARG}" == "help" ] && usage && exit 0
1831 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
1832 [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
1833 [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
1834 [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
1835 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
1836 [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
1837 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
1838 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
1839 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
1840 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
1841 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
1842 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
1843 [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
1844 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
1845 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
1846 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
1847 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
1848 [ "${OPTARG}" == "pullimages" ] && continue
1849 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
1850 [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
1851 [ "${OPTARG}" == "bundle" ] && continue
1852 [ "${OPTARG}" == "k8s" ] && continue
1853 [ "${OPTARG}" == "lxd" ] && continue
1854 [ "${OPTARG}" == "lxd-cred" ] && continue
1855 [ "${OPTARG}" == "microstack" ] && continue
1856 [ "${OPTARG}" == "overlay" ] && continue
1857 [ "${OPTARG}" == "only-vca" ] && continue
1858 [ "${OPTARG}" == "vca" ] && continue
1859 [ "${OPTARG}" == "ha" ] && continue
1860 [ "${OPTARG}" == "tag" ] && continue
1861 [ "${OPTARG}" == "registry" ] && continue
1862 [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
1863 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
1864 [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
1865 echo -e "Invalid option: '--$OPTARG'\n" >&2
1866 usage && exit 1
1867 ;;
1868 :)
1869 echo "Option -$OPTARG requires an argument" >&2
1870 usage && exit 1
1871 ;;
1872 \?)
1873 echo -e "Invalid option: '-$OPTARG'\n" >&2
1874 usage && exit 1
1875 ;;
1876 h)
1877 usage && exit 0
1878 ;;
1879 y)
1880 ASSUME_YES="y"
1881 ;;
1882 *)
1883 usage && exit 1
1884 ;;
1885 esac
1886 done
1887
1888 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
1889 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
1890 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
1891
1892 if [ -n "$SHOWOPTS" ]; then
1893 dump_vars
1894 exit 0
1895 fi
1896
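# --charmed installs are fully delegated to the juju-based installer scripts;
# everything below this point applies only to the classic installation.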
1897 if [ -n "$CHARMED" ]; then
1898 if [ -n "$UNINSTALL" ]; then
1899 ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1900 else
1901 ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
1902 fi
1903
1904 exit 0
1905 fi
1906
1907 # if develop, we force master
1908 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
1909
1910 need_packages="git wget curl tar"
1911
1912 [ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
1913
1914 echo -e "Checking required packages: $need_packages"
1915 dpkg -l $need_packages &>/dev/null \
1916 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1917 || sudo apt-get update \
1918 || FATAL "failed to run apt-get update"
1919 dpkg -l $need_packages &>/dev/null \
1920 || ! echo -e "Installing $need_packages requires root privileges." \
1921 || sudo apt-get install -y $need_packages \
1922 || FATAL "failed to install $need_packages"
1923 sudo snap install jq
1924 if [ -z "$OSM_DEVOPS" ]; then
1925 if [ -n "$TEST_INSTALLER" ]; then
1926 echo -e "\nUsing local devops repo for OSM installation"
1927 OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
1928 else
1929 echo -e "\nCreating temporary dir for OSM installation"
1930 OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
1931 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1932
1933 git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
1934
1935 if [ -z "$COMMIT_ID" ]; then
1936 echo -e "\nGuessing the current stable release"
1937 LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1938 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1939
1940 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1941 COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
1942 else
1943 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1944 fi
1945 git -C $OSM_DEVOPS checkout $COMMIT_ID
1946 fi
1947 fi
1948
1949 . $OSM_DEVOPS/common/all_funcs
1950
1951 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1952 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1953 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
1954 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1955 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1956 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1957 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1958 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1959
1960 #Installation starts here
1961 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README.txt &> /dev/null
1962 track start
1963
1964 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1965 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1966 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1967 ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
1968 fi
1969
1970 echo -e "Checking required packages: lxd"
1971 lxd --version &>/dev/null || FATAL "lxd not present, exiting."
1972 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1973
1974 # use local devops for containers
1975 export OSM_USE_LOCAL_DEVOPS=true
1976
1977 #Install osmclient
1978
1979 #Install vim-emu (optional)
1980 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu
1981
1982 wget -q -O- https://osm-download.etsi.org/ftp/osm-10.0-ten/README2.txt &> /dev/null
1983 track end
1984 echo -e "\nDONE"