#!/bin/bash
# Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function usage(){
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -h / --help: print this help"
    echo -e "     -y: do not prompt for confirmation, assumes yes"
    echo -e "     -r <repo>: use specified repository name for osm packages"
    echo -e "     -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "          -b master          (main dev branch)"
    echo -e "          -b v2.0            (v2.0 branch)"
    echo -e "          -b tags/v1.1.0     (a specific tag)"
    echo -e "          ..."
    echo -e "     -c <orchestrator>: deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used, osm will be deployed using the default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace>: user-defined stack name when installed using swarm, or namespace when installed using k8s; default is osm"
    echo -e "     -H <VCA host>: use specific juju host controller IP"
    echo -e "     -S <VCA secret>: use VCA/juju secret key"
    echo -e "     -P <VCA pubkey>: use VCA/juju public key file"
    echo -e "     -C <VCA cacert>: use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy>: use VCA/juju API proxy"
    echo -e "     --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla: install the PLA module for placement support"
    echo -e "     -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e "     -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -O <openrc file path/cloud name>: install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e "     -N <openstack public network name/ID>: public network name required to set up OSM on OpenStack"
    echo -e "     -f <path to SSH public key>: public SSH key to use to deploy OSM to OpenStack"
    echo -e "     -F <path to cloud-init file>: cloud-init userdata file to deploy OSM to OpenStack"
    echo -e "     -D <devops path>: use local devops installation path"
    echo -e "     -w <work dir>: location to store runtime installation"
    echo -e "     -t <docker tag>: specify osm docker tag (default is latest)"
    echo -e "     -l: LXD cloud yaml file"
    echo -e "     -L: LXD credentials yaml file"
    echo -e "     -K: specifies the name of the controller to use - the controller must already be bootstrapped"
    echo -e "     -d <docker registry URL>: use docker registry URL instead of dockerhub"
    echo -e "     -p <docker proxy URL>: set docker proxy URL as part of docker CE configuration"
    echo -e "     -T <docker tag>: specify docker tag for the modules specified with option -m"
    echo -e "     --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e "     --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e "     --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju: do not install juju, assumes it is already installed"
    echo -e "     --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source: install OSM from source code using the latest stable tag"
    echo -e "     --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e "     --volume: create a VM volume when installing to OpenStack"
    # echo -e "     --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts: print chosen options and exit (only for debugging)"
    echo -e "     --charmed: deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: specifies the name of the controller to use - the controller must already be bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: installs microstack as a vim (--charmed option)"
    echo -e "     [--overlay]: add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e "     [--ha]: installs High Availability bundle (--charmed option)"
    echo -e "     [--tag]: docker image tag (--charmed option)"
    echo -e "     [--registry]: docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
}

# Parses the juju accounts.yaml file (${HOME}/.local/share/juju/accounts.yaml)
# and returns the password for the given controller. Written using only bash
# tools to avoid pulling in additional packages.
function parse_juju_password {
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
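
# Usage sketch (mirrors the call made in install_lightweight below):
#   OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)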

function generate_secret() {
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
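
# Example (this is how it is used below when generating the env files): yields
# a random 32-character alphanumeric string, e.g.
#   MYSQL_ROOT_PASSWORD=$(generate_secret)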

function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}

function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}

function remove_iptables() {
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
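
# The DNAT rule deleted here is the one juju_createproxy adds below; it can be
# listed manually with, e.g.:
#   sudo iptables -t nat -S PREROUTING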

function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}

#removes osm deployments and services
function remove_k8s_namespace() {
    kubectl delete ns $1
}

#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}

function remove_crontab_job() {
    # the pattern must be double-quoted so ${OSM_DEVOPS} is expanded
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
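
# This drops the weekly crontab entry added by update_juju_images below, e.g.:
#   0 4 * * 6 <devops path>/installers/update-juju-lxc-images --xenial --bionic
# (<devops path> stands for the expanded ${OSM_DEVOPS}; shown for illustration)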

#Uninstall osmclient
function uninstall_osmclient() {
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}

#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/\${module}:${OSM_DOCKER_TAG}
done
EONG

        sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Clean up the OpenStack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}

#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}

#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}

function update_juju_images(){
    # user crontab entries have no user field, so the command follows the schedule directly
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}

function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}

function ask_user(){
    # Ask the user a question and parse the response, among 'y', 'yes', 'n' or 'no'. Case insensitive.
    # Params: $1 text to ask; $2 default action: 'y' for yes, 'n' for no; any other value or empty means no default is allowed
    # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
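
# Usage sketch (same pattern as the confirmation prompts in install_lightweight
# below, defaulting to yes when the user just presses Enter):
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1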

function install_osmclient(){
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}

function install_prometheus_nodeexporter(){
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}

function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}

function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
    \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}

function install_docker_compose() {
    # installs and configures docker-compose
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}

function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    [ -n "$INSTALL_NOCACHELXDIMAGES" ] || update_juju_images
    echo "Finished installation of juju"
    return 0
}

function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Controller not found, create it
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    juju controller-config features=[k8s-operators]
}

function juju_addk8s() {
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
}

function juju_createcontroller_k8s(){
    cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=$JUJU_AGENT_VERSION
}


function juju_addlxd_cloud(){
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}


function juju_createproxy() {
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
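
# Resulting NAT rule (illustrative): requests to the host's default IP on the
# juju API port 17070 are forwarded to the controller at $OSM_VCA_HOST:
#   -A PREROUTING -d <DEFAULT_IP>/32 -p tcp -m tcp --dport 17070 -j DNAT --to-destination <OSM_VCA_HOST>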

function docker_login() {
    echo "Docker login"
    sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD}"
}

function generate_docker_images() {
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}

function cmp_overwrite() {
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
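
# Usage sketch: install a template only if it differs from the destination,
# prompting before overwriting (paths are illustrative, borrowed from
# generate_docker_compose_files below):
#   cmp_overwrite ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml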

function generate_docker_compose_files() {
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}

function generate_k8s_manifest_files() {
    #Kubernetes resources
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
}

function generate_prometheus_grafana_files() {
    [ -n "$KUBERNETES" ] && return
    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}

function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    echo "Finished generation of docker env files"
}

function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
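
# Usage sketch: running the generated wrapper opens an interactive osmclient
# container attached to the net${OSM_STACK_NAME} docker network:
#   $OSM_DOCKER_WORK_DIR/osm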

#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
    sudo apt-mark hold kubelet kubeadm kubectl
}

#initializes kubernetes control plane
function init_kubeadm() {
    sudo swapoff -a
    sudo sed -i.bak '/.*none.*swap/s/^\(.*\)$/#\1/g' /etc/fstab
    sudo kubeadm init --config $1
    sleep 5
}

function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}

function install_k8s_storageclass() {
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    local counter=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( counter < storageclass_timeout ))
    do
        kubectl get storageclass openebs-hostpath &> /dev/null
        if [ $? -eq 0 ] ; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        else
            counter=$((counter + 15))
            sleep 15
        fi
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}

function install_k8s_metallb() {
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
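    # Note: this is a single-address pool (<DEFAULT_IP>-<DEFAULT_IP>), so MetalLB
    # can only hand out the host's own IP to LoadBalancer services.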
    cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
    echo "apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE" | kubectl apply -f -
}

#deploys flannel as daemonsets
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}

#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
}
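
# Each secret mirrors one env file one-to-one; after deployment they can be
# inspected with, e.g.:
#   kubectl get secret lcm-secret -n $OSM_STACK_NAME -o yaml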

#removes the NoSchedule taint from the K8s master node, so pods can be scheduled on it
function taint_master_node() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}

#deploys osm pods and services
function deploy_osm_services() {
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}

#deploy charmed services
function deploy_charmed_services() {
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}

function deploy_osm_pla_service() {
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}

#Install helm and tiller
function install_helm() {
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120;
        counter=0;
        tiller_status=""
        while (( counter < tiller_timeout ))
        do
            tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
            counter=$((counter + 5))
            sleep 5
        done
        [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
    fi
}

function parse_yaml() {
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
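
# Usage sketch (as called from update_manifest_files below): retag a set of
# service manifests to a given docker tag, e.g.
#   parse_yaml $OSM_DOCKER_TAG "nbi lcm ro"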

function update_manifest_files() {
    osm_services="nbi lcm ro pol mon ng-ui keystone pla"
    list_of_services=""
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "9" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}

function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
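
# Effect: rewrites the hostPath in each manifest, e.g.
#   path: /var/lib/osm   ->   path: ${OSM_HOST_VOL}/${OSM_STACK_NAME}
# (OSM_NAMESPACE_VOL is set to ${OSM_HOST_VOL}/${OSM_STACK_NAME}; see uninstall_lightweight)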

function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}

function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}

function deploy_lightweight() {

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}

function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
            -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
            "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
            -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}

function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}

1142 function install_lightweight() {
1143 track checkingroot
1144 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1145 track noroot
1146
1147 if [ -n "$KUBERNETES" ]; then
1148 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1149 1. Install and configure LXD
1150 2. Install juju
1151 3. Install docker CE
1152 4. Disable swap space
1153 5. Install and initialize Kubernetes
1154 as pre-requirements.
1155 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1156
1157 else
1158 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1159 fi
1160 track proceed
1161
1162 echo "Installing lightweight build of OSM"
1163 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1164 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1165 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1166 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1167 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1168 DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1169 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1170 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1171
1172 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1173 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1174 need_packages_lw="snapd"
1175 echo -e "Checking required packages: $need_packages_lw"
1176 dpkg -l $need_packages_lw &>/dev/null \
1177 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1178 || sudo apt-get update \
1179 || FATAL "failed to run apt-get update"
1180 dpkg -l $need_packages_lw &>/dev/null \
1181 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1182 || sudo apt-get install -y $need_packages_lw \
1183 || FATAL "failed to install $need_packages_lw"
1184 install_lxd
1185 fi
1186
1187 track prereqok
1188
1189 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1190
1191 echo "Creating folders for installation"
1192 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1193 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1194 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1195
1196 #Installs Kubernetes
1197 if [ -n "$KUBERNETES" ]; then
1198 install_kube
1199 track install_k8s
1200 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1201 kube_config_dir
1202 track init_k8s
1203 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1204 # uninstall OSM MONITORING
1205 uninstall_k8s_monitoring
1206 track uninstall_k8s_monitoring
1207 fi
1208 #remove old namespace
1209 remove_k8s_namespace $OSM_STACK_NAME
1210 deploy_cni_provider
1211 taint_master_node
1212 install_k8s_storageclass
1213 track k8s_storageclass
1214 install_k8s_metallb
1215 track k8s_metallb
1216 else
1217 #install_docker_compose
1218 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1219 track docker_swarm
1220 fi
1221
1222 [ -z "$INSTALL_NOJUJU" ] && install_juju
1223 track juju_install
1224
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
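                # No external LXD cloud/credential files given: generate a
                # local lxd-cloud definition and a self-signed client
                # certificate, trust the certificate in the local LXD daemon,
                # and register cloud and credential in the existing controller.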
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all"
            track osm_unhealthy
        }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || {
            echo -e "OSM is not healthy, but will probably converge to a healthy state soon."
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}"
            track osm_unhealthy
        }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}

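# Deploy OSM on an OpenStack VM via the bundled Ansible playbook.
# Arguments: $1 openrc file path or clouds.yaml cloud name,
#            $2 external (public) network name, $3 whether to attach a volume.
# Illustrative call (hypothetical values):
#   install_to_openstack ~/openrc.sh public-net false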
function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible and the OpenStack client and SDK; the latest OpenStack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the OpenStack cloud collection (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook, based on either an openrc file or a clouds.yaml entry
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit the venv
    deactivate

    return 0
}

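# Build the vim-emu Docker image from the upstream repository and run it as a
# privileged container (attached to the OSM docker network in lightweight mode).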
function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone the vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build the vim-emu docker image
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start the vim-emu container as a daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

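# Install the optional OSM monitoring stack (k8s_monitor) through the helper
# scripts shipped under $OSM_DEVOPS/installers/k8s.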
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

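# Print the effective configuration and exit; triggered by --showopts.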
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    echo "INSTALL_NOCACHELXDIMAGES=$INSTALL_NOCACHELXDIMAGES"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

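# Report an anonymous installation-progress event to the OSM telemetry
# endpoint. The event name combines the install flavour with the stage given
# in $1 (illustrative: "track docker_build" in a lightweight install sends
# event "lw_docker_build"), and ce_duration carries the seconds elapsed since
# the installer started (SESSION_ID holds the start timestamp).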
function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}

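# Split DOCKER_REGISTRY_URL of the form user:password@host[:port] into
# DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and a bare DOCKER_REGISTRY_URL.
# Illustrative (hypothetical) value:
#   "myuser:mypass@registry.example.com:5000"
#   -> USER=myuser, PASSWORD=mypass, URL=registry.example.com:5000
# Note: the parsing assumes the "user:password@" prefix is present.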
function parse_docker_registry_url() {
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
}

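# Default values, overridable via the command-line options parsed below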
JUJU_AGENT_VERSION=2.8.6
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
INSTALL_NOCACHELXDIMAGES=""
SESSION_ID=`date +%s`
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

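# Parse command-line options; see usage() at the top of this file.
# Illustrative invocations (hypothetical values):
#   ./full_install_osm.sh -y                      # default k8s-based install
#   ./full_install_osm.sh -y -c swarm -t testing  # swarm install, docker tag "testing"
#   ./full_install_osm.sh -O ~/openrc.sh -N public-net  # install to OpenStack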
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c: '$OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O: '$OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "overlay" ] && continue
            [ "${OPTARG}" == "only-vca" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            [ "${OPTARG}" == "nocachelxdimages" ] && INSTALL_NOCACHELXDIMAGES="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done

[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

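# The charmed (Juju-bundle-based) flavour is delegated to dedicated installer
# scripts and short-circuits the rest of this file.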
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

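# Load the shared helper functions (FATAL, ask_user, track helpers, ...) from
# the selected devops checkout.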
. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"