Updates in CI pipeline and installer for the new pipeline: registry and proxy url...
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 function usage(){
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
19 echo -e " OPTIONS"
20 echo -e " -h / --help: print this help"
21 echo -e " -y: do not prompt for confirmation, assumes yes"
22 echo -e " -r <repo>: use specified repository name for osm packages"
23 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
24 echo -e " -u <repo base>: use specified repository url for osm packages"
25 echo -e " -k <repo key>: use specified repository public key url"
26 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
27 echo -e " -b master (main dev branch)"
28 echo -e " -b v2.0 (v2.0 branch)"
29 echo -e " -b tags/v1.1.0 (a specific tag)"
30 echo -e " ..."
31 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
32 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
33 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
38 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
39 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
40 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
41 echo -e " --pla: install the PLA module for placement support"
42 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
43 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
44 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
45 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
46 echo -e " -D <devops path> use local devops installation path"
47 echo -e " -w <work dir> Location to store runtime installation"
48 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
55 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
56 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
57 echo -e " --nojuju: do not juju, assumes already installed"
58 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
59 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
60 echo -e " --nohostclient: do not install the osmclient"
61 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
62 echo -e " --source: install OSM from source code using the latest stable tag"
63 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
64 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
65 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
66 echo -e " --volume: create a VM volume when installing to OpenStack"
67 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
68 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
69 echo -e " --showopts: print chosen options and exit (only for debugging)"
70 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
71 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
72 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
73 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
74 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
75 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
76 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
77 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
78 echo -e " [--tag]: Docker image tag. (--charmed option)"
79 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
80
81 }
82
83 # takes a juju/accounts.yaml file and returns the password for a
84 # specific controller. I wrote this using only bash tools to avoid
85 # adding dependencies on other packages
86 function parse_juju_password {
87 password_file="${HOME}/.local/share/juju/accounts.yaml"
88 local controller_name=$1
89 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
90 sed -ne "s|^\($s\):|\1|" \
91 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
92 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
93 awk -F$fs -v controller=$controller_name '{
94 indent = length($1)/2;
95 vname[indent] = $2;
96 for (i in vname) {if (i > indent) {delete vname[i]}}
97 if (length($3) > 0) {
98 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
99 if (match(vn,controller) && match($2,"password")) {
100 printf("%s",$3);
101 }
102 }
103 }'
104 }
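# Example (illustrative, assuming the default stack name "osm"): this is how
# the installer later derives OSM_VCA_SECRET from the bootstrapped controller:
#   OSM_VCA_SECRET=$(parse_juju_password osm)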
105
106 function generate_secret() {
107 head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
108 }
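# Example (illustrative): generate_secret emits a 32-character alphanumeric
# string; it is used below for one-off credentials, e.g.:
#   MYSQL_ROOT_PASSWORD=$(generate_secret)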
109
110 function remove_volumes() {
111 if [ -n "$KUBERNETES" ]; then
112 k8_volume=$1
113 echo "Removing ${k8_volume}"
114 $WORKDIR_SUDO rm -rf ${k8_volume}
115 else
116 stack=$1
117 volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
118 for volume in $volumes; do
119 sg docker -c "docker volume rm ${stack}_${volume}"
120 done
121 fi
122 }
123
124 function remove_network() {
125 stack=$1
126 sg docker -c "docker network rm net${stack}"
127 }
128
129 function remove_iptables() {
130 stack=$1
131 if [ -z "$OSM_VCA_HOST" ]; then
132 OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
133 [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
134 fi
135
136 if [ -z "$DEFAULT_IP" ]; then
137 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
138 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
139 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
140 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
141 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
142 fi
143
144 if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
145 sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
146 sudo netfilter-persistent save
147 fi
148 }
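# Note: the DNAT rule deleted above (PREROUTING, tcp dport 17070 to
# $OSM_VCA_HOST) is the same rule added by juju_createproxy during install,
# so uninstalling reverses exactly what the installer configured.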
149
150 function remove_stack() {
151 stack=$1
152 if sg docker -c "docker stack ps ${stack}" ; then
153 echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
154 COUNTER=0
155 result=1
156 while [ ${COUNTER} -lt 30 ]; do
157 result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
158 #echo "Dockers running: $result"
159 if [ "${result}" == "0" ]; then
160 break
161 fi
162 let COUNTER=COUNTER+1
163 sleep 1
164 done
165 if [ "${result}" == "0" ]; then
166 echo "All dockers of the stack ${stack} were removed"
167 else
168 FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
169 fi
170 sleep 5
171 fi
172 }
173
174 #removes osm deployments and services
175 function remove_k8s_namespace() {
176 kubectl delete ns $1
177 }
178
179 #removes helm only if there is nothing deployed in helm
180 function remove_helm() {
181 if [ "$(helm ls -q)" == "" ] ; then
182 sudo helm reset --force
183 kubectl delete --namespace kube-system serviceaccount tiller
184 kubectl delete clusterrolebinding tiller-cluster-rule
185 sudo rm /usr/local/bin/helm
186 rm -rf $HOME/.helm
187 fi
188 }
189
190 function remove_crontab_job() {
191 crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
192 }
193
194 #Uninstall osmclient
195 function uninstall_osmclient() {
196 sudo apt-get remove --purge -y python-osmclient
197 sudo apt-get remove --purge -y python3-osmclient
198 }
199
200 #Uninstall lightweight OSM: remove dockers
201 function uninstall_lightweight() {
202 if [ -n "$INSTALL_ONLY" ]; then
203 if [ -n "$INSTALL_ELK" ]; then
204 echo -e "\nUninstalling OSM ELK stack"
205 remove_stack osm_elk
206 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
207 fi
208 else
209 echo -e "\nUninstalling OSM"
210 if [ -n "$KUBERNETES" ]; then
211 if [ -n "$INSTALL_K8S_MONITOR" ]; then
212 # uninstall OSM MONITORING
213 uninstall_k8s_monitoring
214 fi
215 remove_k8s_namespace $OSM_STACK_NAME
216 else
217 remove_stack $OSM_STACK_NAME
218 remove_stack osm_elk
219 fi
220 echo "Now osm docker images and volumes will be deleted"
221 # TODO: clean-up of images should take into account if other tags were used for specific modules
222 newgrp docker << EONG
223 for module in ro lcm keystone nbi mon pol pla osmclient; do
224 docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
225 done
226 EONG
227
228 if [ -n "$NGUI" ]; then
229 sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
230 else
231 sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
232 fi
233
234 if [ -n "$KUBERNETES" ]; then
235 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
236 remove_volumes $OSM_NAMESPACE_VOL
237 else
238 remove_volumes $OSM_STACK_NAME
239 remove_network $OSM_STACK_NAME
240 [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
241 fi
242 echo "Removing $OSM_DOCKER_WORK_DIR"
243 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
244 [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
245 fi
246 remove_crontab_job
247 [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
248 echo "Some docker images will be kept in case they are used by other docker stacks"
249 echo "To remove them, just run 'docker image prune' in a terminal"
250 return 0
251 }
252
253 #Safe unattended install of iptables-persistent
254 function check_install_iptables_persistent(){
255 echo -e "\nChecking required packages: iptables-persistent"
256 if ! dpkg -l iptables-persistent &>/dev/null; then
257 echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
258 echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
259 echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
260 sudo apt-get -yq install iptables-persistent
261 fi
262 }
263
264 #Configure NAT rules, based on the current IP addresses of containers
265 function nat(){
266 check_install_iptables_persistent
267
268 echo -e "\nConfiguring NAT rules"
269 echo -e " Required root privileges"
270 sudo $OSM_DEVOPS/installers/nat_osm
271 }
272
273 function FATAL(){
274 echo "FATAL error: Cannot install OSM due to \"$1\""
275 exit 1
276 }
277
278 function update_juju_images(){
279 crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
280 ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
281 }
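# The crontab entry added above refreshes the Juju LXC images every Saturday
# at 04:00; remove_crontab_job drops this same entry on uninstall.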
282
283 function install_lxd() {
284 # Apply sysctl production values for optimal performance
285 sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
286 sudo sysctl --system
287
288 # Install LXD snap
289 sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
290 sudo snap install lxd
291 sudo apt-get install zfsutils-linux -y
292
293 # Configure LXD
294 sudo usermod -a -G lxd `whoami`
295 cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
296 sg lxd -c "lxd waitready"
297 DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
298 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
299 DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
300 sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
301 sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
302 #sudo systemctl stop lxd-bridge
303 #sudo systemctl --system daemon-reload
304 #sudo systemctl enable lxd-bridge
305 #sudo systemctl start lxd-bridge
306 }
307
308 function ask_user(){
309 # asks the user and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive
310 # Params: $1 text to ask; $2 default action, can be 'y' for yes, 'n' for no; any other value or empty means no default is allowed
311 # Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
312 read -e -p "$1" USER_CONFIRMATION
313 while true ; do
314 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
315 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
316 [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
317 [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
318 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
319 done
320 }
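# Example (illustrative): abort unless the user confirms, with an empty answer
# defaulting to yes, as install_lightweight does:
#   ask_user "Do you want to proceed (Y/n)? " y || exit 1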
321
322 function install_osmclient(){
323 CLIENT_RELEASE=${RELEASE#"-R "}
324 CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
325 CLIENT_REPOSITORY=${REPOSITORY#"-r "}
326 CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
327 key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
328 curl $key_location | sudo apt-key add -
329 sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
330 sudo apt-get update
331 sudo apt-get install -y python3-pip
332 sudo -H LC_ALL=C python3 -m pip install -U pip
333 sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
334 sudo apt-get install -y python3-osm-im python3-osmclient
335 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
336 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
337 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
338 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
339 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
340 echo -e "\nOSM client installed"
341 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
342 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
343 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
344 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
345 else
346 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
347 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
348 echo " export OSM_HOSTNAME=<OSM_host>"
349 fi
350 return 0
351 }
352
353 function install_prometheus_nodeexporter(){
354 if (systemctl -q is-active node_exporter)
355 then
356 echo "Node Exporter is already running."
357 else
358 echo "Node Exporter is not active, installing..."
359 if getent passwd node_exporter > /dev/null 2>&1; then
360 echo "node_exporter user exists"
361 else
362 echo "Creating user node_exporter"
363 sudo useradd --no-create-home --shell /bin/false node_exporter
364 fi
365 wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
366 sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
367 sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
368 sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
369 sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
370 sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
371 sudo systemctl daemon-reload
372 sudo systemctl restart node_exporter
373 sudo systemctl enable node_exporter
374 echo "Node Exporter has been activated in this host."
375 fi
376 return 0
377 }
378
379 function uninstall_prometheus_nodeexporter(){
380 sudo systemctl stop node_exporter
381 sudo systemctl disable node_exporter
382 sudo rm /etc/systemd/system/node_exporter.service
383 sudo systemctl daemon-reload
384 sudo userdel node_exporter
385 sudo rm /usr/local/bin/node_exporter
386 return 0
387 }
388
389 function install_docker_ce() {
390 # installs and configures Docker CE
391 echo "Installing Docker CE ..."
392 sudo apt-get -qq update
393 sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
394 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
395 sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
396 sudo apt-get -qq update
397 sudo apt-get install -y docker-ce
398 echo "Adding user to group 'docker'"
399 sudo groupadd -f docker
400 sudo usermod -aG docker $USER
401 sleep 2
402 sudo service docker restart
403 echo "... restarted Docker service"
404 if [ -n "${DOCKER_PROXY_URL}" ]; then
405 echo "Configuring docker proxy ..."
406 if [ -f /etc/docker/daemon.json ]; then
407 if grep -q registry-mirrors /etc/docker/daemon.json; then
408 sudo sed -Ei 's/("registry-mirrors".*\[)(.*)\]/\1\2, \"'"${DOCKER_PROXY_URL}"'\"\]/' /etc/docker/daemon.json
409 else
410 sudo sed -i 's/{/{\n "registry-mirrors": [\"'"${DOCKER_PROXY_URL}"'\"],/' /etc/docker/daemon.json
411 fi
412 else
413 sudo tee /etc/docker/daemon.json > /dev/null << EOF
414 {
415 "registry-mirrors": ["${DOCKER_PROXY_URL}"]
416 }
417 EOF
418 fi
419 sudo systemctl daemon-reload
420 sudo service docker restart
421 echo "... restarted Docker service again"
422 fi
423 sg docker -c "docker version" || FATAL "Docker installation failed"
424 echo "... Docker CE installation done"
425 return 0
426 }
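# Illustrative result (hypothetical proxy URL): installing with
# "-p https://registry-proxy.example.com" on a host without a previous
# /etc/docker/daemon.json leaves the following file in place:
#   {
#     "registry-mirrors": ["https://registry-proxy.example.com"]
#   }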
427
428 function install_docker_compose() {
429 # installs and configures docker-compose
430 echo "Installing Docker Compose ..."
431 sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
432 sudo chmod +x /usr/local/bin/docker-compose
433 echo "... Docker Compose installation done"
434 }
435
436 function install_juju() {
437 echo "Installing juju"
438 sudo snap install juju --classic --channel=2.8/stable
439 [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
440 update_juju_images
441 echo "Finished installation of juju"
442 return 0
443 }
444
445 function juju_createcontroller() {
446 if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
447 # Controller not found, create it
448 sudo usermod -a -G lxd ${USER}
449 sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
450 fi
451 [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
452 juju controller-config features=[k8s-operators]
453 }
454
455 function juju_addk8s() {
456 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath
457 }
458
459 function juju_createcontroller_k8s(){
460 cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client
461 juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
462 --config controller-service-type=loadbalancer \
463 --agent-version=$JUJU_AGENT_VERSION
464 }
465
466
467 function juju_addlxd_cloud(){
468 mkdir -p /tmp/.osm
469 OSM_VCA_CLOUDNAME="lxd-cloud"
470 LXDENDPOINT=$DEFAULT_IP
471 LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
472 LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
473
474 cat << EOF > $LXD_CLOUD
475 clouds:
476 $OSM_VCA_CLOUDNAME:
477 type: lxd
478 auth-types: [certificate]
479 endpoint: "https://$LXDENDPOINT:8443"
480 config:
481 ssl-hostname-verification: false
482 EOF
483 openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
484 local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
485 local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/ /'`
486 local client_key=`cat /tmp/.osm/client.key | sed 's/^/ /'`
487
488 cat << EOF > $LXD_CREDENTIALS
489 credentials:
490 $OSM_VCA_CLOUDNAME:
491 lxd-cloud:
492 auth-type: certificate
493 server-cert: |
494 $server_cert
495 client-cert: |
496 $client_cert
497 client-key: |
498 $client_key
499 EOF
500 lxc config trust add local: /tmp/.osm/client.crt
501 juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
502 juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
503 sg lxd -c "lxd waitready"
504 juju controller-config features=[k8s-operators]
505 }
506
507
508 function juju_createproxy() {
509 check_install_iptables_persistent
510
511 if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
512 sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
513 sudo netfilter-persistent save
514 fi
515 }
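# Illustrative effect (hypothetical addresses): with DEFAULT_IP=172.21.1.1 and
# OSM_VCA_HOST=10.45.28.100, the rule added above is equivalent to:
#   iptables -t nat -A PREROUTING -p tcp -m tcp -d 172.21.1.1 --dport 17070 \
#       -j DNAT --to-destination 10.45.28.100
# i.e. Juju API traffic reaching the host is forwarded to the VCA controller.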
516
517 function generate_docker_images() {
518 echo "Pulling and generating docker images"
519
520 echo "Pulling docker images"
521
522 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
523 sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
524 sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
525 fi
526
527 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
528 sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
529 fi
530
531 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
532 sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
533 fi
534
535 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
536 sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
537 fi
538
539 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
540 sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
541 fi
542
543 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
544 sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
545 fi
546
547 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
548 sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
549 fi
550
551 if [ -n "$PULL_IMAGES" ]; then
552 echo "Pulling OSM docker images"
553 for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
554 module_lower=${module,,}
555 if [ $module == "LW-UI" ]; then
556 if [ -n "$NGUI" ]; then
557 continue
558 else
559 module_lower="light-ui"
560 fi
561 fi
562 if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
563 continue
564 fi
565 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
566 continue
567 fi
568 module_tag="${OSM_DOCKER_TAG}"
569 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
570 module_tag="${MODULE_DOCKER_TAG}"
571 fi
572 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
573 sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
574 done
575 else
576 _build_from=$COMMIT_ID
577 [ -z "$_build_from" ] && _build_from="latest"
578 echo "OSM Docker images generated from $_build_from"
579
580 for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
581 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
582 module_lower=${module,,}
583 if [ $module == "LW-UI" ]; then
584 if [ -n "$NGUI" ]; then
585 continue
586 else
587 module_lower="light-ui"
588 fi
589 fi
590 if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
591 continue
592 fi
593 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
594 continue
595 fi
596 git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
597 git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
598 sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
599 fi
600 done
601 if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
602 BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
603 BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
604 BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
605 BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
606 sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
607 fi
608 echo "Finished generation of docker images"
609 fi
610
611 echo "Finished pulling and generating docker images"
612 }
613
614 function cmp_overwrite() {
615 file1="$1"
616 file2="$2"
617 if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
618 if [ -f "${file2}" ]; then
619 ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
620 else
621 cp -b ${file1} ${file2}
622 fi
623 fi
624 }
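# Example (illustrative, hypothetical paths): install a generated file only if
# it differs from the destination, prompting before overwriting and keeping a
# "~" backup thanks to cp -b:
#   cmp_overwrite ${LWTEMPDIR}/osm.conf /etc/osm/osm.conf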
625
626 function generate_docker_compose_files() {
627 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
628 if [ -n "$NGUI" ]; then
629 # For NG-UI
630 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
631 else
632 # Docker-compose
633 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-lightui.yaml $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
634 fi
635 if [ -n "$INSTALL_PLA" ]; then
636 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
637 fi
638 }
639
640 function generate_k8s_manifest_files() {
641 #Kubernetes resources
642 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
643 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
644 if [ -n "$NGUI" ]; then
645 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/light-ui.yaml
646 else
647 $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/ng-ui.yaml
648 fi
649 }
650
651 function generate_prometheus_grafana_files() {
652 [ -n "$KUBERNETES" ] && return
653 # Prometheus files
654 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
655 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
656
657 # Grafana files
658 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
659 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
660 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
661 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
662 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
663
664 # Prometheus Exporters files
665 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
666 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
667 }
668
669 function generate_docker_env_files() {
670 echo "Doing a backup of existing env files"
671 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
672 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
673 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
674 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
675 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
676 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
677 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
678 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
679 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
680
681 echo "Generating docker env files"
682 # LCM
683 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
684 echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
685 fi
686
687 if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
688 echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
689 else
690 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
691 fi
692
693 if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
694 echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
695 else
696 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
697 fi
698
699 if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
700 echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
701 else
702 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
703 fi
704
705 if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
706 echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
707 else
708 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
709 fi
710
711 if [ -n "$OSM_VCA_APIPROXY" ]; then
712 if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
713 echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
714 else
715 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
716 fi
717 fi
718
719 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
720 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
721 fi
722
723 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
724 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
725 fi
726
727 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
728 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
729 else
730 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
731 fi
732
733 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
734 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
735 else
736 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
737 fi
738
739 # RO
740 MYSQL_ROOT_PASSWORD=$(generate_secret)
741 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
742 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
743 fi
744 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
745 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
746 fi
747 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
748 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
749 fi
750
751 # Keystone
752 KEYSTONE_DB_PASSWORD=$(generate_secret)
753 SERVICE_PASSWORD=$(generate_secret)
754 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
755 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
756 fi
757 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
758 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
759 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
760 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
761 fi
762
763 # NBI
764 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
765 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
766 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
767 fi
768
769 # MON
770 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
771 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
772 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
773 fi
774
775 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
776 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
777 else
778 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
779 fi
780
781 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
782 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
783 else
784 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
785 fi
786
787 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
788 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
789 else
790 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
791 fi
792
793 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
794 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
795 else
796 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
797 fi
798
799
800 # POL
801 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
802 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
803 fi
804
805 # LW-UI
806 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
807 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
808 fi
809
810 echo "Finished generation of docker env files"
811 }
812
813 function generate_osmclient_script () {
814 echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
815 $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
816 echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
817 }
818
819 #installs kubernetes packages
820 function install_kube() {
821 sudo apt-get update && sudo apt-get install -y apt-transport-https
822 curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
823 sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
824 sudo apt-get update
825 echo "Installing Kubernetes Packages ..."
826 sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
827 }
828
829 #initializes kubernetes control plane
830 function init_kubeadm() {
831 sudo swapoff -a
832 sudo kubeadm init --config $1
833 sleep 5
834 }
835
836 function kube_config_dir() {
837 [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
838 mkdir -p $HOME/.kube
839 sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
840 sudo chown $(id -u):$(id -g) $HOME/.kube/config
841 }
842
843 function install_k8s_storageclass() {
844 kubectl apply -f https://openebs.github.io/charts/openebs-operator-1.6.0.yaml
845 local storageclass_timeout=300
846 local counter=0
847 echo "Waiting for storageclass"
848 while (( counter < storageclass_timeout ))
849 do
850 kubectl get storageclass openebs-hostpath &> /dev/null
851
852 if [ $? -eq 0 ] ; then
853 echo "Storageclass available"
854 break
855 else
856 counter=$((counter + 15))
857 sleep 15
858 fi
859 done
860 kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
861 }
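# Note: the patch above marks openebs-hostpath as the default StorageClass;
# juju_addk8s relies on this class when registering the cluster with
# "--storage openebs-hostpath".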
862
863 function install_k8s_metallb() {
864 METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
865 cat ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml | kubectl apply -f -
866 echo "apiVersion: v1
867 kind: ConfigMap
868 metadata:
869 namespace: metallb-system
870 name: config
871 data:
872 config: |
873 address-pools:
874 - name: default
875 protocol: layer2
876 addresses:
877 - $METALLB_IP_RANGE" | kubectl apply -f -
878 }
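# Note: METALLB_IP_RANGE is the single-address pool "$DEFAULT_IP-$DEFAULT_IP",
# so MetalLB answers for the host's own IP in layer2 mode, e.g. (hypothetical)
# with DEFAULT_IP=172.21.1.1 the pool is 172.21.1.1-172.21.1.1.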
879 #deploys flannel as daemonsets
880 function deploy_cni_provider() {
881 CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
882 trap 'rm -rf "${CNI_DIR}"' EXIT
883 wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
884 kubectl apply -f $CNI_DIR
885 [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
886 }
887
888 #creates secrets from env files which will be used by containers
889 function kube_secrets(){
890 kubectl create ns $OSM_STACK_NAME
891 kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
892 kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
893 kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
894 kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
895 kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
896 kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
897 kubectl create secret generic lwui-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lwui.env
898 kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
899 }
900
901 #removes the NoSchedule taint from the K8s master node, allowing pods to be scheduled on it
902 function taint_master_node() {
903 K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
904 kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
905 sleep 5
906 }
907
908 #deploys osm pods and services
909 function deploy_osm_services() {
910 kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
911 }
912
913 #deploy charmed services
914 function deploy_charmed_services() {
915 juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
916 # deploy mongodb charm
917 namespace=$OSM_STACK_NAME
918 juju deploy cs:~charmed-osm/mongodb-k8s \
919 --config enable-sidecar=true \
920 --config replica-set=rs0 \
921 --config namespace=$namespace \
922 -m $namespace
923 }
924
925 function deploy_osm_pla_service() {
926 # corresponding to namespace_vol
927 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
928 # corresponding to deploy_osm_services
929 kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
930 }
931
932 #Install helm and tiller
933 function install_helm() {
934 helm > /dev/null 2>&1
935 if [ $? != 0 ] ; then
936 # Helm is not installed. Install helm
937 echo "Helm is not installed, installing ..."
938 curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
939 tar -zxvf helm-v2.15.2.tar.gz
940 sudo mv linux-amd64/helm /usr/local/bin/helm
941 rm -r linux-amd64
942 rm helm-v2.15.2.tar.gz
943 fi
944
945 # Checking if tiller has been configured
946 kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
947 if [ $? == 1 ] ; then
948 # tiller account for kubernetes
949 kubectl --namespace kube-system create serviceaccount tiller
950 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
951 # HELM initialization
952 helm init --service-account tiller
953
954 # Wait for Tiller to be up and running. If timeout expires, continue installing
955 tiller_timeout=120;
956 counter=0;
957 tiller_status=""
958 while (( counter < tiller_timeout ))
959 do
960 tiller_status=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}'`
961 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && echo "Tiller ready" && break
962 counter=$((counter + 5))
963 sleep 5
964 done
965 [ "$tiller_status" != "1/1" ] && echo "Tiller is NOT READY YET. Installation will continue"
966 fi
967 }
968
969 function parse_yaml() {
970 TAG=$1
971 shift
972 services=$@
973 for module in $services; do
974 if [ "$module" == "pla" ]; then
975 if [ -n "$INSTALL_PLA" ]; then
976 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
977 $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/${DOCKER_REGISTRY_URL}${DOCKER_USER}\/\/pla:${OSM_DOCKER_TAG}/g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
978 fi
979 else
980 echo "Updating K8s manifest file from opensourcemano\/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}"
981 $WORKDIR_SUDO sed -i "s/opensourcemano\/${module}:.*/${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${module}:${TAG}/g" ${OSM_K8S_WORK_DIR}/${module}.yaml
982 fi
983 done
984 }
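# Example (illustrative, hypothetical tag): "parse_yaml v9.0 nbi lcm" rewrites
# the image references in $OSM_K8S_WORK_DIR/nbi.yaml and lcm.yaml from
# opensourcemano/<module>:... to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<module>:v9.0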
985
986 function update_manifest_files() {
987 osm_services="nbi lcm ro pol mon light-ui ng-ui keystone pla"
988 list_of_services=""
989 for module in $osm_services; do
990 module_upper="${module^^}"
991 if [ "$module_upper" == "LIGHT-UI" ]; then
992 module_upper="LW-UI"
993 fi
994 if ! echo $TO_REBUILD | grep -q $module_upper ; then
995 list_of_services="$list_of_services $module"
996 fi
997 done
998 list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
999 if [ ! "$OSM_DOCKER_TAG" == "8" ]; then
1000 parse_yaml $OSM_DOCKER_TAG $list_of_services
1001 fi
1002 if [ -n "$MODULE_DOCKER_TAG" ]; then
1003 parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
1004 fi
1005 }
1006
1007 function namespace_vol() {
1008 osm_services="nbi lcm ro pol mon kafka mysql prometheus"
1009 for osm in $osm_services; do
1010 $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
1011 done
1012 }
1013
1014 function init_docker_swarm() {
1015 if [ "${DEFAULT_MTU}" != "1500" ]; then
1016 DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
1017 DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
1018 sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
1019 fi
1020 sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
1021 return 0
1022 }
1023
1024 function create_docker_network() {
1025 echo "creating network"
1026 sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
1027 echo "creating network DONE"
1028 }
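# Note: both the swarm gateway bridge (init_docker_swarm) and the overlay
# network created above inherit DEFAULT_MTU, so container traffic is not
# fragmented on hosts whose default interface uses a non-1500 MTU.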
1029
1030 function deploy_lightweight() {
1031
1032 echo "Deploying lightweight build"
1033 OSM_NBI_PORT=9999
1034 OSM_RO_PORT=9090
1035 OSM_KEYSTONE_PORT=5000
1036 OSM_UI_PORT=80
1037 OSM_MON_PORT=8662
1038 OSM_PROM_PORT=9090
1039 OSM_PROM_CADVISOR_PORT=8080
1040 OSM_PROM_HOSTPORT=9091
1041 OSM_GRAFANA_PORT=3000
1042 [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
1043 #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
1044
1045 if [ -n "$NO_HOST_PORTS" ]; then
1046 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
1047 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
1048 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
1049 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
1050 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
1051 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
1052 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
1053 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
1054 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
1055 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
1056 else
1057 OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
1058 OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
1059 OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
1060 OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
1061 OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
1062 OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
1063 OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
1064 OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
1065 #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
1066 [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
1067 fi
1068 echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
1069 echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1070 echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1071 echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1072 echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1073 echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1074 echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1075 echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1076 echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
1077
1078 pushd $OSM_DOCKER_WORK_DIR
1079 if [ -n "$INSTALL_PLA" ]; then
1080 track deploy_osm_pla
1081 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
1082 else
1083 sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
1084 fi
1085 popd
1086
1087 echo "Finished deployment of lightweight build"
1088 }
1089
1090 function deploy_elk() {
1091 echo "Pulling docker images for ELK"
1092 sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
1093 sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
1094 sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
1095 sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
1096 sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
1097 echo "Finished pulling elk docker images"
1098 $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
1099 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
1100 remove_stack osm_elk
1101 echo "Deploying ELK stack"
1102 sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
1103 echo "Waiting for ELK stack to be up and running"
1104 time=0
1105 step=5
1106 timelength=40
1107 elk_is_up=1
1108 while [ $time -le $timelength ]; do
1109 if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
1110 elk_is_up=0
1111 break
1112 fi
1113 sleep $step
1114 time=$((time+step))
1115 done
1116 if [ $elk_is_up -eq 0 ]; then
1117 echo "ELK is up and running. Trying to create index pattern..."
1118 #Create index pattern
1119 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1120 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1121 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
1122 #Make it the default index
1123 curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1124 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1125 -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
1126 else
1127 echo "Cannot connect to Kibana to create index pattern."
1128 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
1129 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1130 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
1131 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
1132 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
1133 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
1134 -d"{\"value\":\"filebeat-*\"}"'
1135 fi
1136 echo "Finished deployment of ELK stack"
1137 return 0
1138 }
1139
1140 function add_local_k8scluster() {
1141 /usr/bin/osm --all-projects vim-create \
1142 --name _system-osm-vim \
1143 --account_type dummy \
1144 --auth_url http://dummy \
1145 --user osm --password osm --tenant osm \
1146 --description "dummy" \
1147 --config '{management_network_name: mgmt}'
1148 /usr/bin/osm --all-projects k8scluster-add \
1149 --creds ${HOME}/.kube/config \
1150 --vim _system-osm-vim \
1151 --k8s-nets '{"net1": null}' \
1152 --version '1.15' \
1153 --description "OSM Internal Cluster" \
1154 _system-osm-k8s
1155 }
1156
1157 function install_lightweight() {
1158 track checkingroot
1159 [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1160 track noroot
1161
1162 if [ -n "$KUBERNETES" ]; then
1163 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1164 1. Install and configure LXD
1165 2. Install juju
1166 3. Install docker CE
1167 4. Disable swap space
1168 5. Install and initialize Kubernetes
1169 as pre-requirements.
1170 Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1171
1172 else
1173 [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
1174 fi
1175 track proceed
1176
1177 echo "Installing lightweight build of OSM"
1178 LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
1179 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1180 DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
1181 [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
1182 [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
1183 DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1184 [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
1185 DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
1186
1187 # if no host is passed in, we need to install lxd/juju, unless explicitly asked not to
1188 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1189 need_packages_lw="snapd"
1190 echo -e "Checking required packages: $need_packages_lw"
1191 dpkg -l $need_packages_lw &>/dev/null \
1192 || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1193 || sudo apt-get update \
1194 || FATAL "failed to run apt-get update"
1195 dpkg -l $need_packages_lw &>/dev/null \
1196 || ! echo -e "Installing $need_packages_lw requires root privileges." \
1197 || sudo apt-get install -y $need_packages_lw \
1198 || FATAL "failed to install $need_packages_lw"
1199 install_lxd
1200 fi
1201
1202 track prereqok
1203
1204 [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)
1205
1206 echo "Creating folders for installation"
1207 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
1208 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
1209 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1210
1211 #Installs Kubernetes
1212 if [ -n "$KUBERNETES" ]; then
1213 install_kube
1214 track install_k8s
1215 init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
1216 kube_config_dir
1217 track init_k8s
1218 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1219 # uninstall OSM MONITORING
1220 uninstall_k8s_monitoring
1221 track uninstall_k8s_monitoring
1222 fi
1223 #remove old namespace
1224 remove_k8s_namespace $OSM_STACK_NAME
1225 deploy_cni_provider
1226 taint_master_node
1227 install_k8s_storageclass
1228 track k8s_storageclass
1229 install_k8s_metallb
1230 track k8s_metallb
1231 else
1232 #install_docker_compose
1233 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1234 track docker_swarm
1235 fi
1236
1237 [ -z "$INSTALL_NOJUJU" ] && install_juju
1238 track juju_install

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=$(cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /')
                local client_cert=$(cat ~/.osm/client.crt | sed 's/^/        /')
                local client_key=$(cat ~/.osm/client.key | sed 's/^/        /')
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
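                # The eight-space indent added by sed above is what makes the
                # credentials file valid YAML: each PEM line must sit deeper
                # than the "server-cert:"/"client-cert:"/"client-key:" keys,
                # which are themselves nested six spaces deep.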
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
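
        # The "add || update" pattern keeps the juju calls above idempotent: on a
        # re-run the existing cloud/credential is updated instead of aborting the
        # install. To inspect the result manually, something like this works:
        #   juju clouds
        #   juju credentials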
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
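
        # "juju show-controller" reports the endpoint roughly as
        #   api-endpoints: ['10.0.2.15:17070']
        # the first awk keeps the text between the single quotes and the second
        # strips the port, leaving just the controller IP.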
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
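    # The CA cert is kept base64-encoded on a single line (tr removes the
    # newlines) so it can travel safely through env files; consumers decode it
    # with e.g. "echo $OSM_VCA_CACERT | base64 -d".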

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
            { echo -e "OSM is not healthy, but will probably converge to a healthy state soon." ; \
            echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" ; \
            track osm_unhealthy ; }
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
            { echo -e "OSM is not healthy, but will probably converge to a healthy state soon." ; \
            echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" ; \
            track osm_unhealthy ; }
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}

function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook, sourcing $1 if it is an openrc file;
    # otherwise $1 is treated as a cloud name from clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
            -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return 0
}
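
# Illustrative invocations of the OpenStack path (flag values are examples only):
#   ./full_install_osm.sh -O ~/openrc.sh -N public    # openrc file
#   ./full_install_osm.sh -O mycloud -N public        # cloud name from clouds.yaml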

function install_vimemu() {
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build the vim-emu docker image
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu environment variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}

function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}

function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}

function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}

function track(){
    ctime=$(date +%s)
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
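
# For reference, a call like "track start" during a lightweight install requests
# a URL of roughly this shape (SESSION_ID is the epoch timestamp set below):
#   http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=1600000000&event=lw_start&ce_duration=42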

JUJU_AGENT_VERSION=2.8.6
UNINSTALL=""
DEVELOP=""
UPDATE=""
RECONFIGURE=""
TEST_INSTALLER=""
INSTALL_LXD=""
SHOWOPTS=""
COMMIT_ID=""
ASSUME_YES=""
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseNINE"
REPOSITORY="stable"
INSTALL_VIMEMU=""
INSTALL_PLA=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_ONLY=""
INSTALL_ELK=""
TO_REBUILD=""
INSTALL_NOLXD=""
INSTALL_NODOCKER=""
INSTALL_NOJUJU=""
KUBERNETES="y"
NGUI="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
SESSION_ID=$(date +%s)
OSM_DEVOPS=
OSM_VCA_HOST=
OSM_VCA_SECRET=
OSM_VCA_PUBKEY=
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_STACK_NAME=osm
NO_HOST_PORTS=""
DOCKER_NOBUILD=""
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
WORKDIR_SUDO=sudo
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
PULL_IMAGES="y"
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
GRAFANA_TAG=latest
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
KEYSTONEDB_TAG=10
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=

while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
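
# A few illustrative invocations (values are examples only):
#   ./full_install_osm.sh -y                    # default install (k8s, NG-UI), no prompts
#   ./full_install_osm.sh -c swarm -n lwui      # docker swarm with the light UI
#   ./full_install_osm.sh --uninstall -y        # remove a previous installation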

[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $OSM_DOCKER_TAG "$@"
    fi

    exit 0
fi

# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

need_packages="git wget curl tar"

[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi

. $OSM_DEVOPS/common/all_funcs

[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
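# e.g. with "-s mystack" on k8s, the working dir becomes /etc/osm/stack/mystack
# and the pod manifests are generated under /etc/osm/stack/mystack/osm_pods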
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

# Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

# Install osmclient

# Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"
