OSM Client Installation Fix
[osm/devops.git] / installers / full_install_osm.sh
1 #!/bin/bash
2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
function usage(){
    # Print the installer's help text on stdout.
    # Fixes vs. previous version: "confifured" -> "configured",
    # "--nojuju: do not juju" -> "do not install juju", and missing space
    # after "--nodockerbuild:".
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " OPTIONS"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " ..."
    echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (LW-UI, NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not install juju, assumes already installed"
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"

}
84
85 # takes a juju/accounts.yaml file and returns the password specific
86 # for a controller. I wrote this using only bash tools to minimize
87 # additions of other packages
88 function parse_juju_password {
89 password_file="${HOME}/.local/share/juju/accounts.yaml"
90 local controller_name=$1
91 local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
92 sed -ne "s|^\($s\):|\1|" \
93 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
94 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
95 awk -F$fs -v controller=$controller_name '{
96 indent = length($1)/2;
97 vname[indent] = $2;
98 for (i in vname) {if (i > indent) {delete vname[i]}}
99 if (length($3) > 0) {
100 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
101 if (match(vn,controller) && match($2,"password")) {
102 printf("%s",$3);
103 }
104 }
105 }'
106 }
107
function generate_secret() {
    # Emit a 32-character alphanumeric secret on stdout.
    # Read /dev/urandom as a stream: the previous `head /dev/urandom` form
    # took only the first 10 newline-delimited chunks of random bytes, which
    # could (rarely) leave fewer than 32 characters after filtering.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
111
function remove_volumes() {
    # Delete OSM storage for the given stack/namespace ($1): on swarm, the
    # per-stack docker volumes; on kubernetes, the host-path volume directory.
    if [ -z "$KUBERNETES" ]; then
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    else
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    fi
}
125
function remove_network() {
    # Tear down the per-stack docker network named "net<stack>" ($1 = stack).
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
130
function remove_iptables() {
    # Delete the PREROUTING DNAT rule that exposed the juju controller (port
    # 17070) on the host's default-route IP, then persist the iptables state.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Resolve the controller IP from juju's api-endpoints output
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        # Fallback via `route -n`, consistent with install_lxd. The previous
        # code repeated the exact same `ip route` command here, so the
        # fallback could never produce a different result.
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # -C checks whether the rule exists; only then delete it and persist
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
151
function remove_stack() {
    # Remove a docker swarm stack ($1) and poll (up to 30s) until every
    # container of the stack has terminated; FATAL if some survive.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            # wc -l of the task list == 0 once everything is gone
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            [ "${result}" == "0" ] && break
            COUNTER=$((COUNTER+1))
            sleep 1
        done
        if [ "${result}" != "0" ]; then
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        else
            echo "All dockers of the stack ${stack} were removed"
        fi
        sleep 5
    fi
}
175
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace ($1) removes every OSM resource inside it.
    kubectl delete ns $1
}
180
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Guard clause: bail out (status 0, like the original fall-through) when
    # any helm release is still deployed, to avoid breaking other users.
    [ "$(helm ls -q)" == "" ] || return 0
    sudo helm reset --force
    kubectl delete --namespace kube-system serviceaccount tiller
    kubectl delete clusterrolebinding tiller-cluster-rule
    sudo rm /usr/local/bin/helm
    rm -rf $HOME/.helm
}
191
function remove_crontab_job() {
    # Filter the update-juju-lxc-images job out of the user's crontab.
    # The path must be in double quotes so ${OSM_DEVOPS} expands: with the
    # previous single quotes grep looked for the literal string
    # '${OSM_DEVOPS}/...', never matched, and the cron job was never removed.
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
195
#Uninstall osmclient
function uninstall_osmclient() {
    # Purge both the python2 and python3 flavours of the client package.
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y $pkg
    done
}
201
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Uninstall a lightweight (container-based) OSM deployment.
    # With INSTALL_ONLY set, only the requested addon (ELK) is removed;
    # otherwise the whole deployment is torn down: the k8s namespace (or
    # swarm stacks), docker images, volumes, network, NAT rules, the work
    # dir, the juju controller and finally the osmclient package.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # TODO: clean-up of images should take into account if other tags were used for specific modules
        # Run the image removals inside a `newgrp docker` shell so the docker
        # group membership added during install is effective
        newgrp docker << EONG
for module in ro lcm keystone nbi mon pol pla osmclient; do
    docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${OSM_DOCKER_TAG}
done
EONG

        # Remove whichever UI image this deployment used
        if [ -n "$NGUI" ]; then
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}"
        else
            sg docker -c "docker image rm ${DOCKER_REGISTRY_URL}${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}"
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
            [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        fi
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Only kill the juju controller when this installer bootstrapped it
        # (CONTROLLER_NAME empty means no external controller was supplied)
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_STACK_NAME"
    fi
    remove_crontab_job

    # Cleanup Openstack installer venv
    if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
        rm -r $OPENSTACK_PYTHON_VENV
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && uninstall_osmclient
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
260
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    # Preseed the debconf answers so the package installs without prompting,
    # then install it only when it is not already present.
    echo -e "\nChecking required packages: iptables-persistent"
    dpkg -l iptables-persistent &>/dev/null && return
    echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
    for ipver in v4 v6; do
        echo iptables-persistent iptables-persistent/autosave_$ipver boolean true | sudo debconf-set-selections
    done
    sudo apt-get -yq install iptables-persistent
}
271
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # iptables-persistent must be in place so the rules written by nat_osm
    # survive a reboot
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    # The actual rule generation is delegated to the nat_osm helper script
    sudo $OSM_DEVOPS/installers/nat_osm
}
280
function FATAL(){
    # Report an unrecoverable error ($1 = reason) and abort the installer
    # with exit status 1.
    printf '%s\n' "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
285
function update_juju_images(){
    # Ensure a weekly cron job (Sat 04:00) exists to refresh the juju LXC
    # images, then run one refresh immediately.
    # NOTE(review): the entry embeds "$USER" as a sixth field; user crontabs
    # have no user column, so that token becomes part of the command line —
    # verify this is intended.
    crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
290
function install_lxd() {
    # Install and configure the LXD snap (replacing any deb-packaged LXD),
    # preseed `lxd init` with the host's default IP on port 8443, and align
    # the container/bridge MTU with the default-route interface.
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    # Inject core.https_address into the preseed so the daemon listens on the
    # default IP, then feed it to `lxd init`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    # Fallback for hosts where `ip route` yields nothing
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    # Match profile/bridge MTU to the host uplink to avoid fragmentation
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
314
function ask_user(){
    # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
    # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
    # Return: true(0) if user type 'yes'; false (1) if user type 'no'
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        case "${USER_CONFIRMATION,,}" in
            "")
                # Empty answer: apply the default action when one is defined
                [ "$2" == 'y' ] && return 0
                [ "$2" == 'n' ] && return 1
                ;;
            y|yes) return 0 ;;
            n|no)  return 1 ;;
        esac
        # Unrecognized answer (or empty with no default): keep asking
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
328
function install_osmclient(){
    # Install the OSM client and Information Model from the configured apt
    # repository, plus the python dependencies declared in each package's
    # bundled requirements.txt. Finally print guidance about the
    # OSM_HOSTNAME/OSM_RO_HOSTNAME env vars the client needs.
    # Strip the option prefixes in case the values still carry them
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    # Trust the OSM release key and register the apt repository
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        sudo -H LC_ALL=C python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        # Native libs installed right before the pip requirements — presumably
        # build deps for them (pycurl?); TODO confirm
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev
        sudo -H LC_ALL=C python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Non-lightweight installs: point the client at the SO-ub/RO lxc containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
366
function install_prometheus_nodeexporter(){
    # Install prometheus node_exporter (version $PROMETHEUS_NODE_EXPORTER_TAG)
    # as a systemd service running under a dedicated no-login system user.
    # No-op when the service is already active.
    if (systemctl -q is-active node_exporter)
    then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download the release tarball, install the binary, clean up /tmp
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register, start and enable the systemd unit shipped with devops
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
392
function uninstall_prometheus_nodeexporter(){
    # Inverse of install_prometheus_nodeexporter: stop and disable the
    # service, then remove its unit file, dedicated user and binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
    return 0
}
402
function install_docker_ce() {
    # installs and configures Docker CE
    # Installs docker-ce from Docker's own apt repository, adds $USER to the
    # docker group, and — when DOCKER_PROXY_URL is set — configures it as a
    # registry mirror in /etc/docker/daemon.json.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    if [ -n "${DOCKER_PROXY_URL}" ]; then
        echo "Configuring docker proxy ..."
        if [ -f /etc/docker/daemon.json ]; then
            # daemon.json exists: rewrite the registry-mirrors entry in place,
            # or splice a new one in right after the opening brace
            if grep -q registry-mirrors /etc/docker/daemon.json; then
                sudo sed -i "s|registry-mirrors.*|registry-mirrors\": [\"${DOCKER_PROXY_URL}\"] |" /etc/docker/daemon.json
            else
                sudo sed -i "s|{|{\n  \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"],|" /etc/docker/daemon.json
            fi
        else
            # No daemon.json yet: create one with just the mirror entry
            sudo bash -c "cat << EOF > /etc/docker/daemon.json
{
  \"registry-mirrors\": [\"${DOCKER_PROXY_URL}\"]
}
EOF"
        fi
        sudo systemctl daemon-reload
        sudo service docker restart
        echo "... restarted Docker service again"
    fi
    # Sanity check: fail the whole install if docker is not usable
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
441
function install_docker_compose() {
    # installs and configures docker-compose
    # The release can be overridden via DOCKER_COMPOSE_VERSION; it defaults
    # to 1.18.0 (the previously hard-coded value), so existing callers see
    # identical behavior.
    local compose_version="${DOCKER_COMPOSE_VERSION:-1.18.0}"
    echo "Installing Docker Compose ..."
    sudo curl -L "https://github.com/docker/compose/releases/download/${compose_version}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
449
function install_juju() {
    # Install the juju 2.8 snap, make sure /snap/bin is on PATH, and refresh
    # the cached juju LXC images.
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.8/stable
    case ":$PATH:" in
        *":/snap/bin:"*) ;;
        *) PATH="/snap/bin:${PATH}" ;;
    esac
    update_juju_images
    echo "Finished installation of juju"
    return 0
}
458
function juju_createcontroller() {
    # Bootstrap a juju controller named after the OSM stack on the LXD cloud
    # (unless one already exists), verify it appears in `juju controllers`,
    # and enable the k8s-operators feature flag.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial --agent-version=$JUJU_AGENT_VERSION $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 must reach awk, not the shell: previously $1 was left unquoted
    # inside the double-quoted program, expanded to the (empty) shell
    # positional, and awk printed the whole line — the wc -l count only
    # worked by accident.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
    # Quote the brackets so the shell cannot glob-expand them against files
    juju controller-config "features=[k8s-operators]"
}
468
function juju_addk8s() {
    # Register the local kubernetes (config read from ~/.kube/config on
    # stdin) as a cloud on the OSM controller, with openebs-hostpath storage.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --controller $OSM_STACK_NAME --storage openebs-hostpath < $HOME/.kube/config
}
472
function juju_createcontroller_k8s(){
    # Add the kubernetes cloud (config read from ~/.kube/config on stdin) to
    # the local juju client, then bootstrap a controller on it exposed via a
    # LoadBalancer service.
    juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client < $HOME/.kube/config
    juju bootstrap $OSM_VCA_K8S_CLOUDNAME $OSM_STACK_NAME \
            --config controller-service-type=loadbalancer \
            --agent-version=$JUJU_AGENT_VERSION
}
479
480
function juju_addlxd_cloud(){
    # Register the local LXD daemon as a juju cloud ("lxd-cloud") on the OSM
    # controller: write cloud/credential YAML files, generate a self-signed
    # client certificate, and make LXD trust it.
    mkdir -p /tmp/.osm
    OSM_VCA_CLOUDNAME="lxd-cloud"
    LXDENDPOINT=$DEFAULT_IP
    LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
    LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml

    # Cloud definition: certificate auth against the local LXD https endpoint
    cat << EOF > $LXD_CLOUD
clouds:
  $OSM_VCA_CLOUDNAME:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$LXDENDPOINT:8443"
    config:
      ssl-hostname-verification: false
EOF
    # Self-signed client cert/key pair used by juju to authenticate to LXD
    openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
    # Indent the PEM blocks so they nest correctly under the "|" YAML scalars
    local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
    local client_cert=`cat /tmp/.osm/client.crt | sed 's/^/        /'`
    local client_key=`cat /tmp/.osm/client.key | sed 's/^/        /'`

    cat << EOF > $LXD_CREDENTIALS
credentials:
  $OSM_VCA_CLOUDNAME:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
    # Trust the new client certificate, then register cloud and credential
    lxc config trust add local: /tmp/.osm/client.crt
    juju add-cloud -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
    juju add-credential -c $OSM_STACK_NAME $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
    sg lxd -c "lxd waitready"
    juju controller-config features=[k8s-operators]
}
520
521
function juju_createproxy() {
    # Idempotently add a DNAT rule forwarding <host default IP>:17070 to the
    # VCA host, so external clients can reach the juju controller, and
    # persist the rules across reboots.
    check_install_iptables_persistent

    # -C checks whether the rule already exists; only append when it does not
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
530
function docker_login() {
    # Log in to the private docker registry using DOCKER_REGISTRY_USER /
    # DOCKER_REGISTRY_PASSWORD. The password is fed via stdin
    # (--password-stdin) so it does not appear in `ps` output or shell
    # history, unlike the former `-p <password>` form.
    echo "Docker login"
    printf '%s' "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
}
535
function generate_docker_images() {
    # Obtain every docker image OSM needs: third-party images are always
    # pulled; OSM module images are either pulled from the registry
    # (PULL_IMAGES set) or built from each module's git sources at COMMIT_ID.
    # TO_REBUILD, when set, restricts the work to the listed modules.
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    # --- third-party base images, gated on TO_REBUILD membership ---
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        # --- pull pre-built OSM module images from the registry ---
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA osmclient; do
            module_lower=${module,,}
            # Only one UI flavour is handled: LW-UI unless NGUI is set
            if [ $module == "LW-UI" ]; then
                if [ -n "$NGUI" ]; then
                    continue
                else
                    module_lower="light-ui"
                fi
            fi
            if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                continue
            fi
            # PLA is optional and only processed when INSTALL_PLA is set
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # -T overrides the tag for modules listed in TO_REBUILD
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        # --- build OSM module images from source at COMMIT_ID ---
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"

        for module in MON POL NBI KEYSTONE RO LCM NG-UI LW-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "LW-UI" ]; then
                    if [ -n "$NGUI" ]; then
                        continue
                    else
                        module_lower="light-ui"
                    fi
                fi
                if [ $module == "NG-UI" -a ! -n "$NGUI" ]; then
                    continue
                fi
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                # Clone the module from gerrit and build its Dockerfile
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            # osmclient is built from devops, with the repo settings as args
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
}
633
function cmp_overwrite() {
    # Copy $1 over $2 unless they are already identical. When $2 exists and
    # differs, ask the user before overwriting; cp -b keeps a backup.
    file1="$1"
    file2="$2"
    # cmp -s: status-only comparison. The previous `if ! $(cmp ...)` only
    # worked because an empty command inherits the exit status of the last
    # command substitution — an anti-pattern, not an idiom.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
645
function generate_docker_compose_files() {
    # Stage the docker-compose files into the work dir: the core stack, the
    # UI flavour selected by $NGUI, and optionally the PLA compose file.
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
    local ui_compose=docker-compose-lightui.yaml
    [ -n "$NGUI" ] && ui_compose=docker-compose-ngui.yaml
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/${ui_compose} $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml
    if [ -n "$INSTALL_PLA" ]; then
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
    fi
}
659
function generate_k8s_manifest_files() {
    #Kubernetes resources
    # Copy the pod manifests into the work dir, then delete the ones this
    # deployment will not use: mongo.yaml and the non-selected UI flavour.
    $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/mongo.yaml
    local unused_ui=ng-ui.yaml
    [ -n "$NGUI" ] && unused_ui=light-ui.yaml
    $WORKDIR_SUDO rm -f $OSM_K8S_WORK_DIR/${unused_ui}
}
670
function generate_prometheus_grafana_files() {
    # Stage prometheus, grafana and exporter configuration files into the
    # work dir. Nothing to do for kubernetes deployments.
    [ -n "$KUBERNETES" ] && return

    # Prometheus files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

    # Grafana files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
    local grafana_file
    for grafana_file in dashboards-osm.yml datasource-prometheus.yml osm-sample-dashboard.json osm-system-dashboard.json; do
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/${grafana_file} $OSM_DOCKER_WORK_DIR/grafana/${grafana_file}
    done

    # Prometheus Exporters files
    $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
}
688
function generate_docker_env_files() {
    # Create (or update in place) the per-component docker env files under
    # $OSM_DOCKER_WORK_DIR. Existing values are refreshed with sed; missing
    # ones are appended. Secrets are generated fresh but only written when the
    # corresponding env file does not exist yet, so re-runs keep old secrets.
    # Reads: OSM_DOCKER_WORK_DIR, WORKDIR_SUDO, OSM_DATABASE_COMMONKEY,
    #        OSM_VCA_* variables, DEFAULT_IP. Calls: generate_secret.
    echo "Doing a backup of existing env files"
    # Only back up env files that actually exist; on a fresh install none do
    # and an unconditional cp would just print "cannot stat" errors.
    for env_file in keystone-db keystone lcm lwui mon nbi pol ro-db ro; do
        [ -f "$OSM_DOCKER_WORK_DIR/${env_file}.env" ] && \
            $WORKDIR_SUDO cp "$OSM_DOCKER_WORK_DIR/${env_file}.env"{,~}
    done

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # For each VCA parameter: append when absent, otherwise refresh the value
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # API proxy is only set for non-K8s installs (see install_lightweight)
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # Optional settings are written commented-out as documentation for the operator
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO: the mysql root password is shared between ro-db and ro
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone: SERVICE_PASSWORD is also consumed by NBI and MON below
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi


    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
833
function generate_osmclient_script () {
    # Write a one-line wrapper under $OSM_DOCKER_WORK_DIR/osm that launches
    # the osmclient sidecar container attached to the OSM docker network,
    # and make it executable.
    local osmclient_image="${DOCKER_REGISTRY_URL}${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    local wrapper_path="$OSM_DOCKER_WORK_DIR/osm"
    printf '%s\n' "docker run -ti --network net${OSM_STACK_NAME} ${osmclient_image}" | $WORKDIR_SUDO tee "$wrapper_path"
    $WORKDIR_SUDO chmod +x "$wrapper_path"
    echo "osmclient sidecar container can be found at: $wrapper_path"
}
839
#installs kubernetes packages
function install_kube() {
    # Configure the Google apt repository, install a pinned Kubernetes
    # toolchain (kubelet/kubeadm/kubectl) and hold the packages so that
    # unattended upgrades cannot move the cluster to an untested version.
    local kube_pkg_version="1.15.0-00"
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=${kube_pkg_version} kubeadm=${kube_pkg_version} kubectl=${kube_pkg_version}
    sudo apt-mark hold kubelet kubeadm kubectl
}
850
#initializes kubernetes control plane
function init_kubeadm() {
    # Initialize the Kubernetes control plane with kubeadm.
    # $1 - path to the kubeadm cluster configuration yaml
    # Swap must be off or kubelet refuses to start.
    sudo swapoff -a
    sudo kubeadm init --config $1
    sleep 5
}
857
function kube_config_dir() {
    # Give the current (non-root) user kubectl access by copying the admin
    # kubeconfig produced by kubeadm into ~/.kube/config and fixing ownership.
    # The K8S_MANIFEST_DIR check is a sanity check that devops assets exist.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
864
function install_k8s_storageclass() {
    # Install the OpenEBS operator and wait (up to 400s, polling every 15s)
    # until the openebs-hostpath storageclass is available, then mark it as
    # the cluster default.
    OPENEBS_DIR="$(mktemp -d -q --tmpdir "openebs.XXXXXX")"
    trap 'rm -rf "${OPENEBS_DIR}"' EXIT
    wget -q https://openebs.github.io/charts/openebs-operator-1.6.0.yaml -P $OPENEBS_DIR
    kubectl apply -f $OPENEBS_DIR
    local storageclass_timeout=400
    local waited=0
    local storageclass_ready=""
    echo "Waiting for storageclass"
    while (( waited < storageclass_timeout )); do
        if kubectl get storageclass openebs-hostpath &> /dev/null; then
            echo "Storageclass available"
            storageclass_ready="y"
            break
        fi
        waited=$((waited + 15))
        sleep 15
    done
    [ -n "$storageclass_ready" ] || FATAL "Storageclass not ready after $storageclass_timeout seconds. Cannot install openebs"
    kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
}
890
function install_k8s_metallb() {
    # Deploy MetalLB and configure a single-address layer2 pool so that
    # LoadBalancer services resolve to the host's default IP.
    METALLB_IP_RANGE=$DEFAULT_IP-$DEFAULT_IP
    kubectl apply -f ${OSM_DEVOPS}/installers/k8s/metallb/metallb.yaml
    kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - $METALLB_IP_RANGE
EOF
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel CNI manifest into a temp dir and apply it;
    # abort the installation if the apply fails.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
915
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # Create the OSM namespace and one generic k8s secret per component,
    # each populated from the component's env file.
    kubectl create ns $OSM_STACK_NAME
    local component
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME \
            --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
928
#taints K8s master node
function taint_master_node() {
    # Remove the NoSchedule taint from the master node so that OSM pods can
    # be scheduled on a single-node cluster.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    sleep 5
}
935
#deploys osm pods and services
function deploy_osm_services() {
    # Apply every K8s manifest found in $OSM_K8S_WORK_DIR in the OSM namespace.
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
940
#deploy charmed services
function deploy_charmed_services() {
    # Create a juju model on the K8s cloud and deploy the charmed mongodb
    # used by OSM (mongodb-k8s with sidecar enabled, replica set rs0).
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    # deploy mongodb charm
    namespace=$OSM_STACK_NAME
    juju deploy cs:~charmed-osm/mongodb-k8s \
        --config enable-sidecar=true \
        --config replica-set=rs0 \
        --config namespace=$namespace \
        -m $namespace
}
952
function deploy_osm_pla_service() {
    # Deploy the optional Placement (PLA) module: point its hostPath volume
    # at the configured namespace volume, then apply its manifests.
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
959
#Install helm and tiller
function install_helm() {
    # Install the helm v2 client if it is not already on PATH, then create
    # the tiller service account / cluster role binding and initialize helm.
    # Waits up to 120s for the tiller deployment to become ready; continues
    # with a warning if it does not.
    # Fixes over previous version: use 'command -v' instead of executing
    # 'helm' to probe for presence; treat ANY failure of the serviceaccount
    # lookup as "not configured" (previously only exit code 1 was handled);
    # quote the status comparison; avoid returning non-zero when tiller is
    # ready (the old trailing '[ ... ] && echo' inverted the exit status).
    if ! command -v helm > /dev/null 2>&1; then
        # Helm is not installed. Install helm
        echo "Helm is not installed, installing ..."
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm -r linux-amd64
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    if ! kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --stable-repo-url https://charts.helm.sh/stable --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        local tiller_timeout=120
        local counter=0
        local tiller_status=""
        while (( counter < tiller_timeout )); do
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            if [ -n "$tiller_status" ] && [ "$tiller_status" == "1/1" ]; then
                echo "Tiller ready"
                break
            fi
            counter=$((counter + 5))
            sleep 5
        done
        if [ "$tiller_status" != "1/1" ]; then
            echo "Tiller is NOT READY YET. Installation will continue"
        fi
    fi
}
996
function parse_yaml() {
    # Rewrite the container image references in the K8s manifests: replace
    # 'opensourcemano/<module>:<any tag>' with the configured registry, user
    # and tag for every requested module.
    # $1     - docker tag to set
    # $2...  - list of module names to update
    # Fix over previous version: the progress messages contained leftover
    # sed-escaping ('opensourcemano\/') that printed literal backslashes.
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA is optional and lives in its own manifest directory;
            # only touch it when its installation was requested
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
                $WORKDIR_SUDO sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            echo "Updating K8s manifest file from opensourcemano/${module}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}"
            $WORKDIR_SUDO sed -i "s#opensourcemano/${module}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
}
1013
function update_manifest_files() {
    # Decide which OSM service manifests must be retagged and delegate the
    # actual edit to parse_yaml. Services listed in TO_REBUILD keep their
    # module-specific tag (MODULE_DOCKER_TAG); everything else gets
    # OSM_DOCKER_TAG unless it is the default release tag "9".
    local ui_service="light-ui"
    [ -n "$NGUI" ] && ui_service="ng-ui"
    osm_services="nbi lcm ro pol mon $ui_service keystone pla"
    list_of_services=""
    for module in $osm_services; do
        # TO_REBUILD uses upper-case names, and the light UI is listed there
        # as LW-UI rather than LIGHT-UI
        module_upper="${module^^}"
        [ "$module_upper" == "LIGHT-UI" ] && module_upper="LW-UI"
        echo $TO_REBUILD | grep -q $module_upper || list_of_services="$list_of_services $module"
    done
    # Map the TO_REBUILD spelling back to manifest file names
    list_of_services_to_rebuild=$(echo ${TO_REBUILD,,} |sed "s/lw-ui/light-ui/g")
    [ "$OSM_DOCKER_TAG" == "9" ] || parse_yaml $OSM_DOCKER_TAG $list_of_services
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
}
1038
function namespace_vol() {
    # Rewrite the hostPath of every stateful service manifest so its volume
    # lives under $OSM_NAMESPACE_VOL instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mysql prometheus"
    for osm in $osm_services; do
        manifest="$OSM_K8S_WORK_DIR/$osm.yaml"
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$manifest"
    done
}
1045
function init_docker_swarm() {
    # Initialize a single-node docker swarm advertising the host default IP.
    # When the default interface MTU is non-standard, pre-create the
    # docker_gwbridge network with that MTU before 'swarm init' so swarm
    # traffic is not fragmented.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
    # pick a free 172.x.y.0 subnet: take the highest 172.* subnet already used
    # by docker networks and bump its second octet by one (or -1 if exhausted)
      DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
      DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
      sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
    return 0
}
1055
function create_docker_network() {
    # Create the attachable overlay network shared by all OSM containers,
    # honouring the MTU of the host's default interface.
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
1061
function deploy_lightweight() {
    # Deploy the OSM docker swarm stack: compute the set of published ports,
    # dump the deployment environment to osm_ports.sh (sourced at deploy time)
    # and run 'docker stack deploy', optionally adding the PLA compose file.

    echo "Deploying lightweight build"
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    # With NO_HOST_PORTS only the container-side port is listed (no host
    # publishing); otherwise each service is published as host:container.
    if [ -n "$NO_HOST_PORTS" ]; then
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        # prometheus is remapped on the host (9091) to avoid clashing with RO (9090)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi
    # osm_ports.sh is sourced by the deploy command below to export these vars
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        track deploy_osm_pla
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/docker-compose-ui.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
1121
function deploy_elk() {
    # Pull the ELK images, deploy the osm_elk docker stack attached to the
    # OSM network and wait for Kibana; once up, create the filebeat index
    # pattern and make it the default index. If Kibana is not reachable in
    # time, print the curl commands for the operator to run later.
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    remove_stack osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # poll Kibana's status endpoint every $step seconds, up to $timelength
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
    return 0
}
1171
function add_local_k8scluster() {
    # Register the local Kubernetes cluster in OSM: a dummy VIM account is
    # created first (K8s clusters must be attached to a VIM), then the
    # cluster is added using the admin kubeconfig.
    /usr/bin/osm --all-projects vim-create \
      --name _system-osm-vim \
      --account_type dummy \
      --auth_url http://dummy \
      --user osm --password osm --tenant osm \
      --description "dummy" \
      --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
      --creds ${HOME}/.kube/config \
      --vim _system-osm-vim \
      --k8s-nets '{"net1": null}' \
      --version '1.15' \
      --description "OSM Internal Cluster" \
      _system-osm-k8s
}
1188
function install_lightweight() {
    # Main driver of the lightweight (container based) OSM installation:
    # checks prerequisites, installs docker/K8s/juju as needed, generates the
    # configuration and env files, deploys the OSM services, registers the
    # local K8s cluster and finally checks OSM health.
    # Fixes over previous version: '[ -n "$INSTALL_PLA"]' was missing the
    # space before ']' (test always errored, so PLA was never deployed), and
    # '[ -z "OSM_DATABASE_COMMONKEY" ]' was missing the '$' (check could
    # never fire).
    track checkingroot
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track noroot

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
        1. Install and configure LXD
        2. Install juju
        3. Install docker CE
        4. Disable swap space
        5. Install and initialize Kubernetes
        as pre-requirements.
        Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi
    track proceed

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine default interface, its IP and MTU; these drive the docker and
    # VCA configuration below
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a s ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        install_lxd
    fi

    track prereqok

    [ -n "$INSTALL_NODOCKER" ] || (install_docker_ce && track docker_ce)

    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    #Installs Kubernetes
    if [ -n "$KUBERNETES" ]; then
        install_kube
        track install_k8s
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
        kube_config_dir
        track init_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        deploy_cni_provider
        taint_master_node
        install_k8s_storageclass
        track k8s_storageclass
        install_k8s_metallb
        track k8s_metallb
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
        track docker_swarm
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    track juju_install

    # Set up (or discover) the juju/VCA controller and its credentials
    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then

            if [ -n "$KUBERNETES" ]; then
                juju_createcontroller_k8s
                juju_addlxd_cloud
            else
                if [ -n "$LXD_CLOUD_FILE" ]; then
                    [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                    OSM_VCA_CLOUDNAME="lxd-cloud"
                    juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                    juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
                fi
                juju_createcontroller
                juju_createproxy
            fi
        else
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # Generate a client certificate, register the local LXD as a
                # cloud on the existing controller and trust the certificate
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                # NOTE(review): the sed prefix indents the PEM blocks to match
                # the yaml literal-block indentation in the heredoc below
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi

    # Set OSM_VCA_APIPROXY only when it is not a k8s installation
    if [ -z "$KUBERNETES" ]; then
        if [ -z "$OSM_VCA_APIPROXY" ]; then
            OSM_VCA_APIPROXY=$DEFAULT_IP
            [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
        fi
        juju_createproxy
    fi
    track juju

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # BUGFIX: dereference the variable; the former literal string
        # "OSM_DATABASE_COMMONKEY" could never be empty
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    track docker_build

    if [ -n "$KUBERNETES" ]; then
        generate_k8s_manifest_files
    else
        generate_docker_compose_files
    fi
    track manifest_files
    generate_prometheus_grafana_files
    generate_docker_env_files
    track env_files

    if [ -n "$KUBERNETES" ]; then
        deploy_charmed_services
        kube_secrets
        update_manifest_files
        namespace_vol
        deploy_osm_services
        # BUGFIX: missing space before ']' made this test always error out,
        # so the optional PLA module was never deployed even when requested
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
            track deploy_osm_pla
        fi
        track deploy_osm_services_k8s
        install_helm
        track install_helm
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # remove old stack
        remove_stack $OSM_STACK_NAME
        create_docker_network
        deploy_lightweight
        generate_osmclient_script
        track docker_deploy
        install_prometheus_nodeexporter
        track nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    # installation telemetry ping; failures are ignored
    wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
    track end
    return 0
}
1432
#######################################
# Deploy OSM onto an OpenStack infrastructure by running the Ansible playbook
# installers/openstack/site.yml from inside a throwaway Python virtualenv.
# Arguments:
#   $1 - path to an openrc file, OR the name of a cloud defined in clouds.yaml
#   $2 - external network name for the VM (mandatory)
#   $3 - whether to attach a volume to the VM ("true"/"false")
# Globals (read): WORKDIR_SUDO, OPENSTACK_PYTHON_VENV, OSM_DEVOPS, REPO_ARGS,
#   OPENSTACK_VM_NAME, OPENSTACK_SSH_KEY_FILE, OPENSTACK_USERDATA_FILE
# Returns: 0 (ansible-playbook failures are not trapped here)
#######################################
function install_to_openstack() {

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip python3-venv
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    # Flatten the accumulated repo flags into one string so the playbook can
    # forward them to the in-VM installer invocation.
    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml:
    # if $1 exists as a file it is sourced as an openrc; otherwise it is
    # treated as a clouds.yaml cloud name and passed via cloud_name.
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from venv
    deactivate

    return 0
}
1484
function install_vimemu() {
    # Build and start the vim-emu (emulated VIM) Docker container, used as a
    # lightweight test VIM for OSM.
    # Globals (read): INSTALL_LIGHTWEIGHT, OSM_STACK_NAME
    # Side effects: installs openvswitch-switch, builds the vim-emu-img image,
    #   starts the "vim-emu" container and exports VIMEMU_HOSTNAME with its IP.
    # Fixed: was a plain "echo" which printed the "\n" literally; every other
    # message in this script uses "echo -e".
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # -y added for consistency with the installer's other apt-get calls, so an
    # unattended install does not block on a confirmation prompt
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."

    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "    export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "    osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
1517
function install_k8s_monitoring() {
    # Deploy the OSM monitoring stack on Kubernetes by delegating to the
    # helper scripts shipped under installers/k8s in the devops repository.
    local k8s_installers_dir=$OSM_DEVOPS/installers/k8s
    $WORKDIR_SUDO chmod +x $k8s_installers_dir/*.sh
    $WORKDIR_SUDO $k8s_installers_dir/install_osm_k8s_monitoring.sh
}
1523
function uninstall_k8s_monitoring() {
    # Tear down the OSM monitoring stack by delegating to the matching
    # uninstall helper script in the devops repository.
    local k8s_installers_dir=$OSM_DEVOPS/installers/k8s
    $WORKDIR_SUDO $k8s_installers_dir/uninstall_osm_k8s_monitoring.sh
}
1528
function dump_vars(){
    # Print the effective value of every installer option to stdout.
    # Used by --showopts so the user can inspect the configuration that the
    # installer would run with, without actually installing anything.
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    # The next three lines previously used the inconsistent 'echo "VAR"="$VAR"'
    # form; normalized to match the rest of the function (output is identical).
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "NGUI=$NGUI"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1578
function track(){
    # Report an anonymous installation-progress event to OSM's telemetry
    # endpoint (Woopra).
    # Arguments:
    #   $1 - stage name appended to the event name (e.g. "start", "end", "juju")
    # Globals (read): SESSION_ID (install-start epoch, doubles as the cookie),
    #   INSTALL_LIGHTWEIGHT, INSTALL_FROM_SOURCE, INSTALL_FROM_LXDIMAGES
    # Note: variables set here are intentionally left global (no 'local') to
    # preserve the original function's observable behavior.
    ctime=$(date +%s)                # modern $( ) instead of backticks
    duration=$((ctime - SESSION_ID)) # seconds elapsed since install start
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    # Event prefix encodes the install flavour: bin / binsrc / lxd / lw
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    # Quoted for safety; previously expanded unquoted
    wget -q -O /dev/null "$url"
}
1592
function parse_docker_registry_url() {
    # Split DOCKER_REGISTRY_URL of the form "user:password@host[:port]" into
    # DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and a credential-free
    # DOCKER_REGISTRY_URL.
    # Fixed: a URL without credentials (no "@") previously wiped the URL and
    # filled user/password with host/port fragments; it is now left untouched
    # and the credential variables are set to empty strings.
    if [[ "$DOCKER_REGISTRY_URL" == *"@"* ]]; then
        DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
        DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
        DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    else
        DOCKER_REGISTRY_USER=""
        DOCKER_REGISTRY_PASSWORD=""
    fi
}
1598
1599 JUJU_AGENT_VERSION=2.8.6
1600 UNINSTALL=""
1601 DEVELOP=""
1602 UPDATE=""
1603 RECONFIGURE=""
1604 TEST_INSTALLER=""
1605 INSTALL_LXD=""
1606 SHOWOPTS=""
1607 COMMIT_ID=""
1608 ASSUME_YES=""
1609 INSTALL_FROM_SOURCE=""
1610 RELEASE="ReleaseNINE"
1611 REPOSITORY="stable"
1612 INSTALL_VIMEMU=""
1613 INSTALL_PLA=""
1614 LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
1615 LXD_REPOSITORY_PATH=""
1616 INSTALL_LIGHTWEIGHT="y"
1617 INSTALL_TO_OPENSTACK=""
1618 OPENSTACK_OPENRC_FILE_OR_CLOUD=""
1619 OPENSTACK_PUBLIC_NET_NAME=""
1620 OPENSTACK_ATTACH_VOLUME="false"
1621 OPENSTACK_SSH_KEY_FILE=""
1622 OPENSTACK_USERDATA_FILE=""
1623 OPENSTACK_VM_NAME="server-osm"
1624 OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
1625 INSTALL_ONLY=""
1626 INSTALL_ELK=""
1627 TO_REBUILD=""
1628 INSTALL_NOLXD=""
1629 INSTALL_NODOCKER=""
1630 INSTALL_NOJUJU=""
1631 KUBERNETES="y"
1632 NGUI="y"
1633 INSTALL_K8S_MONITOR=""
1634 INSTALL_NOHOSTCLIENT=""
1635 SESSION_ID=`date +%s`
1636 OSM_DEVOPS=
1637 OSM_VCA_HOST=
1638 OSM_VCA_SECRET=
1639 OSM_VCA_PUBKEY=
1640 OSM_VCA_CLOUDNAME="localhost"
1641 OSM_VCA_K8S_CLOUDNAME="k8scloud"
1642 OSM_STACK_NAME=osm
1643 NO_HOST_PORTS=""
1644 DOCKER_NOBUILD=""
1645 REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
1646 REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
1647 WORKDIR_SUDO=sudo
1648 OSM_WORK_DIR="/etc/osm"
1649 OSM_DOCKER_WORK_DIR="/etc/osm/docker"
1650 OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
1651 OSM_HOST_VOL="/var/lib/osm"
1652 OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1653 OSM_DOCKER_TAG=latest
1654 DOCKER_USER=opensourcemano
1655 PULL_IMAGES="y"
1656 KAFKA_TAG=2.11-1.0.2
1657 PROMETHEUS_TAG=v2.4.3
1658 GRAFANA_TAG=latest
1659 PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
1660 PROMETHEUS_CADVISOR_TAG=latest
1661 KEYSTONEDB_TAG=10
1662 OSM_DATABASE_COMMONKEY=
1663 ELASTIC_VERSION=6.4.2
1664 ELASTIC_CURATOR_VERSION=5.5.4
1665 POD_NETWORK_CIDR=10.244.0.0/16
1666 K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
1667 RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1668 DOCKER_REGISTRY_URL=
1669 DOCKER_PROXY_URL=
1670 MODULE_DOCKER_TAG=
1671
# Parse command-line options. Short flags are handled by getopts; long options
# arrive through the "-" pseudo-option with the long name in OPTARG.
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && KUBERNETES="" && REPO_ARGS+=(-c "${OPTARG}") && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            # Fixed: the message previously referred to "-i" although this is
            # the handler for "-c".
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            [ "${OPTARG}" == "lwui" ] && NGUI="" && REPO_ARGS+=(-n "${OPTARG}") && continue
            [ "${OPTARG}" == "ngui" ] && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # install-only mode: run a single optional component and exit
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        f)
            OPENSTACK_SSH_KEY_FILE="${OPTARG}"
            ;;
        F)
            OPENSTACK_USERDATA_FILE="${OPTARG}"
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # accumulate modules to rebuild; validated after parsing
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        d)
            DOCKER_REGISTRY_URL="${OPTARG}"
            ;;
        p)
            DOCKER_PROXY_URL="${OPTARG}"
            ;;
        T)
            MODULE_DOCKER_TAG="${OPTARG}"
            ;;
        -)
            # long options (--help, --uninstall, ...); several are accepted and
            # ignored here because they are only meaningful to the charmed
            # installer the arguments are forwarded to
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "vca" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "registry" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
1851
# Post-parse validation and early-exit modes
[ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
# "-m NONE" must be the only -m option given
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
# "-m PLA" alone is only meaningful when PLA is actually being installed
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"

# --showopts: dump the effective configuration and exit without installing
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --charmed: delegate the whole (un)installation to the charmed installer,
# forwarding the original command line.
# NOTE(review): $DOCKER_TAG is never assigned anywhere in this script (only
# OSM_DOCKER_TAG is); when empty and unquoted, "-t" consumes the following
# argument -- confirm whether $OSM_DOCKER_TAG was intended here.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi

    exit 0
fi
1870
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Packages the installer itself needs on the host (checked/installed below)
need_packages="git wget curl tar"

# OpenStack mode: hand everything to install_to_openstack and stop here.
# Arguments are quoted (fix): previously an empty network name was silently
# dropped by word-splitting, shifting $3 into $2 and defeating the function's
# own "external network name is mandatory" validation; quoting also preserves
# paths containing spaces.
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack "$OPENSTACK_OPENRC_FILE_OR_CLOUD" "$OPENSTACK_PUBLIC_NET_NAME" "$OPENSTACK_ATTACH_VOLUME" && echo -e "\nDONE" && exit 0
1877
# Check the installer's own prerequisites. The "cmd || ! echo msg || sudo ..."
# chains read as: if dpkg finds every package, nothing else runs; otherwise the
# negated echo evaluates false, so the sudo step runs, and FATAL fires only if
# that sudo step itself fails.
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
sudo snap install jq
# Locate (or fetch) the devops repository that drives the installation, unless
# the user pointed at one explicitly with -D.
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        # the repo root is the parent of the directory containing this script
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            # No refspec requested (-b): take the newest vX.Y* tag as stable
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
1912
# Pull in shared helper functions (FATAL, ask_user, ...) from the devops repo
. $OSM_DEVOPS/common/all_funcs

# Non-default stack names get their own work dir and (for k8s) namespace volume
[ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
[ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# --uninstall: tear down and exit
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
# "-o <component>" install-only modes: run the requested component(s) and exit
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# The wget is a fire-and-forget telemetry ping; its result is discarded
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README.txt &> /dev/null
track start

# Default (lightweight/container) install path: everything happens inside
# install_lightweight, then the script exits here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
# Legacy from-source path below is only reached when lightweight is disabled
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install osmclient

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Final telemetry ping; result discarded
wget -q -O- https://osm-download.etsi.org/ftp/osm-9.0-nine/README2.txt &> /dev/null
track end
echo -e "\nDONE"