2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Help text for the installer CLI (the enclosing usage() header lies outside
# this fragment). One echo per option; stray extraction line numbers removed
# so the statements are runnable again.
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e "  -r <repo>:      use specified repository name for osm packages"
echo -e "  -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e "  -u <repo base>: use specified repository url for osm packages"
echo -e "  -k <repo key>:  use specified repository public key url"
echo -e "  -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e "                  -b master          (main dev branch)"
echo -e "                  -b v2.0            (v2.0 branch)"
echo -e "                  -b tags/v1.1.0     (a specific tag)"
echo -e "  -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e "  -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e "  -H <VCA host>   use specific juju host controller IP"
echo -e "  -S <VCA secret> use VCA/juju secret key"
echo -e "  -P <VCA pubkey> use VCA/juju public key file"
echo -e "  -C <VCA cacert> use VCA/juju CA certificate file"
echo -e "  -A <VCA apiproxy> use VCA/juju API proxy"
echo -e "  --vimemu:       additionally deploy the VIM emulator as a docker container"
echo -e "  --elk_stack:    additionally deploy an ELK docker stack for event logging"
echo -e "  --pla:          install the PLA module for placement support"
echo -e "  -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
echo -e "  -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack)"
echo -e "  -D <devops path> use local devops installation path"
echo -e "  -w <work dir>   Location to store runtime installation"
echo -e "  -t <docker tag> specify osm docker tag (default is latest)"
echo -e "  --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
echo -e "  --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e "  --nojuju:       do not juju, assumes already installed"
echo -e "  --nodockerbuild:do not build docker images (use existing locally cached images)"
echo -e "  --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e "  --nohostclient: do not install the osmclient"
echo -e "  --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
echo -e "  --source:       install OSM from source code using the latest stable tag"
echo -e "  --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e "  --pullimages:   pull/run osm images from docker.io/opensourcemano"
# typo fix: "moitoring" -> "monitoring"
echo -e "  --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
# echo -e "  --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
# echo -e "  --update:       update to the latest stable release or to the latest commit if using a specific branch"
echo -e "  --showopts:     print chosen options and exit (only for debugging)"
echo -e "  -y:             do not prompt for confirmation, assumes yes"
echo -e "  -h / --help:    print this help"
echo -e "  --charmed:      install OSM with charms"
echo -e "  --bundle <bundle path>:   Specify with which bundle to deploy OSM with charms (--charmed option)"
echo -e "  --kubeconfig <kubeconfig path>:   Specify with which kubernetes to deploy OSM with charms (--charmed option)"
echo -e "  --lxdendpoint <lxd endpoint ip>:   Specify with which LXD to deploy OSM with charms (--charmed option)"
echo -e "  --lxdcert <lxd cert path>:   Specify external LXD cert to deploy OSM with charms (--charmed option)"
echo -e "  --microstack:   Installs microstack as a vim. (--charmed option)"
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    # Arguments: $1 - controller name whose password should be printed
    # Outputs:   the password on stdout (no trailing newline)
    # NOTE(review): the awk record-tracking lines were lost in the mangled
    # source and were restored from context — verify against upstream.
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key charset, fs: improbable separator (\034)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "<indent>FS<key>FS<value>" records, then rebuild
    # the key path from indentation and print the matching password value.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Emit a 32-character alphanumeric random secret on stdout (no newline).
function generate_secret() {
    # Draw from the kernel RNG, keep only [A-Za-z0-9], stop at 32 chars.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
# Remove persistent storage: either a k8s host-path volume directory or the
# docker volumes that belong to a swarm stack.
function remove_volumes() {
    # Arguments: $1 - k8s volume directory (KUBERNETES mode) or stack name
    # Globals:   KUBERNETES, WORKDIR_SUDO (read)
    # NOTE(review): branch structure restored from context — verify.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker network created for the given stack ("net<stack>").
function remove_network() {
    # Arguments: $1 - stack name
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Delete the DNAT rule that proxies host port 17070 to the VCA (juju)
# controller, then persist the iptables state.
function remove_iptables() {
    # Arguments: $1 - stack/controller name (used to query juju)
    # Globals:   OSM_VCA_HOST, DEFAULT_IF, DEFAULT_IP (read; derived if empty)
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # $() instead of backticks for readability and sane quoting
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$DEFAULT_IP" ]; then
        # interface carrying the default route, then its primary IPv4 address
        DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi
    # -C checks the rule exists before deleting (-D) it
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove a docker swarm stack and wait (up to ~30s) until all of its
# containers are gone; FATAL if some remain.
function remove_stack() {
    # Arguments: $1 - stack name
    # NOTE(review): counter init / break / sleep lines were lost in the
    # mangled source and restored from context — verify.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            # arithmetic expansion instead of the deprecated "let"
            COUNTER=$((COUNTER+1))
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
function remove_k8s_namespace() {
    # Arguments: $1 - namespace to delete. Deleting the namespace tears down
    # every deployment/service/pod inside it.
    # NOTE(review): body restored from context (lost in mangled source).
    kubectl delete ns $1
}
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    # Globals: INSTALL_ONLY, INSTALL_ELK, KUBERNETES, INSTALL_K8S_MONITOR,
    #          OSM_STACK_NAME, OSM_HOST_VOL, OSM_DOCKER_WORK_DIR,
    #          DOCKER_USER, OSM_DOCKER_TAG, WORKDIR_SUDO (read)
    # NOTE(review): several flow-control lines (else/fi, heredoc terminator)
    # were lost in the mangled source and restored from context — verify.
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
        fi
        echo "Now osm docker images and volumes will be deleted"
        # heredoc runs inside a shell with the docker group active
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # BUGFIX: the check must be negated — install only when dpkg does NOT
    # list the package (the mangled source printed "Not installed" and
    # reinstalled even when it was already present).
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt-get needs no interactive answers
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    # NOTE(review): the function header was lost in the mangled source; the
    # name "nat" was restored from the upstream installer — verify.
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
236 echo "FATAL error: Cannot install OSM due to \"$1\""
# Install the LXD snap (replacing any deb-based LXC/LXD), preseed its
# configuration and match the default profile MTU to the host uplink.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    # NOTE(review): a "sudo sysctl --system" reload likely followed here in
    # the original (lines lost in the mangled source) — verify.
    sudo sysctl --system
    # Remove distro packages so only the snap provides LXD
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd --channel=3.0/stable
    sudo usermod -a -G lxd $(whoami)
    # feed the preseed file directly (no useless cat)
    sg lxd -c "lxd init --preseed" < /usr/share/osm-devops/installers/lxd-preseed.conf
    sg lxd -c "lxd waitready"
    # Interface carrying the default route and its MTU; containers must not
    # use a larger MTU than the host uplink.
    DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    # NOTE(review): the function header and while-loop were lost in the
    # mangled source and restored from context — verify.
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # empty answer falls back to the default action, when one is allowed
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        # ${var,,} lowercases, making the comparison case-insensitive
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Add the OSM apt repository and install the python3 osmclient plus the
# Information Model packages, then print follow-up hints for the user.
function install_osmclient(){
    # Globals: RELEASE, REPOSITORY, REPOSITORY_BASE (read; may still carry
    #          their CLI flag prefix, stripped below), INSTALL_LIGHTWEIGHT (read)
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    # NOTE(review): an "apt-get update" likely preceded the installs here
    # (line lost in the mangled source) — verify.
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # classic (non-lightweight) installs expose SO/RO through lxc containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and activate the Prometheus node_exporter as a systemd service
# running under a dedicated system user. No-op if the service is active.
function install_prometheus_nodeexporter(){
    # Globals: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS (read)
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            # system-style user: no home, no login shell
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        sudo wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        # BUGFIX: clean up the download/extract artifacts in /tmp — the
        # original removed "node_exporter-*" from the current directory,
        # leaving the real artifacts behind.
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/files/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
# Stop, disable and fully remove the node_exporter service, its unit file,
# its binary and its dedicated user.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    # pick up the unit removal before deleting the rest
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
function install_docker_ce() {
    # installs and configures Docker CE from Docker's apt repository and
    # grants the invoking user access via the "docker" group.
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    # quoted in case $USER is empty/odd
    sudo usermod -aG docker "$USER"
    sudo service docker restart
    echo "... restarted Docker service"
    # sg runs the check with the fresh docker group membership
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned to 1.18.0) from the
    # GitHub release matching this host's kernel and architecture.
    echo "Installing Docker Compose ..."
    # $() instead of backticks for readability
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install the juju snap and make sure /snap/bin is reachable from this shell.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    # prepend the snap bin dir only when it is not already on PATH
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
}
# Bootstrap a local (LXD) juju controller named $OSM_STACK_NAME when none
# exists yet, then verify it appears exactly once in "juju controllers".
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial localhost $OSM_STACK_NAME"
    fi
    # BUGFIX: escape $1 so awk (not the shell) expands it; unescaped, the
    # shell substituted this function's (empty) first positional parameter.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (when absent) a DNAT rule forwarding host port 17070 to the VCA (juju)
# controller and persist the iptables state.
function juju_createproxy() {
    # Globals: DEFAULT_IP, OSM_VCA_HOST (read)
    check_install_iptables_persistent

    # -C tests for the rule; only append (-A) when it is missing
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images and pull or build the OSM module images.
# Globals: COMMIT_ID, TO_REBUILD, PULL_IMAGES, INSTALL_PLA, LWTEMPDIR,
#          DOCKER_USER, OSM_DOCKER_TAG, OSM_DEVOPS, BUILD_ARGS, and the
#          per-image *_TAG / REPOSITORY* / RELEASE variables (read).
# NOTE(review): the closing "fi" lines were lost in the mangled source and
# restored from context — verify against upstream.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    # Third-party images: pulled when no module filter is given (-m) or the
    # filter names them. Note: "grep -q PROMETHEUS" also matches
    # PROMETHEUS-CADVISOR because it is a substring match.
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # OSM modules: pulled from docker.io when --pullimages was given,
    # otherwise cloned from gerrit and built locally at ${COMMIT_ID}.
    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional; only handled when --pla was requested or explicitly listed
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    # BUGFIX: a second cadvisor pull guarded by "grep -q PROMETHEUS" stood
    # here; it duplicated the PROMETHEUS-CADVISOR pull above (the pull is
    # idempotent), so it was removed.

    echo "Finished generation of docker images"
}
# Copy $1 over $2 unless both files already have identical content; when $2
# exists and differs, ask the user before overwriting (cp -b keeps a backup).
function cmp_overwrite() {
    # Arguments: $1 - source file, $2 - destination file
    file1="$1"
    file2="$2"
    # IDIOM FIX: call "cmp -s" directly instead of "! $(cmp ...)" which
    # relied on the exit status of an empty command substitution.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
# Create or refresh the per-module .env files consumed by the OSM containers,
# backing up any pre-existing ones first. Secrets are generated only on the
# first run (the files are never overwritten, only appended or sed-updated).
# Globals: WORKDIR_SUDO, OSM_DOCKER_WORK_DIR, OSM_DEVOPS, KUBERNETES,
#          INSTALL_PLA, OSM_DATABASE_COMMONKEY, OSM_VCA_HOST/SECRET/PUBKEY/
#          CACERT/APIPROXY, DEFAULT_IP (read)
# NOTE(review): else/fi lines were lost in the mangled source; the
# append-or-sed pattern below was restored from context — verify.
function generate_docker_env_files() {
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # docker-compose resources
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus config file
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml

        # Grafana & Prometheus Exporter files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/files
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/files/* $OSM_DOCKER_WORK_DIR/files/
    fi

    # LCM: seed the file on first run, then append-or-update each VCA setting
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
# Write a one-line wrapper script that launches the osmclient sidecar
# container attached to the stack's docker network.
function generate_osmclient_script () {
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): an "apt-get update" after adding the repo was likely
    # present in the lost lines — verify.
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # kubelet/kubeadm/kubectl must be installed at matching pinned versions
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # Arguments: $1 - path to the kubeadm configuration file
    sudo kubeadm init --config $1
    # brief pause so the control plane settles before subsequent kubectl calls
    sleep 5
}
# Make the kubeadm admin kubeconfig available to the invoking user in ~/.kube.
function kube_config_dir() {
    # Globals: K8S_MANIFEST_DIR (read) — its absence means kube install failed
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # NOTE(review): the mkdir line was lost in the mangled source and
    # restored from context — verify.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # give the file back to the invoking (non-root) user
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel manifest into a throwaway dir and apply it.
    # NOTE: this EXIT trap is script-wide and replaces any previous EXIT trap.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # IDIOM: check the apply status directly instead of testing $? afterwards
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # One "<module>-secret" per module, fed from the module's env file
    # (same commands, same order as before — just expressed as a loop).
    local module
    for module in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${module}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${module}.env
    done
}
#deploys osm pods and services
function deploy_osm_services() {
    # find the master node name and remove its NoSchedule taint so OSM pods
    # can be scheduled on a single-node cluster
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    # apply every manifest under the osm_pods working directory
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Prepares and deploys the optional PLA service manifest, mirroring what
# parse_yaml / namespace_vol / deploy_osm_services do for the core services.
function deploy_osm_pla_service() {
    # corresponding to parse_yaml: retag the pla image unless using release tag 7
    [ "$OSM_DOCKER_TAG" != "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at the namespace volume
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
# Rewrites the image tag of every core OSM service manifest.
# Arguments: $1 - docker tag to substitute into the opensourcemano/* images
function parse_yaml() {
    TAG=$1
    osm_services="nbi lcm ro pol mon light-ui keystone"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/opensourcemano\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Points every stateful service manifest's hostPath volume at the
# per-namespace volume directory ($OSM_NAMESPACE_VOL).
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Initializes a docker swarm on the default interface. When the host MTU is
# non-standard, first recreates docker_gwbridge with a matching MTU so overlay
# traffic is not fragmented.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # collect existing docker networks and pick the next free 172.x subnet
        # for the replacement gateway bridge
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Creates the attachable overlay network used by the OSM stack, carrying the
# host's default MTU.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the lightweight (docker swarm) build: computes the port mappings,
# writes osm_ports.sh with the exported ports/tags, and runs docker stack deploy.
function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_KEYSTONE_PORT=5000
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # expose container ports only — no host port mapping
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # map host ports to container ports (host:container)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        # prometheus uses a distinct host port to avoid clashing with cadvisor (8080)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # osm_ports.sh is sourced by the deploy command below and by later restarts
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    # restore the caller's working directory (pushd was never popped)
    popd

    echo "Finished deployment of lightweight build"
}
# Pulls the ELK images, deploys the osm_elk stack and, once Kibana answers on
# 127.0.0.1:5601, creates and selects the default filebeat index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # poll Kibana's status endpoint until it answers 200 or the timeout expires;
    # elk_is_up: 0 = reachable, 1 = not (yet) reachable
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Main driver for the lightweight install: prepares work dirs, sets up the VCA
# (juju) credentials, installs docker/kubernetes, then deploys the OSM services
# either on kubernetes or on docker swarm.
function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
 1. Install and configure LXD
 4. Disable swap space
 5. Install and initialize Kubernetes
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # determine default interface, its IP and MTU — needed by docker/juju below
    DEFAULT_IF=`route -n |awk '$1~/^0.0.0.0/ {print $8}'`
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju
    if [ -z "$OSM_VCA_HOST" ]; then
        juju_createcontroller
        OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        OSM_VCA_CACERT=$(juju controllers --format json | jq -r '.controllers["osm"]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fixed: previously tested the literal string "OSM_DATABASE_COMMONKEY",
        # which is never empty, so a failed generation went undetected
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # fixed: was [ ! $OSM_DOCKER_TAG == "7" ] (fragile unquoted negated test)
        [ "$OSM_DOCKER_TAG" != "7" ] && parse_yaml $OSM_DOCKER_TAG
        # fixed: was [ -n "$INSTALL_PLA"] — missing space before ']' made the test always fail
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # docker swarm flavour
        remove_stack $OSM_STACK_NAME
        create_docker_network
        generate_osmclient_script
    fi

    install_prometheus_nodeexporter
    [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
    [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    # download counter (best effort, output discarded)
    wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README2.txt &> /dev/null
}
# Clones, builds and starts the vim-emu (emulated VIM) docker container, then
# prints connection instructions.
function install_vimemu() {
    # fixed: plain echo printed a literal \n — use -e like the rest of the file
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # -y added: an interactive prompt would hang an unattended install
    sudo apt-get install -y openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the OSM monitoring stack on kubernetes by delegating to the
# devops helper script (made executable first).
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Removes the OSM monitoring stack by delegating to the devops helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints the effective value of every installer option/variable (used by --showopts).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fixed: previously printed $OSM_STACK_NAME under the OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
# Reports an anonymous install-progress event to the OSM tracking endpoint.
# Arguments: $1 - event step name (appended to the install-flavour prefix)
# NOTE(review): the function header and the ctime assignment were missing in the
# mangled source; reconstructed so $duration is no longer computed from an
# unset variable, and event_name has a default before the flavour checks.
function track(){
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
# --- Default values for every installer option (overridden by getopts below) ---
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# install start timestamp; also used as the tracking cookie
SESSION_ID=`date +%s`
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# working directories and volumes
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# docker image coordinates
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
# kubernetes cluster settings
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# valid kubernetes namespace name (RFC 1123 label)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# Command-line option parsing. Case labels, esac and done were dropped from the
# mangled source; arms are restored from the optstring and the visible arm bodies.
while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:-: hy" o; do
    case "${o}" in
        b)
            # NOTE(review): arm body not visible in source; -b is documented as
            # the refspec option and COMMIT_ID is consumed later — confirm
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        m)
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        -)
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "kubeconfig" ] && continue
            [ "${OPTARG}" == "lxdendpoint" ] && continue
            [ "${OPTARG}" == "lxdcert" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            # assume yes on all interactive prompts (read later by ask_user gates)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Sanity checks on the -m option combinations: NONE excludes any other module,
# and PLA requires the --pla install flag.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# --showopts: print effective options and stop.
# NOTE(review): the if-body was dropped from the mangled source; dump_vars is
# defined above for exactly this purpose — confirm against upstream.
if [ -n "$SHOWOPTS" ]; then
    dump_vars
    exit 0
fi

# --charmed: delegate the whole install/uninstall to the charmed installer
# scripts and print osmclient configuration steps.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        /usr/share/osm-devops/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        /usr/share/osm-devops/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi
    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo ""
    echo "1. Get the NBI IP with the following command:"
    echo ""
    echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
    echo ""
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo ""
    echo "export OSM_HOSTNAME=<NBI-IP>"
    echo ""
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo ""
    echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
    # the charmed path must not fall through into the classic installer below
    exit 0
fi
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Verify the basic tooling is present; only escalate to sudo (apt update /
# install) when something is actually missing.
need_packages="git jq wget curl tar"
echo -e "Checking required packages: $need_packages"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
  || sudo apt-get update \
  || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
  || ! echo -e "Installing $need_packages requires root privileges." \
  || sudo apt-get install -y $need_packages \
  || FATAL "failed to install $need_packages"
# Resolve OSM_DEVOPS: use the local repo when testing the installer, otherwise
# clone the devops repo into a temp dir and check out the requested refspec
# (guessing the latest stable tag when none was given).
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            LATEST_STABLE_DEVOPS=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        else
            echo -e "\nDEVOPS Using commit $COMMIT_ID"
        fi

        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
# Load the shared helper functions from the devops repo, then dispatch:
# uninstall, install-only add-ons, or the full lightweight installation.
. $OSM_DEVOPS/common/all_funcs

[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# download counter (best effort, output discarded)
wget -q -O- https://osm-download.etsi.org/ftp/osm-7.0-seven/README.txt &> /dev/null

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1398 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1399 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1400 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1403 echo -e "Checking required packages: lxd"
1404 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1405 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1407 # use local devops for containers
1408 export OSM_USE_LOCAL_DEVOPS
=true
1412 #Install vim-emu (optional)
1413 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce
&& install_vimemu
1415 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null