2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
function usage(){
    # Print the installer's command-line help on stdout.
    # NOTE(review): the "function usage(){" header was reconstructed — the
    # original header line is missing from this mangled copy; confirm name.
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e "  OPTIONS"
    echo -e "     -r <repo>: use specified repository name for osm packages"
    echo -e "     -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e "     -u <repo base>: use specified repository url for osm packages"
    echo -e "     -k <repo key>: use specified repository public key url"
    echo -e "     -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e "     -b master (main dev branch)"
    echo -e "     -b v2.0 (v2.0 branch)"
    echo -e "     -b tags/v1.1.0 (a specific tag)"
    echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
    echo -e "     -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e "     -H <VCA host> use specific juju host controller IP"
    echo -e "     -S <VCA secret> use VCA/juju secret key"
    echo -e "     -P <VCA pubkey> use VCA/juju public key file"
    echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
    echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e "     --vimemu: additionally deploy the VIM emulator as a docker container"
    echo -e "     --elk_stack: additionally deploy an ELK docker stack for event logging"
    echo -e "     --pla: install the PLA module for placement support"
    echo -e "     -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
    echo -e "     -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
    echo -e "     -D <devops path> use local devops installation path"
    echo -e "     -w <work dir> Location to store runtime installation"
    echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
    echo -e "     -l: LXD cloud yaml file"
    echo -e "     -L: LXD credentials yaml file"
    echo -e "     -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e "     --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e "     --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e "     --nojuju: do not juju, assumes already installed"
    echo -e "     --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e "     --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e "     --nohostclient: do not install the osmclient"
    echo -e "     --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e "     --source: install OSM from source code using the latest stable tag"
    echo -e "     --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e "     --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e "     --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    # echo -e "     --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
    # echo -e "     --update: update to the latest stable release or to the latest commit if using a specific branch"
    echo -e "     --showopts: print chosen options and exit (only for debugging)"
    echo -e "     -y: do not prompt for confirmation, assumes yes"
    echo -e "     -h / --help: print this help"
    echo -e "     --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e "     [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e "     [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e "     [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e "     [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e "     [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e "     [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e "     [--tag]: Docker image tag"
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    # $1 - controller name to look up
    # Outputs the controller's password on stdout (empty if not found).
    local password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key characters, fs: an unlikely
    # field-separator byte (0x1C) used to join the flattened records
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten each "key: value" YAML line into "indent<fs>key<fs>value",
    # then walk the records tracking the nesting path so the password under
    # controllers.<controller_name> can be matched.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
function generate_secret() {
    # Print a random 32-character alphanumeric secret on stdout.
    # The tr set is quoted so it can never be mangled by globbing.
    head /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 32
}
function remove_volumes() {
    # Delete OSM persistent storage.
    # Kubernetes install: $1 is the host directory backing the volumes.
    # Swarm install: $1 is the stack name whose docker volumes are removed.
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
function remove_network() {
    # Remove the docker overlay network "net<stack>" created at install time.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
function remove_iptables() {
    # Delete the DNAT rule that exposed the juju controller (port 17070)
    # on the host's default IP, then persist the iptables state.
    # $1 - stack / juju controller name
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Obtain the juju controller IP from its api-endpoints
        OSM_VCA_HOST=$(sg lxd -c "juju show-controller ${stack}" | grep api-endpoints | awk -F\' '{print $2}' | awk -F: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list | awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=$(ip -o -4 a | grep ${DEFAULT_IF} | awk '{split($4,a,"/"); print a[1]}')
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it is currently present (-C checks existence)
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
function remove_stack() {
    # Remove a docker swarm stack and wait (up to ~30s) until all of its
    # containers are gone; abort via FATAL if they do not disappear.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace removes every deployment/service inside it.
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
function uninstall_osmclient() {
    # Remove both the python2 and python3 flavours of the client package.
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}
#Uninstall lightweight OSM: remove dockers
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # Run inside the docker group so the user does not need to re-login
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # Install only when dpkg does NOT already list the package; the check
    # must be negated or the package would be (re)installed when present.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e "    Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt-get never prompts about saving current rules
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
250 #Configure NAT rules, based on the current IP addresses of containers
252 check_install_iptables_persistent
254 echo -e "\nConfiguring NAT rules"
255 echo -e " Required root privileges"
256 sudo
$OSM_DEVOPS/installers
/nat_osm
260 echo "FATAL error: Cannot install OSM due to \"$1\""
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp /usr/share/osm-devops/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap (removing any legacy deb-based LXD first)
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD: preseed it to listen on the default IP, then set the
    # default profile MTU to match the host's default interface
    sudo usermod -a -G lxd `whoami`
    cat /usr/share/osm-devops/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n  core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list | awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n | awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer falls back to the default action, when one is allowed
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        # Anything else: keep asking until a valid answer is given
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
function install_osmclient(){
    # Install the OSM client (python3-osmclient + IM) from the configured
    # apt repository. The globals may still carry their option flag prefix
    # ("-R ", "-r ", "-u ") from argument parsing, so strip it here.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
function install_prometheus_nodeexporter(){
    # Install the prometheus node_exporter as a systemd service running
    # under a dedicated no-login user; skip if already active.
    if (systemctl -q is-active node_exporter); then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download, unpack and install the release binary, then clean /tmp
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        # Register and start the systemd unit
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
}
function uninstall_prometheus_nodeexporter(){
    # Stop and fully remove the node_exporter service, its user and binary.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
function install_docker_ce() {
    # installs and configures Docker CE from the official docker.com repo,
    # adds the current user to the docker group, and sanity-checks the
    # daemon via "docker version".
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # sg runs with the docker group without requiring a new login session
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned 1.18.0 release binary)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic
    # Make the snap-provided juju binary reachable in this shell session
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
function juju_createcontroller() {
    # Bootstrap the juju controller on LXD if it does not exist yet, then
    # verify exactly one matching controller is listed.
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # \$1 must be escaped so awk (not the shell) expands it
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
function juju_createproxy() {
    # Expose the juju controller API (port 17070) on the host's default IP
    # via a DNAT rule, adding it only when not already present.
    check_install_iptables_persistent

    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
function generate_docker_images() {
    # Pull third-party images and either pull (PULL_IMAGES) or build from
    # source (git clone + docker build) each OSM module image. TO_REBUILD
    # restricts the work to the listed modules; empty means all.
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
        git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
function cmp_overwrite() {
    # Copy $1 over $2 unless their contents already match. If a different
    # $2 exists, ask before overwriting; otherwise copy silently.
    # cmp -s replaces the fragile "! $(cmp ... >/dev/null)" construct,
    # which only worked through bash's empty-expansion exit-status rule.
    file1="$1"
    file2="$2"
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
function generate_docker_env_files() {
    # Back up any pre-existing env files, copy deployment descriptors, and
    # create/update the per-module *.env files under $OSM_DOCKER_WORK_DIR.
    # Existing keys are updated in place (sed); missing ones are appended.
    echo "Doing a backup of existing env files"
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}

    echo "Generating docker env files"
    if [ -n "$KUBERNETES" ]; then
        #Kubernetes resources
        $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    else
        # Docker-compose descriptor
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
        if [ -n "$INSTALL_PLA" ]; then
            $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
        fi

        # Prometheus files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml

        # Grafana files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json

        # Prometheus Exporters files
        $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
        $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
    fi

    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # LW-UI
    if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env ]; then
        echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
    fi

    echo "Finished generation of docker env files"
}
# Writes a tiny wrapper script ($OSM_DOCKER_WORK_DIR/osm) that launches the
# osmclient sidecar container attached to the stack network, and marks it
# executable.
# Globals (read): OSM_STACK_NAME, DOCKER_USER, OSM_DOCKER_TAG,
#                 OSM_DOCKER_WORK_DIR, WORKDIR_SUDO
function generate_osmclient_script () {
    local wrapper_cmd="docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}"
    echo "$wrapper_cmd" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
# Adds the upstream Kubernetes apt repository and installs a pinned
# kubelet/kubeadm/kubectl tool chain. Versions are pinned so kubeadm's
# preflight checks match the release this installer was validated against.
function install_kube() {
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Trust the Google-hosted package repository signing key.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # NOTE(review): this copy of the script appears truncated here; upstream
    # presumably refreshes the apt cache again after adding the repo — confirm.
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
# $1 - path to the kubeadm cluster configuration YAML.
# NOTE(review): this copy of the script appears truncated — upstream likely
# disables swap before running kubeadm init; confirm against the original.
function init_kubeadm() {
    sudo kubeadm init --config $1
}
# Copies the kubeadm-generated admin kubeconfig into the invoking user's
# ~/.kube/config and gives the user ownership, so kubectl works without sudo.
# Aborts via FATAL when the kubeadm manifest directory is absent (i.e. the
# Kubernetes install did not happen).
function kube_config_dir() {
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # NOTE(review): a line creating $HOME/.kube seems to be missing from this
    # copy of the script — confirm the directory exists before the cp.
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
# Downloads the flannel manifest into a throw-away temp directory (cleaned up
# via the EXIT trap) and applies it to the cluster.
function deploy_cni_provider() {
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    kubectl apply -f $CNI_DIR
    # keep the explicit $? check (not ||) so the function's exit status
    # behaviour is unchanged
    [ $? -ne 0 ] && FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
# Creates the stack namespace, then one generic secret per OSM component,
# each populated from that component's env file in $OSM_DOCKER_WORK_DIR.
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    local component
    # same components, same order, same secret names as before
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
#deploys osm pods and services
# Untaints the master node (so a single-node cluster can schedule OSM pods),
# then applies every manifest found in $OSM_K8S_WORK_DIR into the stack
# namespace.
function deploy_osm_services() {
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/'| awk '{print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Deploys the optional PLA (placement) service: pins its image tag and rewrites
# its hostPath volume in pla.yaml, then applies the osm_pla manifests.
function deploy_osm_pla_service() {
    local pla_manifest="$OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml"
    # corresponding to parse_yaml
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $pla_manifest
    # corresponding to namespace_vol
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $pla_manifest
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
775 #Install helm and tiller
# Installs the helm v2 client binary if absent, then sets up the tiller
# service account / cluster role binding and waits for the tiller deployment.
# NOTE(review): this copy of the script is truncated — the tail of the wait
# loop (sleep / counter increment, 'done', closing 'fi's and '}') is missing.
776 function install_helm
() {
777 helm
> /dev
/null
2>&1
778 if [ $?
!= 0 ] ; then
779 # Helm is not installed. Install helm
# helm version is hard-coded (v2.15.2) — tiller-based helm 2 workflow
780 curl https
://get.helm.sh
/helm-v2.15
.2-linux-amd64.
tar.gz
--output helm-v2.15
.2.
tar.gz
781 tar -zxvf helm-v2.15
.2.
tar.gz
782 sudo
mv linux-amd64
/helm
/usr
/local
/bin
/helm
784 rm helm-v2.15
.2.
tar.gz
787 # Checking if tiller has being configured
788 kubectl
--namespace kube-system get serviceaccount tiller
> /dev
/null
2>&1
789 if [ $?
== 1 ] ; then
790 # tiller account for kubernetes
791 kubectl
--namespace kube-system create serviceaccount tiller
792 kubectl create clusterrolebinding tiller-cluster-rule
--clusterrole=cluster-admin
--serviceaccount=kube-system
:tiller
793 # HELM initialization
794 helm init
--service-account tiller
796 # Wait for Tiller to be up and running. If timeout expires, continue installing
797 tiller_timeout
=120; counter
=0
798 while (( counter
< tiller_timeout
))
# NOTE(review): the awk quoting below ('{print $2'}) works only by accidental
# shell concatenation — upstream intent is awk '{print $2}'.
800 tiller_status
=`kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2'}`
801 ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
# Pins each OSM component image in the k8s manifests to the requested tag.
# NOTE(review): $TAG is referenced below but never assigned in this copy of
# the function; upstream presumably sets TAG=$1 — confirm.
function parse_yaml() {
    osm_services="nbi lcm ro pol mon light-ui keystone"
    local component
    for component in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$component:.*/opensourcemano\/$component:$TAG/g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
# Rewrites the hostPath volumes in the component manifests so each stack keeps
# its state under its own directory ($OSM_NAMESPACE_VOL) instead of the shared
# /var/lib/osm default.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql"
    local component
    for component in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$component.yaml
    done
}
# Initializes a single-node docker swarm. When the default interface MTU is
# not 1500, pre-creates docker_gwbridge with a matching MTU (and a subnet one
# step above the highest 172.x docker subnet already in use) so overlay
# networking works on jumbo/small-MTU links.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=$(sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s)
        DOCKER_GW_NET=$(sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}')
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Creates the attachable overlay network net${OSM_STACK_NAME} used by the OSM
# stack, carrying the MTU detected on the default interface.
function create_docker_network() {
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the lightweight (docker swarm) build: computes the port mappings,
# persists the deployment environment into osm_ports.sh, and runs
# "docker stack deploy" (optionally including the PLA compose file).
function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_KEYSTONE_PORT=5000
    # NOTE(review): assignments for the remaining default ports (NBI, RO, UI,
    # MON, PROM, ...) appear to be missing from this copy — confirm upstream.
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # expose only container-side ports (no host binding)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container port mappings
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the deployment environment consumed by docker-compose.
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    # NOTE(review): upstream presumably pops back out of $OSM_DOCKER_WORK_DIR
    # here (popd); that line is missing from this copy.
    echo "Finished deployment of lightweight build"
}
# Deploys the optional ELK (Elasticsearch/Logstash/Kibana) monitoring stack on
# docker swarm, then polls Kibana and tries to create the default filebeat
# index pattern.
# NOTE(review): this copy of the script is truncated — the poll-loop counters
# (time/step/timelength/elk_is_up), the loop's else/sleep branch, 'done' and
# the closing 'fi'/'}' are missing.
898 function deploy_elk
() {
899 echo "Pulling docker images for ELK"
900 sg docker
-c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL
"cannot get elasticsearch docker image"
901 sg docker
-c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL
"cannot get metricbeat docker image"
902 sg docker
-c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL
"cannot get filebeat docker image"
903 sg docker
-c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL
"cannot get kibana docker image"
904 sg docker
-c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL
"cannot get curator docker image"
905 echo "Finished pulling elk docker images"
906 $WORKDIR_SUDO mkdir
-p "$OSM_DOCKER_WORK_DIR/osm_elk"
907 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers
/docker
/osm_elk
/* $OSM_DOCKER_WORK_DIR/osm_elk
909 echo "Deploying ELK stack"
910 sg docker
-c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
911 echo "Waiting for ELK stack to be up and running"
916 while [ $time -le $timelength ]; do
917 if [[ $
(curl
-f -XGET http
://127.0.0.1:5601/status
-I 2>/dev
/null |
grep "HTTP/1.1 200 OK" |
wc -l ) -eq 1 ]]; then
924 if [ $elk_is_up -eq 0 ]; then
925 echo "ELK is up and running. Trying to create index pattern..."
926 #Create index pattern
927 curl
-f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
928 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
929 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev
/null
930 #Make it the default index
931 curl
-f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
932 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
933 -d"{\"value\":\"filebeat-*\"}" 2>/dev
/null
935 echo "Cannot connect to Kibana to create index pattern."
936 echo "Once Kibana is running, you can use the following instructions to create index pattern:"
937 echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
938 "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
939 -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
940 echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
941 "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
942 -d"{\"value\":\"filebeat-*\"}"'
944 echo "Finished deployment of ELK stack"
# Main entry point for the lightweight (container-based) install: prepares the
# work dirs, confirms with the user, detects the default network interface /
# IP / MTU, bootstraps LXD+juju (the VCA), derives the VCA credentials, then
# installs docker (and optionally Kubernetes) and deploys the OSM services.
# NOTE(review): this copy of the script is heavily truncated — heredoc bodies,
# several else/fi lines and whole statements are missing; the code below is
# kept byte-identical to the (mangled) source.
948 function install_lightweight
() {
949 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
950 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
951 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR
952 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
953 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
956 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
959 if [ -n "$KUBERNETES" ]; then
960 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
961 1. Install and configure LXD
964 4. Disable swap space
965 5. Install and initialize Kubernetes
967 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
970 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
974 echo "Installing lightweight build of OSM"
975 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
976 trap 'rm -rf "${LWTEMPDIR}"' EXIT
# Determine the interface carrying the default route, then its IP and MTU.
977 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
978 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
979 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
980 DEFAULT_IP
=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
981 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
982 DEFAULT_MTU
=$
(ip addr show
${DEFAULT_IF} | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
984 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
985 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
986 need_packages_lw
="snapd"
987 echo -e "Checking required packages: $need_packages_lw"
988 dpkg
-l $need_packages_lw &>/dev
/null \
989 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
990 || sudo apt-get update \
991 || FATAL
"failed to run apt-get update"
992 dpkg
-l $need_packages_lw &>/dev
/null \
993 ||
! echo -e "Installing $need_packages_lw requires root privileges." \
994 || sudo apt-get
install -y $need_packages_lw \
995 || FATAL
"failed to install $need_packages_lw"
1001 [ -z "$INSTALL_NOJUJU" ] && install_juju
# Bootstrap or register the juju controller (the VCA), either on local LXD
# or against an external LXD / pre-existing controller.
1004 if [ -z "$OSM_VCA_HOST" ]; then
1005 if [ -z "$CONTROLLER_NAME" ]; then
1006 if [ -n "$LXD_CLOUD_FILE" ]; then
1007 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1008 OSM_VCA_CLOUDNAME
="lxd-cloud"
1009 juju add-cloud
$OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud
$OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1010 juju add-credential
$OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential
$OSM_VCA_CLOUDNAME lxd-cloud-creds
-f $LXD_CRED_FILE
1012 juju_createcontroller
1014 OSM_VCA_CLOUDNAME
="lxd-cloud"
1015 if [ -n "$LXD_CLOUD_FILE" ]; then
1016 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1017 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1018 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CRED_FILE
# NOTE(review): the heredoc bodies below are incomplete in this copy.
1021 cat << EOF > ~/.osm/lxd-cloud.yaml
1025 auth-types: [certificate]
1026 endpoint: "https://$DEFAULT_IP:8443"
1028 ssl-hostname-verification: false
1030 openssl req
-nodes -new -x509 -keyout ~
/.osm
/client.key
-out ~
/.osm
/client.crt
-days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1031 local server_cert
=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1032 local client_cert
=`cat ~/.osm/client.crt | sed 's/^/ /'`
1033 local client_key
=`cat ~/.osm/client.key | sed 's/^/ /'`
1034 cat << EOF > ~/.osm/lxd-credentials.yaml
1038 auth-type: certificate
1046 lxc config trust add
local: ~
/.osm
/client.crt
1047 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~
/.osm
/lxd-cloud.yaml
--force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-cloud.yaml
1048 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~
/.osm
/lxd-credentials.yaml || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-credentials.yaml
# Derive the controller endpoint IP and credentials for the OSM services.
1051 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1052 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1053 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
1055 track juju_controller
1057 if [ -z "$OSM_VCA_SECRET" ]; then
1058 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_STACK_NAME)
1059 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
1060 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
1062 if [ -z "$OSM_VCA_PUBKEY" ]; then
1063 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
1064 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
1066 if [ -z "$OSM_VCA_CACERT" ]; then
1067 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1068 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1069 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
1071 if [ -z "$OSM_VCA_APIPROXY" ]; then
1072 OSM_VCA_APIPROXY
=$DEFAULT_IP
1073 [ -z "$OSM_VCA_APIPROXY" ] && FATAL
"Cannot obtain juju api proxy"
1078 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1079 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
# BUG(review): missing '$' — the test below checks the literal string
# "OSM_DATABASE_COMMONKEY" (always non-empty), so this FATAL can never fire.
# Upstream intent: [ -z "$OSM_DATABASE_COMMONKEY" ].
1080 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
1083 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1086 #Installs Kubernetes and deploys osm services
1087 if [ -n "$KUBERNETES" ]; then
1090 init_kubeadm
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
1094 #install_docker_compose
1095 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1099 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1102 generate_docker_env_files
1104 if [ -n "$KUBERNETES" ]; then
1105 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1106 # uninstall OSM MONITORING
1107 uninstall_k8s_monitoring
1108 track uninstall_k8s_monitoring
1110 #remove old namespace
1111 remove_k8s_namespace
$OSM_STACK_NAME
1114 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml
$OSM_DOCKER_TAG
# BUG(review): missing space before ']' — bash will report a syntax/usage
# error here. Upstream intent: [ -n "$INSTALL_PLA" ].
1117 if [ -n "$INSTALL_PLA"]; then
1118 # optional PLA install
1119 deploy_osm_pla_service
1121 track deploy_osm_services_k8s
1122 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1123 # install OSM MONITORING
1124 install_k8s_monitoring
1125 track install_k8s_monitoring
1129 remove_stack
$OSM_STACK_NAME
1130 create_docker_network
1132 generate_osmclient_script
1134 install_prometheus_nodeexporter
1136 [ -n "$INSTALL_VIMEMU" ] && install_vimemu
&& track vimemu
1137 [ -n "$INSTALL_ELK" ] && deploy_elk
&& track elk
1140 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
# telemetry ping (output discarded)
1143 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null
# Installs the vim-emu (emulated VIM) docker container: installs OpenVSwitch,
# clones the vim-emu repo into a temp dir, builds the image and starts the
# container (attached to the OSM network in lightweight mode), then prints
# connection hints.
function install_vimemu() {
    # FIX: was plain `echo "\n..."`, which printed a literal "\n"; the rest of
    # the file consistently uses `echo -e` for escape sequences.
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    # NOTE(review): no -y here, so this can prompt in a non-interactive run.
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): upstream sleeps here before querying the container IP; that
    # line is missing from this copy of the script.
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo " export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo " osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the OSM monitoring add-on on Kubernetes by delegating to the
# devops helper script (after making every k8s helper executable).
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Removes the OSM monitoring add-on by delegating to the devops helper script.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints every installer option/variable, used by --showopts for debugging.
# FIX: the OSM_WORK_DIR line previously echoed $OSM_STACK_NAME, so the work
# dir was reported with the wrong value (the stack name was printed twice).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # was: echo "OSM_WORK_DIR=$OSM_STACK_NAME"  (wrong variable)
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
# Body of the telemetry `track` function ($1 = event name): computes the
# elapsed time since SESSION_ID, builds a Woopra tracking URL tagged with the
# install mode (binsrc / lxd / lw) and fires it with wget, discarding output.
# NOTE(review): the function's opening line ("function track() {") and its
# closing brace are missing from this copy of the script.
1234 duration
=$
((ctime
- SESSION_ID
))
1235 url
="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1236 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1238 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name
="binsrc"
1239 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name
="lxd"
1240 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name
="lw"
1241 event_name
="${event_name}_$1"
1242 url
="${url}&event=${event_name}&ce_duration=${duration}"
1243 wget
-q -O /dev
/null
$url
# ---- installer defaults -------------------------------------------------
# NOTE(review): several default assignments present upstream (e.g. UNINSTALL,
# UPDATE, OSM_STACK_NAME, port defaults) are missing from this copy.
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseSEVEN"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# session start time, used by track() to report install duration
SESSION_ID=$(date +%s)
OSM_VCA_CLOUDNAME="localhost"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# working directories and docker image defaults
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# validation regex for k8s namespace names (-s option)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# ---- command-line option parsing ----------------------------------------
# NOTE(review): this copy of the script is truncated — the individual case
# labels (b), r), c), ... , the 'case'/'esac' keywords and the closing 'done'
# are missing; only the arm bodies survive below.
1305 while getopts ":b:r:c:k:u:R:D:o:m:H:S:s:w:t:U:P:A:l:L:K:-: hy" o
; do
# -r: package repository name
1312 REPOSITORY
="${OPTARG}"
1313 REPO_ARGS
+=(-r "$REPOSITORY")
# -c: container orchestrator (swarm is the default, k8s enables KUBERNETES)
1316 [ "${OPTARG}" == "swarm" ] && continue
1317 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
1318 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
# -k: repository public key URL
1322 REPOSITORY_KEY
="${OPTARG}"
1323 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
# -u: repository base URL
1326 REPOSITORY_BASE
="${OPTARG}"
1327 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
# -R: release
1331 REPO_ARGS
+=(-R "$RELEASE")
# -D: devops path
1334 OSM_DEVOPS
="${OPTARG}"
# -o: install-only targets
1338 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1339 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1340 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
# -m: modules to rebuild
1343 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1344 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1345 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1346 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1347 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1348 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1349 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1350 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1351 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1352 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1353 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1354 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1355 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1356 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
# -H / -S: VCA host and secret
1359 OSM_VCA_HOST
="${OPTARG}"
1362 OSM_VCA_SECRET
="${OPTARG}"
# -s: stack name / k8s namespace (validated against RE_CHECK under k8s)
1365 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1368 # when specifying workdir, do not use sudo for access
1370 OSM_WORK_DIR
="${OPTARG}"
# -t: docker tag
1373 OSM_DOCKER_TAG
="${OPTARG}"
# -U: docker user
1376 DOCKER_USER
="${OPTARG}"
# -P: VCA public key (read from file)
1379 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
# -A: VCA API proxy
1382 OSM_VCA_APIPROXY
="${OPTARG}"
# -l / -L / -K: external LXD cloud/credential files and controller name
1385 LXD_CLOUD_FILE
="${OPTARG}"
1388 LXD_CRED_FILE
="${OPTARG}"
1391 CONTROLLER_NAME
="${OPTARG}"
# --long-options arm
1394 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1395 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1396 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1397 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1398 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1399 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1400 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1401 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1402 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1403 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1404 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
1405 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1406 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1407 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1408 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1409 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1410 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1411 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1412 [ "${OPTARG}" == "pullimages" ] && continue
1413 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1414 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && continue
1415 [ "${OPTARG}" == "bundle" ] && continue
1416 [ "${OPTARG}" == "k8s" ] && continue
1417 [ "${OPTARG}" == "lxd" ] && continue
1418 [ "${OPTARG}" == "lxd-cred" ] && continue
1419 [ "${OPTARG}" == "microstack" ] && continue
1420 [ "${OPTARG}" == "tag" ] && continue
1421 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1422 echo -e "Invalid option: '--$OPTARG'\n" >&2
# :) and \?) arms: missing argument / unknown option
1426 echo "Option -$OPTARG requires an argument" >&2
1430 echo -e "Invalid option: '-$OPTARG'\n" >&2
# Sanity checks on the -m (rebuild) selection:
#  * "NONE" cannot be combined with any other module
#  * "PLA" alone requires --pla to have been given
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# ---- top-level installer flow -------------------------------------------
# NOTE(review): this copy of the script is truncated — several 'fi'/'else'
# lines, the dump_vars/exit under --showopts and other statements are missing;
# the surviving lines are kept byte-identical below.
1448 if [ -n "$SHOWOPTS" ]; then
# charmed install path: delegate entirely to the charmed_(un)install helpers
1453 if [ -n "$CHARMED" ]; then
1454 if [ -n "$UNINSTALL" ]; then
1455 /usr
/share
/osm-devops
/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
1457 /usr
/share
/osm-devops
/installers
/charmed_install.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
1460 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1462 echo "1. Get the NBI IP with the following command:"
1464 echo "juju status --format yaml | yq r - applications.nbi-k8s.address"
1466 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1468 echo "export OSM_HOSTNAME=<NBI-IP>"
1470 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1472 echo "export OSM_HOSTNAME=<previous-IP> >> ~/.bashrc"
1479 # if develop, we force master
1480 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
# prerequisite packages for the non-charmed path
1482 need_packages
="git wget curl tar"
1483 echo -e "Checking required packages: $need_packages"
1484 dpkg
-l $need_packages &>/dev
/null \
1485 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1486 || sudo apt-get update \
1487 || FATAL
"failed to run apt-get update"
1488 dpkg
-l $need_packages &>/dev
/null \
1489 ||
! echo -e "Installing $need_packages requires root privileges." \
1490 || sudo apt-get
install -y $need_packages \
1491 || FATAL
"failed to install $need_packages"
1492 sudo snap
install jq
# locate (or clone) the devops repo and pick the refspec to install from
1493 if [ -z "$OSM_DEVOPS" ]; then
1494 if [ -n "$TEST_INSTALLER" ]; then
1495 echo -e "\nUsing local devops repo for OSM installation"
1496 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
1498 echo -e "\nCreating temporary dir for OSM installation"
1499 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1500 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1502 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
1504 if [ -z "$COMMIT_ID" ]; then
1505 echo -e "\nGuessing the current stable release"
1506 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1507 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1509 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1510 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1512 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1514 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
# source the shared helper functions from the devops repo
1518 .
$OSM_DEVOPS/common
/all_funcs
# uninstall / install-only short-circuit paths
1520 [ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight
&& echo -e "\nDONE" && exit 0
1521 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
1522 #[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
1523 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
1524 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1525 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1527 #Installation starts here
# telemetry ping (output discarded)
1528 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README.txt
&> /dev
/null
1531 [ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight
&& echo -e "\nDONE" && exit 0
1532 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
1533 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1534 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
1537 echo -e "Checking required packages: lxd"
1538 lxd
--version &>/dev
/null || FATAL
"lxd not present, exiting."
1539 [ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd
1541 # use local devops for containers
1542 export OSM_USE_LOCAL_DEVOPS
=true
1546 #Install vim-emu (optional)
1547 [ -n "$INSTALL_VIMEMU" ] && install_docker_ce
&& install_vimemu
# final telemetry ping (output discarded)
1549 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-7.0
-seven/README2.txt
&> /dev
/null