2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
# Help text for the installer (body of the usage/help function).
# Fix: "confifured" -> "configured" in the --nolxd description.
echo -e "usage: $0 [OPTIONS]"
echo -e "Install OSM from binaries or source code (by default, from binaries)"
echo -e "     -r <repo>:      use specified repository name for osm packages"
echo -e "     -R <release>:   use specified release for osm binaries (deb packages, lxd images, ...)"
echo -e "     -u <repo base>: use specified repository url for osm packages"
echo -e "     -k <repo key>:  use specified repository public key url"
echo -e "     -b <refspec>:   install OSM from source code using a specific branch (master, v2.0, ...) or tag"
echo -e "                     -b master          (main dev branch)"
echo -e "                     -b v2.0            (v2.0 branch)"
echo -e "                     -b tags/v1.1.0     (a specific tag)"
echo -e "     -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>.  If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
echo -e "     -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
echo -e "     -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
echo -e "     -H <VCA host>   use specific juju host controller IP"
echo -e "     -S <VCA secret> use VCA/juju secret key"
echo -e "     -P <VCA pubkey> use VCA/juju public key file"
echo -e "     -C <VCA cacert> use VCA/juju CA certificate file"
echo -e "     -A <VCA apiproxy> use VCA/juju API proxy"
echo -e "     --vimemu:       additionally deploy the VIM emulator as a docker container"
echo -e "     --elk_stack:    additionally deploy an ELK docker stack for event logging"
echo -e "     --pla:          install the PLA module for placement support"
echo -e "     -m <MODULE>:    install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
echo -e "     -o <ADDON>:     ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
echo -e "     -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
echo -e "     -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
echo -e "     -D <devops path> use local devops installation path"
echo -e "     -w <work dir>   Location to store runtime installation"
echo -e "     -t <docker tag> specify osm docker tag (default is latest)"
echo -e "     -l:             LXD cloud yaml file"
echo -e "     -L:             LXD credentials yaml file"
echo -e "     -K:             Specifies the name of the controller to use - The controller must be already bootstrapped"
echo -e "     --nolxd:        do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
echo -e "     --nodocker:     do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
echo -e "     --nojuju:       do not juju, assumes already installed"
echo -e "     --nodockerbuild:do not build docker images (use existing locally cached images)"
echo -e "     --nohostports:  do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e "     --nohostclient: do not install the osmclient"
echo -e "     --uninstall:    uninstall OSM: remove the containers and delete NAT rules"
echo -e "     --source:       install OSM from source code using the latest stable tag"
echo -e "     --develop:      (deprecated, use '-b master') install OSM from source code using the master branch"
echo -e "     --pullimages:   pull/run osm images from docker.io/opensourcemano"
echo -e "     --k8s_monitor:  install the OSM kubernetes monitoring with prometheus and grafana"
echo -e "     --volume:       create a VM volume when installing to OpenStack"
# echo -e "     --reconfigure:  reconfigure the modules (DO NOT change NAT rules)"
# echo -e "     --update:       update to the latest stable release or to the latest commit if using a specific branch"
echo -e "     --showopts:     print chosen options and exit (only for debugging)"
echo -e "     -y:             do not prompt for confirmation, assumes yes"
echo -e "     -h / --help:    print this help"
echo -e "     --charmed:                   Deploy and operate OSM with Charms on k8s"
echo -e "     [--bundle <bundle path>]:    Specify with which bundle to deploy OSM with charms (--charmed option)"
echo -e "     [--k8s <kubeconfig path>]:   Specify with which kubernetes to deploy OSM with charms (--charmed option)"
echo -e "     [--vca <name>]:              Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
echo -e "     [--lxd <yaml path>]:         Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
echo -e "     [--lxd-cred <yaml path>]:    Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
echo -e "     [--microstack]:              Installs microstack as a vim. (--charmed option)"
echo -e "     [--ha]:                      Installs High Availability bundle. (--charmed option)"
echo -e "     [--tag]:                     Docker image tag"
79 # takes a juju/accounts.yaml file and returns the password specific
80 # for a controller. I wrote this using only bash tools to minimize
81 # additions of other packages
function parse_juju_password {
   # Print the password of the given controller ($1), read from juju's
   # ~/.local/share/juju/accounts.yaml, using only sed/awk (no yaml tooling).
   password_file="${HOME}/.local/share/juju/accounts.yaml"
   local controller_name=$1
   # s: optional whitespace, w: YAML key characters,
   # fs: an unlikely field separator (ASCII 0x1c) used between sed captures.
   local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
   sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
   awk -F$fs -v controller=$controller_name '{
      # $1 = leading indent, $2 = key, $3 = value (may be empty for map keys)
      indent = length($1)/2;
      vname[indent] = $2;
      for (i in vname) {if (i > indent) {delete vname[i]}}
      if (length($3) > 0) {
         # Build the dotted path of the current key and match it against the
         # requested controller; emit only its "password" value.
         vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
         if (match(vn,controller) && match($2,"password")) {
             printf("%s",$3);
         }
      }
   }'
}
function generate_secret(){
    # Emit a 32-character random alphanumeric secret on stdout.
    # Reading /dev/urandom directly (instead of `head /dev/urandom | tr`)
    # guarantees exactly 32 characters regardless of where newline bytes
    # happen to fall in the random stream.
    tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32
}
function remove_volumes() {
    # Remove the persistent storage of an OSM deployment.
    if [ -n "$KUBERNETES" ]; then
        # K8s install: $1 is the host path holding the namespace volumes.
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        # Swarm install: $1 is the stack name; remove each named docker volume.
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
function remove_network() {
    # Delete the docker overlay network "net<stack>" created at install time.
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
function remove_iptables() {
    # Delete the host DNAT rule that proxied the juju API (port 17070)
    # towards the VCA/juju controller.
    stack=$1
    if [ -z "$OSM_VCA_HOST" ]; then
        # Ask juju for the controller endpoint IP if not already known.
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    if [ -z "$DEFAULT_IP" ]; then
        # Work out the host IP on the default-route interface.
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only attempt the delete (-D) when the rule actually exists (-C succeeds),
    # then persist the new ruleset.
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
function remove_stack() {
    # Remove a docker swarm stack and wait (up to ~30s) until all of its
    # containers are gone.
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
    fi
}
#removes osm deployments and services
function remove_k8s_namespace() {
    # Deleting the namespace removes every deployment/service inside it.
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Tear down helm/tiller only when no releases remain deployed.
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        rm -rf $HOME/.helm
    fi
}
function remove_crontab_job() {
    # Remove the scheduled update-juju-lxc-images job from the user's crontab.
    # FIX: the pattern was single-quoted ('${OSM_DEVOPS}/...'), so the variable
    # was never expanded and the job (installed with the expanded path by
    # update_juju_images) was never matched nor removed. Use double quotes.
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
function uninstall_osmclient() {
    # Purge both the legacy python2 and the current python3 osmclient packages.
    # Two separate invocations so a missing package does not abort the other.
    sudo apt-get remove --purge -y python-osmclient
    sudo apt-get remove --purge -y python3-osmclient
}
196 #Uninstall lightweight OSM: remove dockers
197 function uninstall_lightweight
() {
198 if [ -n "$INSTALL_ONLY" ]; then
199 if [ -n "$INSTALL_ELK" ]; then
200 echo -e "\nUninstalling OSM ELK stack"
202 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
205 echo -e "\nUninstalling OSM"
206 if [ -n "$KUBERNETES" ]; then
207 if [ -n "$INSTALL_K8S_MONITOR" ]; then
208 # uninstall OSM MONITORING
209 uninstall_k8s_monitoring
211 remove_k8s_namespace
$OSM_STACK_NAME
214 remove_stack
$OSM_STACK_NAME
217 echo "Now osm docker images and volumes will be deleted"
218 newgrp docker
<< EONG
219 docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
220 docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
221 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
222 docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
223 docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
224 docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
225 docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
226 docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
227 docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
230 if [ -n "$NGUI" ]; then
231 newgrp docker
<< EONG
232 docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
235 newgrp docker
<< EONG
236 docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
240 if [ -n "$KUBERNETES" ]; then
241 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
242 remove_volumes
$OSM_NAMESPACE_VOL
244 remove_volumes
$OSM_STACK_NAME
245 remove_network
$OSM_STACK_NAME
247 [ -z "$CONTROLLER_NAME" ] && remove_iptables
$OSM_STACK_NAME
248 echo "Removing $OSM_DOCKER_WORK_DIR"
249 $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
250 [ -z "$CONTROLLER_NAME" ] && sg lxd
-c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
254 echo "Some docker images will be kept in case they are used by other docker stacks"
255 echo "To remove them, just run 'docker image prune' in a terminal"
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # FIX: the negation was missing, so the package was "installed" only when
    # dpkg -l reported it already present (and skipped when actually missing),
    # contradicting the "Not installed." message below.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf answers so the install is fully unattended.
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e "   Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}

function FATAL(){
    # Print the fatal error message and abort the installation.
    echo "FATAL error: Cannot install OSM due to \"$1\""
    exit 1
}
function update_juju_images(){
    # Ensure a weekly crontab job (Sat 04:00) refreshes the juju LXC images,
    # then run a refresh immediately.
    if ! crontab -l | grep update-juju-lxc-images; then
        (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    fi
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
289 function install_lxd
() {
290 # Apply sysctl production values for optimal performance
291 sudo
cp ${OSM_DEVOPS}/installers
/60-lxd-production.conf
/etc
/sysctl.d
/60-lxd-production.conf
295 sudo apt-get remove
--purge -y liblxc1 lxc-common lxcfs lxd lxd-client
296 sudo snap
install lxd
297 sudo apt-get
install zfsutils-linux
-y
300 sudo usermod
-a -G lxd
`whoami`
301 cat ${OSM_DEVOPS}/installers
/lxd-preseed.conf |
sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd
-c "lxd init --preseed"
302 sg lxd
-c "lxd waitready"
303 DEFAULT_INTERFACE
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
304 [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
305 DEFAULT_MTU
=$
(ip addr show
$DEFAULT_INTERFACE | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
306 sg lxd
-c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
307 sg lxd
-c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
308 #sudo systemctl stop lxd-bridge
309 #sudo systemctl --system daemon-reload
310 #sudo systemctl enable lxd-bridge
311 #sudo systemctl start lxd-bridge
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer falls back to the default action in $2 (if any).
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        # NOTE: "A || B && return" parses as "(A || B) && return" — intended.
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
328 function install_osmclient
(){
329 CLIENT_RELEASE
=${RELEASE#"-R "}
330 CLIENT_REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
331 CLIENT_REPOSITORY
=${REPOSITORY#"-r "}
332 CLIENT_REPOSITORY_BASE
=${REPOSITORY_BASE#"-u "}
333 key_location
=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
334 curl
$key_location | sudo apt-key add
-
335 sudo add-apt-repository
-y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
337 sudo apt-get
install -y python3-pip
338 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
339 sudo
-H LC_ALL
=C python3
-m pip
install -U python-magic pyangbind verboselogs
340 sudo apt-get
install -y python3-osm-im python3-osmclient
341 #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
342 #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
343 #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
344 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME
=`lxc list | awk '($2=="SO-ub"){print $6}'`
345 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME
=`lxc list | awk '($2=="RO"){print $6}'`
346 echo -e "\nOSM client installed"
347 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
348 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
349 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
350 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
352 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
353 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
354 echo " export OSM_HOSTNAME=<OSM_host>"
359 function install_prometheus_nodeexporter
(){
360 if (systemctl
-q is-active node_exporter
)
362 echo "Node Exporter is already running."
364 echo "Node Exporter is not active, installing..."
365 if getent passwd node_exporter
> /dev
/null
2>&1; then
366 echo "node_exporter user exists"
368 echo "Creating user node_exporter"
369 sudo useradd
--no-create-home --shell /bin
/false node_exporter
371 wget
-q https
://github.com
/prometheus
/node_exporter
/releases
/download
/v
$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.
tar.gz
-P /tmp
/
372 sudo
tar -C /tmp
-xf /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.
tar.gz
373 sudo
cp /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64
/node_exporter
/usr
/local
/bin
374 sudo chown node_exporter
:node_exporter
/usr
/local
/bin
/node_exporter
375 sudo
rm -rf /tmp
/node_exporter-
$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64
*
376 sudo
cp ${OSM_DEVOPS}/installers
/docker
/prometheus_exporters
/node_exporter.service
/etc
/systemd
/system
/node_exporter.service
377 sudo systemctl daemon-reload
378 sudo systemctl restart node_exporter
379 sudo systemctl
enable node_exporter
380 echo "Node Exporter has been activated in this host."
function uninstall_prometheus_nodeexporter(){
    # Tear down node_exporter: stop and disable the unit, remove the service
    # file and binary, and delete the dedicated system user.
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    sudo rm /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm /usr/local/bin/node_exporter
}
395 function install_docker_ce
() {
396 # installs and configures Docker CE
397 echo "Installing Docker CE ..."
398 sudo apt-get
-qq update
399 sudo apt-get
install -y apt-transport-https ca-certificates software-properties-common
400 curl
-fsSL https
://download.docker.com
/linux
/ubuntu
/gpg | sudo apt-key add
-
401 sudo add-apt-repository
"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
402 sudo apt-get
-qq update
403 sudo apt-get
install -y docker-ce
404 echo "Adding user to group 'docker'"
405 sudo groupadd
-f docker
406 sudo usermod
-aG docker
$USER
408 sudo service docker restart
409 echo "... restarted Docker service"
410 sg docker
-c "docker version" || FATAL
"Docker installation failed"
411 echo "... Docker CE installation done"
function install_docker_compose() {
    # installs and configures docker-compose (pinned 1.18.0 release binary
    # for the current OS/arch, fetched from the official GitHub releases)
    echo "Installing Docker Compose ..."
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    # Make the snap-installed juju binary reachable from this shell.
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    # NOTE(review): the source shows one elided line here; upstream invokes
    # update_juju_images at this point — confirm against the repository.
    update_juju_images
    echo "Finished installation of juju"
}
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # NOTE(review): the awk body is double-quoted, so $1 is expanded by the
    # shell (empty) and awk prints the full matching line; wc -l still yields
    # the intended count of matching controllers.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print $1}"|wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
function juju_createproxy() {
    # DNAT host port 17070 to the VCA/juju controller so the juju API is
    # reachable via the host's default IP; persist the rule.
    check_install_iptables_persistent

    # Add (-A) the rule only when the check (-C) shows it is not present yet.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
450 function generate_docker_images
() {
451 echo "Pulling and generating docker images"
452 _build_from
=$COMMIT_ID
453 [ -z "$_build_from" ] && _build_from
="master"
455 echo "OSM Docker images generated from $_build_from"
457 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
458 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
459 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
460 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
462 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q KAFKA
; then
463 sg docker
-c "docker pull wurstmeister/zookeeper" || FATAL
"cannot get zookeeper docker image"
464 sg docker
-c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL
"cannot get kafka docker image"
467 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MONGO
; then
468 sg docker
-c "docker pull mongo" || FATAL
"cannot get mongo docker image"
471 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
472 sg docker
-c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL
"cannot get prometheus docker image"
475 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS-CADVISOR
; then
476 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
479 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q GRAFANA
; then
480 sg docker
-c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL
"cannot get grafana docker image"
483 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI ||
echo $TO_REBUILD |
grep -q KEYSTONE-DB
; then
484 sg docker
-c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL
"cannot get keystone-db docker image"
487 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
488 sg docker
-c "docker pull mysql:5" || FATAL
"cannot get mysql docker image"
491 if [ -n "$PULL_IMAGES" ]; then
492 sg docker
-c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL
"cannot pull MON docker image"
493 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q MON
; then
494 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/MON
495 git
-C ${LWTEMPDIR}/MON checkout
${COMMIT_ID}
496 sg docker
-c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL
"cannot build MON docker image"
499 if [ -n "$PULL_IMAGES" ]; then
500 sg docker
-c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL
"cannot pull POL docker image"
501 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q POL
; then
502 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/POL
503 git
-C ${LWTEMPDIR}/POL checkout
${COMMIT_ID}
504 sg docker
-c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL
"cannot build POL docker image"
507 if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
508 sg docker
-c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL
"cannot pull PLA docker image"
509 elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] ||
echo $TO_REBUILD |
grep -q PLA
; then
510 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/PLA
511 git
-C ${LWTEMPDIR}/PLA checkout
${COMMIT_ID}
512 sg docker
-c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL
"cannot build PLA docker image"
515 if [ -n "$PULL_IMAGES" ]; then
516 sg docker
-c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL
"cannot pull NBI docker image"
517 sg docker
-c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL
"cannot pull KEYSTONE docker image"
518 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI
; then
519 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/NBI
520 git
-C ${LWTEMPDIR}/NBI checkout
${COMMIT_ID}
521 sg docker
-c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL
"cannot build NBI docker image"
522 sg docker
-c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL
"cannot build KEYSTONE docker image"
525 if [ -n "$PULL_IMAGES" ]; then
526 sg docker
-c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL
"cannot pull RO docker image"
527 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
528 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/RO
529 git
-C ${LWTEMPDIR}/RO checkout
${COMMIT_ID}
530 sg docker
-c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL
"cannot build RO docker image"
533 if [ -n "$PULL_IMAGES" ]; then
534 sg docker
-c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL
"cannot pull LCM RO docker image"
535 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LCM
; then
536 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LCM
537 git
-C ${LWTEMPDIR}/LCM checkout
${COMMIT_ID}
538 sg docker
-c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL
"cannot build LCM docker image"
541 if [ -n "$NGUI" ]; then
542 if [ -n "$PULL_IMAGES" ]; then
543 sg docker
-c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL
"cannot pull ng-ui docker image"
544 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NG-UI
; then
545 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/NG-UI
546 git
-C ${LWTEMPDIR}/NG-UI checkout
${COMMIT_ID}
547 sg docker
-c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL
"cannot build NG-UI docker image"
550 if [ -n "$PULL_IMAGES" ]; then
551 sg docker
-c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL
"cannot pull light-ui docker image"
552 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-UI
; then
553 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/LW-UI
554 git
-C ${LWTEMPDIR}/LW-UI checkout
${COMMIT_ID}
555 sg docker
-c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL
"cannot build LW-UI docker image"
559 if [ -n "$PULL_IMAGES" ]; then
560 sg docker
-c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL
"cannot pull osmclient docker image"
561 elif [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q LW-osmclient
; then
562 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
565 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
566 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
569 echo "Finished generation of docker images"
function cmp_overwrite() {
    # Copy $1 to $2; when the destination exists with different content,
    # ask before overwriting (default: No). Identical files are left alone.
    file1="$1"
    file2="$2"
    # FIX: was `if ! $(cmp ...)`, which only works through bash's obscure
    # "exit status of the last command substitution" rule; call cmp directly.
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
}
584 function generate_docker_env_files() {
585 echo "Doing a backup of existing env files
"
586 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
587 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
588 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
589 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
590 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
591 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
592 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
593 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
594 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
596 echo "Generating docker env files
"
597 if [ -n "$KUBERNETES" ]; then
598 #Kubernetes resources
599 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
600 [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
602 if [ -n "$NGUI" ]; then
604 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
607 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
609 if [ -n "$INSTALL_PLA" ]; then
610 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
614 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
615 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
618 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
619 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
620 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
621 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
622 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
624 # Prometheus Exporters files
625 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
626 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
630 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
631 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
634 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
635 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
637 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
640 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
641 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
643 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
646 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
647 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
649 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
652 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
653 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
655 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
658 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
659 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
661 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
664 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
665 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
668 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
669 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
672 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
673 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
675 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
679 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
680 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
681 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
683 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
684 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
688 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
689 SERVICE_PASSWORD
=$
(generate_secret
)
690 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
691 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
693 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
694 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
695 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
696 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
700 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
701 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
702 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
706 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
707 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
708 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
711 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
712 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
714 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
717 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
718 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
720 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
723 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
724 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
726 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
729 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
730 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
732 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
737 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
738 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
742 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env
]; then
743 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
746 echo "Finished generation of docker env files"
function generate_osmclient_script() {
    # Render a tiny executable wrapper so "osm" on the host runs the osmclient
    # sidecar container attached to the stack's overlay network.
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    # Prerequisite for HTTPS apt repositories
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Add Google's apt signing key and the upstream kubernetes repository
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # Refresh package indexes after adding the repo so the pinned
    # 1.15.0-00 packages below can actually be resolved
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # $1 - path to the kubeadm cluster configuration YAML
    # Quote the path so a config file under a path with spaces still works.
    sudo kubeadm init --config "$1"
}
function kube_config_dir() {
    # Copy the kubeadm-generated admin kubeconfig into the user's ~/.kube so
    # kubectl works without sudo afterwards.
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # Ensure the target directory exists before copying; without it the
    # cp below would fail on a fresh host.
    mkdir -p $HOME/.kube
    sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
    # Hand ownership to the invoking user so plain kubectl can read it
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel manifest into a throw-away directory and apply it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P $CNI_DIR
    # Check the apply directly instead of testing $? on a separate line
    kubectl apply -f $CNI_DIR || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    # One generic k8s secret per component, each sourced from the env file
    # rendered earlier into $OSM_DOCKER_WORK_DIR. The secret name is
    # "<component>-secret" and the env file is "<component>.env".
    kubectl create ns $OSM_STACK_NAME
    local component
    for component in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${component}-secret -n $OSM_STACK_NAME \
            --from-env-file=$OSM_DOCKER_WORK_DIR/${component}.env
    done
}
#taints K8s master node
function taint_master_node() {
    # Remove the NoSchedule taint from the master so OSM workloads can be
    # scheduled on a single-node cluster.
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
}
#deploys osm pods and services
function deploy_osm_services() {
    # All OSM pod/service manifests live in $OSM_K8S_WORK_DIR; apply the
    # whole directory into the stack's namespace.
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
function deploy_osm_pla_service() {
    # corresponding to parse_yaml: pin the PLA image to the selected docker tag
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at the stack's dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services: apply the PLA manifests
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
function install_helm() {
    # Install the helm v2 client if it is not already on the PATH
    helm > /dev/null 2>&1
    if [ $? != 0 ] ; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm helm-v2.15.2.tar.gz
    fi

    # Checking if tiller has being configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller

        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120; counter=0
        while (( counter < tiller_timeout ))
        do
            # awk quoting fixed: the '}' belongs inside the awk program
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # NOTE(review): loop scaffolding (sleep/increment) was lost in this
            # copy and is reconstructed — confirm cadence against upstream.
            sleep 2
            counter=$((counter + 2))
        done
    fi
}
function parse_yaml() {
    # Rewrite the image references in the K8s manifests so each OSM service
    # pulls $DOCKER_USER/<service>:<tag> instead of the default
    # opensourcemano image.
    # $1 - docker tag to pin (callers pass $OSM_DOCKER_TAG)
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    # Restored: without this assignment the sed below substitutes an empty tag
    TAG=$1
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
function namespace_vol() {
    # Point every hostPath volume in the stateful service manifests at the
    # per-stack volume directory instead of the default /var/lib/osm.
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
function init_docker_swarm() {
    # With a non-default MTU, docker_gwbridge must be pre-created with the
    # right MTU before "swarm init", on a free 172.x subnet adjacent to the
    # subnets already used by existing docker networks.
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
function create_docker_network() {
    # Attachable overlay network shared by the stack and the osmclient sidecar
    echo "creating network"
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
function deploy_lightweight() {
    echo "Deploying lightweight build"
    # NOTE(review): several per-service default port assignments were lost in
    # this copy; the values below are reconstructed — confirm against upstream.
    OSM_NBI_PORT=9999
    OSM_RO_PORT=9090
    OSM_KEYSTONE_PORT=5000
    OSM_UI_PORT=80
    OSM_MON_PORT=8662
    OSM_PROM_PORT=9090
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000

    if [ -n "$NO_HOST_PORTS" ]; then
        # Expose container ports only (no host port mapping)
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # Map host ports to container ports (host:container); prometheus is
        # the only service published on a different host port
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the deployment environment; docker stack deploy sources it below
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): the polling-loop initialization and increments were lost in
    # this copy and are reconstructed here — confirm against upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        # Poll Kibana's status endpoint until it answers 200
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
function add_local_k8scluster() {
    # Register a dummy VIM account and then the installer's own kubernetes
    # cluster in OSM, using the local kubeconfig credentials.
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    # NOTE(review): the trailing arguments of this command (k8s version and the
    # cluster name) were lost in this copy and are reconstructed — confirm upstream.
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}
1012 function install_lightweight
() {
1013 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
1014 [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1015 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR
1016 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
1017 [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
1020 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
1023 if [ -n "$KUBERNETES" ]; then
1024 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
1025 1. Install and configure LXD
1027 3. Install docker CE
1028 4. Disable swap space
1029 5. Install and initialize Kubernetes
1030 as pre-requirements.
1031 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
1034 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
1038 echo "Installing lightweight build of OSM"
1039 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
1040 trap 'rm -rf "${LWTEMPDIR}"' EXIT
1041 DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
1042 [ -z "$DEFAULT_IF" ] && DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
1043 [ -z "$DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
1044 DEFAULT_IP
=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
1045 [ -z "$DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
1046 DEFAULT_MTU
=$
(ip addr show
${DEFAULT_IF} | perl
-ne 'if (/mtu\s(\d+)/) {print $1;}')
1048 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
1049 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
1050 need_packages_lw
="snapd"
1051 echo -e "Checking required packages: $need_packages_lw"
1052 dpkg
-l $need_packages_lw &>/dev
/null \
1053 ||
! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
1054 || sudo apt-get update \
1055 || FATAL
"failed to run apt-get update"
1056 dpkg
-l $need_packages_lw &>/dev
/null \
1057 ||
! echo -e "Installing $need_packages_lw requires root privileges." \
1058 || sudo apt-get
install -y $need_packages_lw \
1059 || FATAL
"failed to install $need_packages_lw"
1065 [ -z "$INSTALL_NOJUJU" ] && install_juju
1068 if [ -z "$OSM_VCA_HOST" ]; then
1069 if [ -z "$CONTROLLER_NAME" ]; then
1070 if [ -n "$LXD_CLOUD_FILE" ]; then
1071 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1072 OSM_VCA_CLOUDNAME
="lxd-cloud"
1073 juju add-cloud
$OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud
$OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
1074 juju add-credential
$OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential
$OSM_VCA_CLOUDNAME lxd-cloud-creds
-f $LXD_CRED_FILE
1076 juju_createcontroller
1078 OSM_VCA_CLOUDNAME
="lxd-cloud"
1079 if [ -n "$LXD_CLOUD_FILE" ]; then
1080 [ -z "$LXD_CRED_FILE" ] && FATAL
"The installer needs the LXD credential yaml if the LXD is external"
1081 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
1082 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f $LXD_CRED_FILE
1085 cat << EOF > ~/.osm/lxd-cloud.yaml
1089 auth-types: [certificate]
1090 endpoint: "https://$DEFAULT_IP:8443"
1092 ssl-hostname-verification: false
1094 openssl req
-nodes -new -x509 -keyout ~
/.osm
/client.key
-out ~
/.osm
/client.crt
-days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
1095 local server_cert
=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
1096 local client_cert
=`cat ~/.osm/client.crt | sed 's/^/ /'`
1097 local client_key
=`cat ~/.osm/client.key | sed 's/^/ /'`
1098 cat << EOF > ~/.osm/lxd-credentials.yaml
1102 auth-type: certificate
1110 lxc config trust add
local: ~
/.osm
/client.crt
1111 juju add-cloud
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~
/.osm
/lxd-cloud.yaml
--force || juju update-cloud lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-cloud.yaml
1112 juju add-credential
-c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~
/.osm
/lxd-credentials.yaml || juju update-credential lxd-cloud
-c $CONTROLLER_NAME -f ~
/.osm
/lxd-credentials.yaml
1115 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1116 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
1117 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
1119 track juju_controller
1121 if [ -z "$OSM_VCA_SECRET" ]; then
1122 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_STACK_NAME)
1123 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
1124 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
1126 if [ -z "$OSM_VCA_PUBKEY" ]; then
1127 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
1128 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
1130 if [ -z "$OSM_VCA_CACERT" ]; then
1131 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1132 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
1133 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
1135 if [ -z "$OSM_VCA_APIPROXY" ]; then
1136 OSM_VCA_APIPROXY
=$DEFAULT_IP
1137 [ -z "$OSM_VCA_APIPROXY" ] && FATAL
"Cannot obtain juju api proxy"
1142 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
1143 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
1144 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
1147 [ -n "$INSTALL_NODOCKER" ] || install_docker_ce
1150 #Installs Kubernetes and deploys osm services
1151 if [ -n "$KUBERNETES" ]; then
1154 init_kubeadm
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
1158 #install_docker_compose
1159 [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
1163 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
1166 generate_docker_env_files
1168 if [ -n "$KUBERNETES" ]; then
1169 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1170 # uninstall OSM MONITORING
1171 uninstall_k8s_monitoring
1172 track uninstall_k8s_monitoring
1174 #remove old namespace
1175 remove_k8s_namespace
$OSM_STACK_NAME
1178 [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml
$OSM_DOCKER_TAG
1182 if [ -n "$INSTALL_PLA"]; then
1183 # optional PLA install
1184 deploy_osm_pla_service
1186 track deploy_osm_services_k8s
1187 install_k8s_storageclass
1188 track k8s_storageclass
1193 if [ -n "$INSTALL_K8S_MONITOR" ]; then
1194 # install OSM MONITORING
1195 install_k8s_monitoring
1196 track install_k8s_monitoring
1200 remove_stack
$OSM_STACK_NAME
1201 create_docker_network
1203 generate_osmclient_script
1205 install_prometheus_nodeexporter
1207 [ -n "$INSTALL_VIMEMU" ] && install_vimemu
&& track vimemu
1208 [ -n "$INSTALL_ELK" ] && deploy_elk
&& track elk
1211 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
1214 echo -e "Checking OSM health state..."
1215 if [ -n "$KUBERNETES" ]; then
1216 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} -k || \
1217 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1218 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
1221 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} || \
1222 echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
1223 echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
1226 track after_healthcheck
1228 [ -n "$KUBERNETES" ] && add_local_k8scluster
1229 track add_local_k8scluster
1232 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-8.0
-eight/README2.txt
&> /dev
/null
function install_to_openstack() {
    # Deploy OSM onto an OpenStack VM via the bundled Ansible playbook.
    # $1 - openrc file path OR clouds.yaml cloud name
    # $2 - external network name (mandatory)
    # $3 - setup_volume flag forwarded to the playbook
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # NOTE(review): sourcing of the openrc file was lost in this copy and
        # is reconstructed here — confirm against upstream.
        . $1
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        # $1 is not a file: treat it as a clouds.yaml cloud name
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi
}
function install_vimemu() {
    # Fixed: plain echo printed a literal "\n"; -e is needed (and is what the
    # rest of this script uses for escape sequences)
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): a short wait here was lost in this copy and is
    # reconstructed — confirm duration against upstream.
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "  export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "  osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
function install_k8s_monitoring() {
    # install OSM monitoring: make the devops helper scripts executable and
    # run the install entry point
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring via the devops helper script
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
function dump_vars(){
    # Print the effective installer configuration (used by --showopts/debugging).
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # BUG FIX: previously echoed $OSM_STACK_NAME under the OSM_WORK_DIR label,
    # reporting the wrong value
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
function track() {
    # Report an anonymized install-progress event ($1) to OSM's analytics
    # endpoint; SESSION_ID doubles as the tracking cookie and the install
    # start timestamp.
    # NOTE(review): the function header, the ctime assignment and the default
    # event name were lost in this copy and are reconstructed — confirm upstream.
    ctime=`date +%s`
    duration=$((ctime - SESSION_ID))
    url="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
    #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
    event_name="bin"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name="binsrc"
    [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name="lxd"
    [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name="lw"
    event_name="${event_name}_$1"
    url="${url}&event=${event_name}&ce_duration=${duration}"
    wget -q -O /dev/null $url
}
########## Installer defaults (overridable via command-line options) ##########
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
# Lightweight (container-based) install is the default mode
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# Install start time; also used as the tracking cookie by track()
SESSION_ID=`date +%s`
OSM_VCA_CLOUDNAME="localhost"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Working directories (per-stack subdir is derived later for non-default stacks)
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# Docker image selection defaults
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
# Left empty here; a secret is generated later during install if unset
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Regex used to validate the k8s namespace / stack name given with -s
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# Command-line option parsing loop.
# NOTE(review): this region is a mangled extraction — the `case "${o}" in`
# line, most case labels (r), c), n), ...), some arm bodies (e.g. the RELEASE
# and INSTALL_ONLY assignments) and the closing `esac`/`done` are missing from
# this view, and statements are split across lines with stale source line
# numbers fused in. Lines are kept byte-identical; comments label the
# recognizable option handlers.
1434 while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-: hy" o
; do
# -r <repo>: repository name, also forwarded to sub-installers via REPO_ARGS.
1441 REPOSITORY
="${OPTARG}"
1442 REPO_ARGS
+=(-r "$REPOSITORY")
# -c <orchestrator>: "swarm" (default, no-op) or "k8s" (sets KUBERNETES=y).
1445 [ "${OPTARG}" == "swarm" ] && continue
1446 [ "${OPTARG}" == "k8s" ] && KUBERNETES
="y" && continue
# NOTE(review): message says "-i" but this arm validates -c's argument —
# looks like a stale option letter in the error text; confirm upstream.
1447 echo -e "Invalid argument for -i : ' $OPTARG'\n" >&2
# -n <ui>: "lwui" (default light UI, no-op) or "ngui" (sets NGUI=y).
1451 [ "${OPTARG}" == "lwui" ] && continue
1452 [ "${OPTARG}" == "ngui" ] && NGUI
="y" && continue
1453 echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
# -k <repo key>: repository public key URL, forwarded via REPO_ARGS.
1457 REPOSITORY_KEY
="${OPTARG}"
1458 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
# -u <repo base>: repository base URL, forwarded via REPO_ARGS.
1461 REPOSITORY_BASE
="${OPTARG}"
1462 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
# -R <release>: forwarded via REPO_ARGS; the RELEASE="${OPTARG}" assignment
# itself is not visible in this view.
1466 REPO_ARGS
+=(-R "$RELEASE")
# -D <devops path>: use a local devops repository instead of cloning.
1469 OSM_DEVOPS
="${OPTARG}"
# -o <component>: install only one add-on (vimemu / elk_stack / k8s_monitor);
# the corresponding INSTALL_ONLY=y assignment is not visible in this view.
1473 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1474 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1475 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
# -O <openrc file | cloud name>: install OSM to an OpenStack infrastructure.
1478 INSTALL_TO_OPENSTACK
="y"
1479 if [ -n "${OPTARG}" ]; then
1480 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
1482 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
# -N <network>: OpenStack public network name for the OSM VM.
1487 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
# -m <module>: accumulate (space-separated) docker modules to rebuild from
# source in TO_REBUILD; validated later against "NONE"/"PLA" combinations.
1490 [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD
="$TO_REBUILD LW-UI" && continue
1491 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1492 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1493 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1494 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1495 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1496 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1497 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1498 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1499 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1500 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1501 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1502 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1503 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
# -H <VCA host> / -S <VCA secret>: juju controller coordinates.
1506 OSM_VCA_HOST
="${OPTARG}"
1509 OSM_VCA_SECRET
="${OPTARG}"
# -s <stack name | namespace>: when on k8s, must match RE_CHECK (DNS label).
1512 OSM_STACK_NAME
="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
# -w <workdir>: custom work directory.
1515 # when specifying workdir, do not use sudo for access
1517 OSM_WORK_DIR
="${OPTARG}"
# -t <tag>: docker tag for OSM images, also forwarded via REPO_ARGS.
1520 OSM_DOCKER_TAG
="${OPTARG}"
1521 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
# -U <user>: docker registry user.
1524 DOCKER_USER
="${OPTARG}"
# -P <pubkey file>: read the VCA public key from the given file.
1527 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
# -A <apiproxy>: VCA API proxy address.
1530 OSM_VCA_APIPROXY
="${OPTARG}"
# -l / -L: juju LXD cloud and credential YAML files.
1533 LXD_CLOUD_FILE
="${OPTARG}"
1536 LXD_CRED_FILE
="${OPTARG}"
# -K <controller>: reuse an existing juju controller by name.
1539 CONTROLLER_NAME
="${OPTARG}"
# Long-option handling (getopts "-:" trick: OPTARG carries the long name).
1542 [ "${OPTARG}" == "help" ] && usage
&& exit 0
# --source also disables image pulling (images are built instead).
1543 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1544 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1545 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1546 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1547 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1548 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1549 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1550 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1551 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1552 [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT
="y" && continue
1553 [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU
="y" && continue
1554 [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK
="y" && continue
1555 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1556 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1557 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="y" && continue
1558 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1559 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1560 [ "${OPTARG}" == "pullimages" ] && continue
1561 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1562 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && continue
# The following long options take their value in a later getopts iteration,
# so they are accepted here as no-ops.
1563 [ "${OPTARG}" == "bundle" ] && continue
1564 [ "${OPTARG}" == "k8s" ] && continue
1565 [ "${OPTARG}" == "lxd" ] && continue
1566 [ "${OPTARG}" == "lxd-cred" ] && continue
1567 [ "${OPTARG}" == "microstack" ] && continue
1568 [ "${OPTARG}" == "ha" ] && continue
1569 [ "${OPTARG}" == "tag" ] && continue
1570 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1571 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1572 echo -e "Invalid option: '--$OPTARG'\n" >&2
# Missing-argument (":") and unknown-option ("\?") diagnostics.
1576 echo "Option -$OPTARG requires an argument" >&2
1580 echo -e "Invalid option: '-$OPTARG'\n" >&2
# Sanity checks for the -m (module rebuild) selections:
# "NONE" cannot be combined with other modules, and "PLA" requires --pla.
# FATAL aborts the script; it is expected to be defined earlier in this file.
# (Quoted "$TO_REBUILD" in the pipeline; grep -q NONE is unaffected.)
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo "$TO_REBUILD" | grep -q NONE \
    && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] \
    && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# --showopts: dump the effective options and exit.
# NOTE(review): the body of this if (and its fi) is missing from this view.
1598 if [ -n "$SHOWOPTS" ]; then
# --charmed: delegate install/uninstall to the charmed (juju) installers,
# then print osmclient configuration steps.
# NOTE(review): mangled extraction — the else/fi lines between the uninstall
# and install branches (and the trailing exit) are missing from this view;
# lines below are kept byte-identical.
1603 if [ -n "$CHARMED" ]; then
1604 if [ -n "$UNINSTALL" ]; then
1605 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
1607 ${OSM_DEVOPS}/installers
/charmed_install.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr
/share
/osm-devops
-t $DOCKER_TAG "$@"
# Post-install guidance for configuring osmclient against the NBI.
1609 echo "Your installation is now complete, follow these steps for configuring the osmclient:"
1611 echo "1. Get the NBI IP with the following command:"
# Printed literally for the user to run; the nested quoting embeds a jq
# filter inside a backtick command substitution.
1613 echo NBI_IP
='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
1615 echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
1617 echo "export OSM_HOSTNAME=\$NBI_IP"
1619 echo "3. Add the previous command to your .bashrc for other Shell sessions"
1621 echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Host packages required before we can fetch and run the devops repo.
need_packages="git wget curl tar"

# Deploying OSM onto an OpenStack VM is handled entirely by a helper
# (defined elsewhere in this file); on success the script ends here.
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0
echo -e "Checking required packages: $need_packages"
# Only escalate to apt when a required package is missing. The negated echo
# in the middle of the chain informs the user and then forces the chain to
# proceed to the sudo step. $need_packages is intentionally unquoted so each
# package name is a separate dpkg/apt argument.
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
    || sudo apt-get update \
    || FATAL "failed to run apt-get update"
dpkg -l $need_packages &>/dev/null \
    || ! echo -e "Installing $need_packages requires root privileges." \
    || sudo apt-get install -y $need_packages \
    || FATAL "failed to install $need_packages"
# jq is needed later to parse juju/k8s JSON output.
sudo snap install jq
# Locate or fetch the devops repository when -D was not given.
# NOTE(review): mangled extraction — the else/fi lines of these nested ifs
# are missing from this view; lines below are kept byte-identical.
1646 if [ -z "$OSM_DEVOPS" ]; then
# --test: use the devops repo this script lives in.
1647 if [ -n "$TEST_INSTALLER" ]; then
1648 echo -e "\nUsing local devops repo for OSM installation"
1649 OSM_DEVOPS
="$(dirname $(realpath $(dirname $0)))"
# Otherwise clone devops into a temporary dir, removed on exit via trap.
1651 echo -e "\nCreating temporary dir for OSM installation"
1652 OSM_DEVOPS
="$(mktemp -d -q --tmpdir "installosm.XXXXXX
")"
1653 trap 'rm -rf "$OSM_DEVOPS"' EXIT
1655 git clone https
://osm.etsi.org
/gerrit
/osm
/devops.git
$OSM_DEVOPS
# No -b given: guess the latest stable tag (highest vX.Y tag by version sort).
1657 if [ -z "$COMMIT_ID" ]; then
1658 echo -e "\nGuessing the current stable release"
1659 LATEST_STABLE_DEVOPS
=`git -C $OSM_DEVOPS tag -l v[0-9].* | sort -V | tail -n1`
1660 [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0
1662 echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
1663 COMMIT_ID
="tags/$LATEST_STABLE_DEVOPS"
1665 echo -e "\nDEVOPS Using commit $COMMIT_ID"
1667 git
-C $OSM_DEVOPS checkout
$COMMIT_ID
# Source the shared helper functions (FATAL, ask_user, install_* ...).
1671 .
$OSM_DEVOPS/common
/all_funcs
# Lightweight uninstall ends the script here.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0
# Install-only mode (-o): deploy just the requested add-ons, then exit.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
# Best-effort install-tracking beacon: fetching this README lets the OSM
# project count installations; all output and failures are discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null

# Default (lightweight, docker-based) installation path; ends the script
# on success.
[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
1685 echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# Source installs are long; ask for confirmation unless -y was given.
# ask_user is provided by the sourced all_funcs helpers.
# NOTE(review): the closing fi of this if is missing from this view;
# lines below are kept byte-identical.
1686 if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
1687 ! ask_user
"The installation will take about 75-90 minutes. Continue (Y/n)? " y
&& echo "Cancelled!" && exit 1
# The source-install path needs lxd on the host (unless -c/--nolxd said
# otherwise earlier); FATAL aborts if it is absent.
echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Second best-effort tracking beacon, marking the end of the installation;
# all output and failures are discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null