2 # Copyright 2016 Telefónica Investigación y Desarrollo S.A.U.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
17 echo -e "usage: $0 [OPTIONS]"
18 echo -e "Install OSM from binaries or source code (by default, from binaries)"
20 echo -e " -r <repo>: use specified repository name for osm packages"
21 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
22 echo -e " -u <repo base>: use specified repository url for osm packages"
23 echo -e " -k <repo key>: use specified repository public key url"
24 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
25 echo -e " -b master (main dev branch)"
26 echo -e " -b v2.0 (v2.0 branch)"
27 echo -e " -b tags/v1.1.0 (a specific tag)"
29 echo -e " -c <orchestrator> deploy osm services using container <orchestrator>. Valid values are <k8s> or <swarm>. If -c is not used then osm will be deployed using default orchestrator. When used with --uninstall, osm services deployed by the orchestrator will be uninstalled"
30 echo -e " -n <ui> install OSM with Next Gen UI. Valid values are <lwui> or <ngui>. If -n is not specified osm will be installed with light-ui. When used with uninstall, osm along with the UI specified will be uninstalled"
31 echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
32 echo -e " -H <VCA host> use specific juju host controller IP"
33 echo -e " -S <VCA secret> use VCA/juju secret key"
34 echo -e " -P <VCA pubkey> use VCA/juju public key file"
35 echo -e " -C <VCA cacert> use VCA/juju CA certificate file"
36 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
37 echo -e " --vimemu: additionally deploy the VIM emulator as a docker container"
38 echo -e " --elk_stack: additionally deploy an ELK docker stack for event logging"
39 echo -e " --pla: install the PLA module for placement support"
40 echo -e " -m <MODULE>: install OSM but only rebuild the specified docker images (LW-UI, NBI, LCM, RO, MON, POL, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, PLA, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (vimemu, elk_stack, k8s_monitor)"
42 echo -e " -O <openrc file/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -D <devops path> use local devops installation path"
45 echo -e " -w <work dir> Location to store runtime installation"
46 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
47 echo -e " -l: LXD cloud yaml file"
48 echo -e " -L: LXD credentials yaml file"
49 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
50 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
51 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
52 echo -e " --nojuju: do not juju, assumes already installed"
53 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
54 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
55 echo -e " --nohostclient: do not install the osmclient"
56 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
57 echo -e " --source: install OSM from source code using the latest stable tag"
58 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
59 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
60 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
61 echo -e " --volume: create a VM volume when installing to OpenStack"
62 # echo -e " --reconfigure: reconfigure the modules (DO NOT change NAT rules)"
63 # echo -e " --update: update to the latest stable release or to the latest commit if using a specific branch"
64 echo -e " --showopts: print chosen options and exit (only for debugging)"
65 echo -e " -y: do not prompt for confirmation, assumes yes"
66 echo -e " -h / --help: print this help"
67 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
68 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
69 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
70 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
71 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
72 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
73 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
74 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
75 echo -e " [--tag]: Docker image tag"
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
function parse_juju_password {
    # $1 - controller name whose password is printed on stdout
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace; w: YAML key characters; fs: record separator (0x1C)
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent<FS>key<FS>value" records with sed, then walk
    # the nesting level in awk and print the value of the 'password' key found
    # under the requested controller.
    sed -ne "s|^\($s\):|\1|" \
         -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
         -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
}
# Emit a 32-character random alphanumeric secret on stdout (no trailing newline).
function generate_secret(){
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
}
# Remove OSM persistent storage.
# In kubernetes mode ($KUBERNETES set): $1 is the host volume directory to delete.
# In swarm mode: $1 is the stack name whose docker volumes are removed.
function remove_volumes() {
    if [ -n "$KUBERNETES" ]; then
        k8_volume=$1
        echo "Removing ${k8_volume}"
        $WORKDIR_SUDO rm -rf ${k8_volume}
    else
        stack=$1
        volumes="mongo_db mon_db osm_packages ro_db pol_db prom_db ro"
        for volume in $volumes; do
            sg docker -c "docker volume rm ${stack}_${volume}"
        done
    fi
}
# Remove the docker overlay network of stack $1 (named "net<stack>").
function remove_network() {
    stack=$1
    sg docker -c "docker network rm net${stack}"
}
# Delete the DNAT PREROUTING rule that proxied port 17070 on the default
# interface to the juju (VCA) controller of stack $1, then persist the change.
function remove_iptables() {
    stack=$1
    # Discover the controller IP from juju if not provided by the caller.
    if [ -z "$OSM_VCA_HOST" ]; then
        OSM_VCA_HOST=`sg lxd -c "juju show-controller ${stack}"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi

    # Discover the host IP on the default-route interface if not provided.
    if [ -z "$DEFAULT_IP" ]; then
        DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
        [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
        [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
        DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]}'`
        [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    fi

    # Only delete the rule if it actually exists (-C checks, -D deletes).
    if sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -D PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Remove docker swarm stack $1 and wait (up to ~30s) until all of its
# containers are gone; FATAL if they are not removed in time.
function remove_stack() {
    stack=$1
    if sg docker -c "docker stack ps ${stack}" ; then
        echo -e "\nRemoving stack ${stack}" && sg docker -c "docker stack rm ${stack}"
        COUNTER=0
        result=1
        while [ ${COUNTER} -lt 30 ]; do
            result=$(sg docker -c "docker stack ps ${stack}" | wc -l)
            #echo "Dockers running: $result"
            if [ "${result}" == "0" ]; then
                break
            fi
            let COUNTER=COUNTER+1
            sleep 1
        done
        if [ "${result}" == "0" ]; then
            echo "All dockers of the stack ${stack} were removed"
        else
            FATAL "Some dockers of the stack ${stack} could not be removed. Could not clean it."
        fi
        sleep 5
    fi
}
#removes osm deployments and services
# NOTE(review): the body of this function was lost in this copy; deleting the
# namespace ($1) removes every resource inside it — confirm against upstream.
function remove_k8s_namespace() {
    kubectl delete ns $1
}
#removes helm only if there is nothing deployed in helm
function remove_helm() {
    # Only tear helm down when no releases are deployed.
    if [ "$(helm ls -q)" == "" ] ; then
        sudo helm reset --force
        kubectl delete --namespace kube-system serviceaccount tiller
        kubectl delete clusterrolebinding tiller-cluster-rule
        sudo rm /usr/local/bin/helm
        # NOTE(review): reconstructed from upstream — removes the local helm cache
        rm -rf $HOME/.helm
    fi
}
# Remove the weekly crontab entry installed by update_juju_images.
function remove_crontab_job() {
    # BUG FIX: the grep pattern was single-quoted, so ${OSM_DEVOPS} was never
    # expanded and the literal string '${OSM_DEVOPS}/...' never matched the
    # (expanded) crontab line — the job was never removed. Double quotes fix it.
    crontab -l | grep -v "${OSM_DEVOPS}/installers/update-juju-lxc-images" | crontab -
}
# Purge both the legacy Python 2 and the Python 3 osmclient packages.
function uninstall_osmclient() {
    local pkg
    for pkg in python-osmclient python3-osmclient; do
        sudo apt-get remove --purge -y "$pkg"
    done
}
#Uninstall lightweight OSM: remove dockers
# NOTE(review): several control-flow lines and heredoc terminators were lost in
# this copy; the structure below is reconstructed — confirm against upstream.
function uninstall_lightweight() {
    if [ -n "$INSTALL_ONLY" ]; then
        # Addon-only mode: only remove the requested addon (ELK stack).
        if [ -n "$INSTALL_ELK" ]; then
            echo -e "\nUninstalling OSM ELK stack"
            remove_stack osm_elk
            $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR/osm_elk
        fi
    else
        echo -e "\nUninstalling OSM"
        if [ -n "$KUBERNETES" ]; then
            if [ -n "$INSTALL_K8S_MONITOR" ]; then
                # uninstall OSM MONITORING
                uninstall_k8s_monitoring
            fi
            remove_k8s_namespace $OSM_STACK_NAME
        else
            remove_stack $OSM_STACK_NAME
            remove_stack osm_elk
        fi
        echo "Now osm docker images and volumes will be deleted"
        # newgrp runs the heredoc with the 'docker' group active.
        newgrp docker << EONG
docker image rm ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}
docker image rm ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}
EONG

        if [ -n "$NGUI" ]; then
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}
EONG
        else
            newgrp docker << EONG
docker image rm ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}
EONG
        fi

        if [ -n "$KUBERNETES" ]; then
            OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
            remove_volumes $OSM_NAMESPACE_VOL
        else
            remove_volumes $OSM_STACK_NAME
            remove_network $OSM_STACK_NAME
        fi
        [ -z "$CONTROLLER_NAME" ] && remove_iptables $OSM_STACK_NAME
        echo "Removing $OSM_DOCKER_WORK_DIR"
        $WORKDIR_SUDO rm -rf $OSM_DOCKER_WORK_DIR
        # Only destroy the juju controller if we bootstrapped it ourselves.
        [ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju destroy-controller --destroy-all-models --yes $OSM_STACK_NAME"
    fi
    echo "Some docker images will be kept in case they are used by other docker stacks"
    echo "To remove them, just run 'docker image prune' in a terminal"
    return 0
}
#Safe unattended install of iptables-persistent
function check_install_iptables_persistent(){
    echo -e "\nChecking required packages: iptables-persistent"
    # BUG FIX: the condition was missing the '!' — the "Not installed" branch
    # (and the install itself) only ran when the package WAS installed.
    if ! dpkg -l iptables-persistent &>/dev/null; then
        echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
        # Preseed debconf so apt-get does not prompt interactively.
        echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
        echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
        sudo apt-get -yq install iptables-persistent
    fi
}
#Configure NAT rules, based on the current IP addresses of containers
# NOTE(review): the function header was lost in this copy; the name "nat" is
# reconstructed — confirm against upstream before relying on it.
function nat(){
    check_install_iptables_persistent

    echo -e "\nConfiguring NAT rules"
    echo -e " Required root privileges"
    sudo $OSM_DEVOPS/installers/nat_osm
}
280 echo "FATAL error: Cannot install OSM due to \"$1\""
# Ensure a weekly (Sat 04:00) crontab job refreshes the juju LXC images,
# then run one refresh immediately.
function update_juju_images(){
    # grep -q: only probe for the job; do not leak the matched line to stdout.
    crontab -l | grep -q update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
    ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
}
# Install LXD from snap (removing any apt-based LXD first) and configure it:
# preseed init with the host's default IP, and align the bridge/profile MTU
# with the default interface.
# NOTE(review): a few dropped lines (sysctl --system, section comments) are
# reconstructed from upstream — confirm.
function install_lxd() {
    # Apply sysctl production values for optimal performance
    sudo cp ${OSM_DEVOPS}/installers/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
    sudo sysctl --system

    # Install LXD snap
    sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
    sudo snap install lxd
    sudo apt-get install zfsutils-linux -y

    # Configure LXD
    sudo usermod -a -G lxd `whoami`
    cat ${OSM_DEVOPS}/installers/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
    sg lxd -c "lxd waitready"
    DEFAULT_INTERFACE=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_INTERFACE" ] && DEFAULT_INTERFACE=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    DEFAULT_MTU=$(ip addr show $DEFAULT_INTERFACE | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    sg lxd -c "lxc profile device set default eth0 mtu $DEFAULT_MTU"
    sg lxd -c "lxc network set lxdbr0 bridge.mtu $DEFAULT_MTU"
    #sudo systemctl stop lxd-bridge
    #sudo systemctl --system daemon-reload
    #sudo systemctl enable lxd-bridge
    #sudo systemctl start lxd-bridge
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask;   $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
function ask_user(){
    read -e -p "$1" USER_CONFIRMATION
    # Loop until a recognizable answer is given.
    while true ; do
        # Empty answer falls back to the default action in $2 (if allowed).
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        # ${var,,}: lowercase for case-insensitive comparison.
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
}
# Install the OSM client (python3-osmclient + IM) from the configured apt
# repository, and print the env-var hints for reaching the OSM host.
# Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
function install_osmclient(){
    # Strip the option prefixes the CLI parser left in these values.
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    # NOTE(review): 'apt-get update' reconstructed from upstream (dropped line).
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    #sed 's,OSM_SOL005=[^$]*,OSM_SOL005=True,' -i ${HOME}/.bashrc
    #echo 'export OSM_HOSTNAME=localhost' >> ${HOME}/.bashrc
    #echo 'export OSM_SOL005=True' >> ${HOME}/.bashrc
    # Classic (non-lightweight) installs expose OSM through LXC containers.
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo "     export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo "     export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo "     export OSM_HOSTNAME=<OSM_host>"
    fi
    return 0
}
# Install and activate the Prometheus node_exporter as a systemd service,
# running under a dedicated no-login user. No-op if it is already active.
# Globals read: PROMETHEUS_NODE_EXPORTER_TAG, OSM_DEVOPS
function install_prometheus_nodeexporter(){
    if systemctl -q is-active node_exporter; then
        echo "Node Exporter is already running."
    else
        echo "Node Exporter is not active, installing..."
        if getent passwd node_exporter > /dev/null 2>&1; then
            echo "node_exporter user exists"
        else
            echo "Creating user node_exporter"
            sudo useradd --no-create-home --shell /bin/false node_exporter
        fi
        # Download the pinned release, install the binary, clean up the tarball.
        wget -q https://github.com/prometheus/node_exporter/releases/download/v$PROMETHEUS_NODE_EXPORTER_TAG/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz -P /tmp/
        sudo tar -C /tmp -xf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64.tar.gz
        sudo cp /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64/node_exporter /usr/local/bin
        sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
        sudo rm -rf /tmp/node_exporter-$PROMETHEUS_NODE_EXPORTER_TAG.linux-amd64*
        sudo cp ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service /etc/systemd/system/node_exporter.service
        sudo systemctl daemon-reload
        sudo systemctl restart node_exporter
        sudo systemctl enable node_exporter
        echo "Node Exporter has been activated in this host."
    fi
    return 0
}
# Stop, disable and fully remove the node_exporter service, user and binary.
function uninstall_prometheus_nodeexporter(){
    sudo systemctl stop node_exporter
    sudo systemctl disable node_exporter
    # rm -f: keep the uninstall idempotent — do not abort if a file is already gone.
    sudo rm -f /etc/systemd/system/node_exporter.service
    sudo systemctl daemon-reload
    sudo userdel node_exporter
    sudo rm -f /usr/local/bin/node_exporter
    return 0
}
function install_docker_ce() {
    # installs and configures Docker CE
    echo "Installing Docker CE ..."
    sudo apt-get -qq update
    sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get -qq update
    sudo apt-get install -y docker-ce
    echo "Adding user to group 'docker'"
    sudo groupadd -f docker
    sudo usermod -aG docker $USER
    # NOTE(review): short pause reconstructed from upstream (dropped line);
    # gives the group change time to settle before restarting the daemon.
    sleep 2
    sudo service docker restart
    echo "... restarted Docker service"
    # Sanity check: run docker with the fresh group membership via sg.
    sg docker -c "docker version" || FATAL "Docker installation failed"
    echo "... Docker CE installation done"
    return 0
}
function install_docker_compose() {
    # installs and configures docker-compose (pinned to 1.18.0)
    echo "Installing Docker Compose ..."
    # $() used instead of legacy backticks for command substitution.
    sudo curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "... Docker Compose installation done"
}
# Install juju 2.7 from snap and make sure /snap/bin is on PATH.
function install_juju() {
    echo "Installing juju"
    sudo snap install juju --classic --channel=2.7/stable
    # Prepend /snap/bin only if it is not already in PATH.
    [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
    echo "Finished installation of juju"
    return 0
}
# Bootstrap a juju controller named $OSM_STACK_NAME on the LXD cloud if it
# does not exist yet, then verify exactly one matching controller is listed.
function juju_createcontroller() {
    if ! juju show-controller $OSM_STACK_NAME &> /dev/null; then
        # Not found created, create the controller
        sudo usermod -a -G lxd ${USER}
        sg lxd -c "juju bootstrap --bootstrap-series=xenial $OSM_VCA_CLOUDNAME $OSM_STACK_NAME"
    fi
    # BUG FIX: \$1 is escaped so awk (not the shell) expands it; previously the
    # shell substituted its own (empty) $1, making awk print the whole line.
    [ $(juju controllers | awk "/^${OSM_STACK_NAME}[\*| ]/{print \$1}" | wc -l) -eq 1 ] || FATAL "Juju installation failed"
}
# Add (if absent) a DNAT PREROUTING rule proxying port 17070 on the host's
# default IP to the juju (VCA) controller, and persist it.
function juju_createproxy() {
    check_install_iptables_persistent

    # -C checks for the rule; only append (-A) when it is not there yet.
    if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
        sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
        sudo netfilter-persistent save
    fi
}
# Pull third-party images and pull or build the OSM module images.
# Behavior per module: pull from ${DOCKER_USER} when $PULL_IMAGES is set;
# otherwise clone from gerrit and build locally when the module is selected
# by $TO_REBUILD (or when $TO_REBUILD is empty, i.e. build everything).
# NOTE(review): the 'fi' terminators between stanzas were lost in this copy
# and are reconstructed — confirm against upstream.
function generate_docker_images() {
    echo "Pulling and generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="master"

    echo "OSM Docker images generated from $_build_from"

    BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
    BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
    BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
    BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MONGO ; then
        sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q MON ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/MON
        git -C ${LWTEMPDIR}/MON checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/MON -f ${LWTEMPDIR}/MON/docker/Dockerfile -t ${DOCKER_USER}/mon --no-cache" || FATAL "cannot build MON docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q POL ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/POL
        git -C ${LWTEMPDIR}/POL checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/POL -f ${LWTEMPDIR}/POL/docker/Dockerfile -t ${DOCKER_USER}/pol --no-cache" || FATAL "cannot build POL docker image"
    fi

    # PLA is optional: only handled when $INSTALL_PLA is set or explicitly selected.
    if [ -n "$PULL_IMAGES" -a -n "$INSTALL_PLA" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/pla:${OSM_DOCKER_TAG}" || FATAL "cannot pull PLA docker image"
    elif [ -z "$TO_REBUILD" -a -n "$INSTALL_PLA" ] || echo $TO_REBUILD | grep -q PLA ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/PLA
        git -C ${LWTEMPDIR}/PLA checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/PLA -f ${LWTEMPDIR}/PLA/docker/Dockerfile -t ${DOCKER_USER}/pla --no-cache" || FATAL "cannot build PLA docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/nbi:${OSM_DOCKER_TAG}" || FATAL "cannot pull NBI docker image"
        sg docker -c "docker pull ${DOCKER_USER}/keystone:${OSM_DOCKER_TAG}" || FATAL "cannot pull KEYSTONE docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NBI
        git -C ${LWTEMPDIR}/NBI checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/NBI -f ${LWTEMPDIR}/NBI/Dockerfile.local -t ${DOCKER_USER}/nbi --no-cache" || FATAL "cannot build NBI docker image"
        sg docker -c "docker build ${LWTEMPDIR}/NBI/keystone -f ${LWTEMPDIR}/NBI/keystone/Dockerfile -t ${DOCKER_USER}/keystone --no-cache" || FATAL "cannot build KEYSTONE docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/ro:${OSM_DOCKER_TAG}" || FATAL "cannot pull RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/RO
        git -C ${LWTEMPDIR}/RO checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/RO -f ${LWTEMPDIR}/RO/Dockerfile-local -t ${DOCKER_USER}/ro --no-cache" || FATAL "cannot build RO docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/lcm:${OSM_DOCKER_TAG}" || FATAL "cannot pull LCM RO docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LCM ; then
        git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LCM
        git -C ${LWTEMPDIR}/LCM checkout ${COMMIT_ID}
        sg docker -c "docker build ${LWTEMPDIR}/LCM -f ${LWTEMPDIR}/LCM/Dockerfile.local -t ${DOCKER_USER}/lcm --no-cache" || FATAL "cannot build LCM docker image"
    fi

    # Exactly one UI image is handled: NG-UI when $NGUI is set, else light-ui.
    if [ -n "$NGUI" ]; then
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/ng-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull ng-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NG-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/NG-UI
            git -C ${LWTEMPDIR}/NG-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/NG-UI -f ${LWTEMPDIR}/NG-UI/docker/Dockerfile -t ${DOCKER_USER}/ng-ui --no-cache" || FATAL "cannot build NG-UI docker image"
        fi
    else
        if [ -n "$PULL_IMAGES" ]; then
            sg docker -c "docker pull ${DOCKER_USER}/light-ui:${OSM_DOCKER_TAG}" || FATAL "cannot pull light-ui docker image"
        elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-UI ; then
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/LW-UI
            git -C ${LWTEMPDIR}/LW-UI checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/LW-UI -f ${LWTEMPDIR}/LW-UI/docker/Dockerfile -t ${DOCKER_USER}/light-ui --no-cache" || FATAL "cannot build LW-UI docker image"
        fi
    fi

    if [ -n "$PULL_IMAGES" ]; then
        sg docker -c "docker pull ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" || FATAL "cannot pull osmclient docker image"
    elif [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q LW-osmclient ; then
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    echo "Finished generation of docker images"
}
# Copy $1 over $2 when they differ. If $2 already exists the user is asked
# first (default: no); cp -b keeps a backup of the overwritten file.
function cmp_overwrite() {
    file1="$1"
    file2="$2"
    # BUG FIX (idiom): was 'if ! $(cmp ...)' — running cmp inside a command
    # substitution and executing its (empty) output. 'cmp -s' compares
    # silently and returns the status directly.
    if ! cmp -s "${file1}" "${file2}"; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
}
584 function generate_docker_env_files() {
585 echo "Doing a backup of existing env files
"
586 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
587 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
588 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
589 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/lwui.env{,~}
590 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
591 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
592 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
593 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
594 $WORKDIR_SUDO cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
596 echo "Generating docker env files
"
597 if [ -n "$KUBERNETES" ]; then
598 #Kubernetes resources
599 $WORKDIR_SUDO cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
600 [ -n "$NGUI" ] && $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pods/ng-ui.yaml $OSM_K8S_WORK_DIR/ng-ui.yaml && $WORKDIR_SUDO rm $OSM_K8S_WORK_DIR/light-ui.yaml
602 if [ -n "$NGUI" ]; then
604 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose-ngui.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
607 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
609 if [ -n "$INSTALL_PLA" ]; then
610 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_pla/docker-compose.yaml $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml
614 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus
615 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus/prometheus.yml
618 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/grafana
619 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/dashboards-osm.yml $OSM_DOCKER_WORK_DIR/grafana/dashboards-osm.yml
620 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/datasource-prometheus.yml $OSM_DOCKER_WORK_DIR/grafana/datasource-prometheus.yml
621 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-sample-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-sample-dashboard.json
622 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/grafana/osm-system-dashboard.json $OSM_DOCKER_WORK_DIR/grafana/osm-system-dashboard.json
624 # Prometheus Exporters files
625 $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/prometheus_exporters
626 $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus_exporters/node_exporter.service $OSM_DOCKER_WORK_DIR/prometheus_exporters/node_exporter.service
630 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
631 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
634 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
635 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
637 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
640 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
641 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
643 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
646 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
647 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
649 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
652 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
653 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
655 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
658 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
659 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
661 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
664 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
665 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | $WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
668 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
669 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
672 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
673 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lcm.env
675 $WORKDIR_SUDO sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
679 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
680 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
681 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro-db.env
683 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
684 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/ro.env
688 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
689 SERVICE_PASSWORD
=$
(generate_secret
)
690 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
691 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone-db.env
693 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
694 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/keystone.env
695 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
696 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/keystone.env
700 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
701 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |
$WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/nbi.env
702 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/nbi.env
706 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
707 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
708 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
711 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
712 echo "OS_NOTIFIER_URI=http://${DEFAULT_IP}:8662" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
714 $WORKDIR_SUDO sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
717 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
718 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
720 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
723 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
724 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
726 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
729 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
730 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/mon.env
732 $WORKDIR_SUDO sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
737 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
738 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/pol.env
742 if [ ! -f $OSM_DOCKER_WORK_DIR/lwui.env
]; then
743 echo "OSMUI_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/lwui" |
$WORKDIR_SUDO tee -a $OSM_DOCKER_WORK_DIR/lwui.env
746 echo "Finished generation of docker env files"
#generates the wrapper script used to run the osmclient sidecar container
function generate_osmclient_script () {
    # The wrapper simply docker-runs the osmclient image attached to the stack network
    echo "docker run -ti --network net${OSM_STACK_NAME} ${DOCKER_USER}/osmclient:${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm
    $WORKDIR_SUDO chmod +x "$OSM_DOCKER_WORK_DIR/osm"
    echo "osmclient sidecar container can be found at: $OSM_DOCKER_WORK_DIR/osm"
}
#installs kubernetes packages
function install_kube() {
    # Prerequisite to fetch packages over HTTPS
    sudo apt-get update && sudo apt-get install -y apt-transport-https
    # Add the upstream Kubernetes apt repository and its signing key.
    # NOTE(review): apt-key is deprecated on newer Ubuntu releases; kept for
    # compatibility with the distros this installer targets.
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    sudo add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    # Refresh indices so the pinned versions below can be resolved
    sudo apt-get update
    echo "Installing Kubernetes Packages ..."
    # Versions pinned to keep the deployment reproducible
    sudo apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
}
#initializes kubernetes control plane
function init_kubeadm() {
    # $1: path to the kubeadm cluster configuration yaml
    sudo kubeadm init --config "$1"
}
# Copies the kubeadm admin kubeconfig into the invoking user's ~/.kube so
# kubectl works without sudo.
function kube_config_dir() {
    # Abort early if the control plane manifests are not where we expect them
    [ ! -d $K8S_MANIFEST_DIR ] && FATAL "Cannot Install Kubernetes"
    # Ensure the target directory exists before copying the config into it
    mkdir -p "$HOME/.kube"
    sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    # Hand ownership of the copied config to the invoking user
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
}
#deploys flannel as daemonsets
function deploy_cni_provider() {
    # Download the flannel manifest into a throw-away dir, cleaned on exit.
    # CNI_DIR stays global so the EXIT trap can still expand it.
    CNI_DIR="$(mktemp -d -q --tmpdir "flannel.XXXXXX")"
    trap 'rm -rf "${CNI_DIR}"' EXIT
    wget -q https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -P "$CNI_DIR"
    # '|| FATAL' replaces the fragile '[ $? -ne 0 ] &&' post-hoc status check
    kubectl apply -f "$CNI_DIR" \
        || FATAL "Cannot Install Flannel"
}
#creates secrets from env files which will be used by containers
function kube_secrets(){
    kubectl create ns $OSM_STACK_NAME
    # One generic secret per component, fed from its env file; the order
    # matches the original hand-written sequence.
    local module
    for module in lcm mon nbi ro-db ro keystone lwui pol; do
        kubectl create secret generic ${module}-secret -n $OSM_STACK_NAME \
            --from-env-file=$OSM_DOCKER_WORK_DIR/${module}.env
    done
}
#taints K8s master node
function taint_master_node() {
    # Single awk pass: pick the node whose ROLES column contains "master"
    # (previously two chained awk invocations)
    K8S_MASTER=$(kubectl get nodes | awk '$3~/master/ {print $1}')
    # Trailing "-" REMOVES the NoSchedule taint so workloads can run on the master
    kubectl taint node $K8S_MASTER node-role.kubernetes.io/master:NoSchedule-
}
#deploys osm pods and services
function deploy_osm_services() {
    # Apply every manifest in the OSM k8s work dir into the stack namespace
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
}
# Deploys the optional PLA (placement) service into the stack namespace.
function deploy_osm_pla_service() {
    # corresponding to parse_yaml: pin the PLA image to the selected docker tag
    [ ! $OSM_DOCKER_TAG == "7" ] && $WORKDIR_SUDO sed -i "s/opensourcemano\/pla:.*/opensourcemano\/pla:$OSM_DOCKER_TAG/g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to namespace_vol: point hostPath volumes at the stack's dir
    $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_DOCKER_WORK_DIR/osm_pla/pla.yaml
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
}
#Install helm and tiller
function install_helm() {
    # Install the helm client only if it is not already on PATH
    if ! helm > /dev/null 2>&1; then
        # Helm is not installed. Install helm
        curl https://get.helm.sh/helm-v2.15.2-linux-amd64.tar.gz --output helm-v2.15.2.tar.gz
        tar -zxvf helm-v2.15.2.tar.gz
        sudo mv linux-amd64/helm /usr/local/bin/helm
        rm helm-v2.15.2.tar.gz
    fi
    # Checking if tiller has been configured
    kubectl --namespace kube-system get serviceaccount tiller > /dev/null 2>&1
    if [ $? == 1 ] ; then
        # tiller account for kubernetes
        kubectl --namespace kube-system create serviceaccount tiller
        kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
        # HELM initialization
        helm init --service-account tiller
        # Wait for Tiller to be up and running. If timeout expires, continue installing
        tiller_timeout=120
        counter=0
        while (( counter < tiller_timeout )); do
            # fix: awk program was quoted as '{print $2'} (closing brace outside
            # the quotes) — works by accident, corrected here
            tiller_status=$(kubectl -n kube-system get deployment.apps/tiller-deploy --no-headers | awk '{print $2}')
            ( [ ! -z "$tiller_status" ] && [ $tiller_status == "1/1" ] ) && break
            # NOTE(review): poll interval reconstructed (original lines missing
            # from this excerpt) — confirm against upstream
            counter=$((counter + 5))
            sleep 5
        done
    fi
}
# Rewrites the OSM k8s manifests so every opensourcemano/<svc> image points at
# $DOCKER_USER/<svc>:$1.
function parse_yaml() {
    # fix: $TAG was read below but never assigned in the visible body;
    # restore the assignment from the function argument
    TAG=$1
    osm_services="nbi lcm ro pol mon light-ui ng-ui keystone"
    for osm in $osm_services; do
        $WORKDIR_SUDO sed -i "s/opensourcemano\/$osm:.*/$DOCKER_USER\/$osm:$TAG/g" $OSM_K8S_WORK_DIR/$osm.yaml
    done
}
# Repoint every stateful service manifest's hostPath volume from the default
# /var/lib/osm to the per-stack directory in $OSM_NAMESPACE_VOL.
function namespace_vol() {
    osm_services="nbi lcm ro pol mon kafka mongo mysql prometheus"
    local manifest
    for osm in $osm_services; do
        manifest="$OSM_K8S_WORK_DIR/$osm.yaml"
        $WORKDIR_SUDO sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" "$manifest"
    done
}
# Initializes a single-node docker swarm advertised on the default-route IP.
function init_docker_swarm() {
    if [ "${DEFAULT_MTU}" != "1500" ]; then
        # Non-standard MTU: recreate docker_gwbridge with the detected MTU
        # before 'swarm init' so overlay traffic is not fragmented.
        DOCKER_NETS=`sg docker -c "docker network list" | awk '{print $2}' | egrep -v "^ID$" | paste -d " " -s`
        # Pick the next free 172.x subnet after the highest one already in use
        DOCKER_GW_NET=`sg docker -c "docker network inspect ${DOCKER_NETS}" | grep Subnet | awk -F\" '{print $4}' | egrep "^172" | sort -u | tail -1 | awk -F\. '{if ($2 != 255) print $1"."$2+1"."$3"."$4; else print "-1";}'`
        sg docker -c "docker network create --subnet ${DOCKER_GW_NET} --opt com.docker.network.bridge.name=docker_gwbridge --opt com.docker.network.bridge.enable_icc=false --opt com.docker.network.bridge.enable_ip_masquerade=true --opt com.docker.network.driver.mtu=${DEFAULT_MTU} docker_gwbridge"
    fi
    sg docker -c "docker swarm init --advertise-addr ${DEFAULT_IP}"
}
# Creates the attachable overlay network shared by all OSM stack services.
function create_docker_network() {
    echo "creating network"
    # MTU is matched to the host's default interface to avoid fragmentation
    sg docker -c "docker network create --driver=overlay --attachable --opt com.docker.network.driver.mtu=${DEFAULT_MTU} net${OSM_STACK_NAME}"
    echo "creating network DONE"
}
# Deploys the lightweight (docker swarm) build of OSM: computes the port
# mappings, persists the compose environment, then runs 'docker stack deploy'.
function deploy_lightweight() {
    echo "Deploying lightweight build"
    OSM_KEYSTONE_PORT=5000
    OSM_PROM_CADVISOR_PORT=8080
    OSM_PROM_HOSTPORT=9091
    OSM_GRAFANA_PORT=3000
    [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
    #[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
    # NOTE(review): OSM_NBI_PORT, OSM_RO_PORT, OSM_UI_PORT, OSM_MON_PORT and
    # OSM_PROM_PORT are read below but their assignments are missing from this
    # excerpt — confirm they are set earlier in the file.

    if [ -n "$NO_HOST_PORTS" ]; then
        # Services are not exposed on the host: container port only
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
    else
        # host:container port mappings
        OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT:$OSM_NBI_PORT)
        OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT:$OSM_RO_PORT)
        OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
        OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
        OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
        OSM_PORTS+=(OSM_PROM_CADVISOR_PORTS=$OSM_PROM_CADVISOR_PORT:$OSM_PROM_CADVISOR_PORT)
        OSM_PORTS+=(OSM_GRAFANA_PORTS=$OSM_GRAFANA_PORT:$OSM_GRAFANA_PORT)
        #[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
        [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
    fi

    # Persist the environment consumed by docker-compose at deploy time
    echo "export ${OSM_PORTS[@]}" | $WORKDIR_SUDO tee $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export OSM_NETWORK=net${OSM_STACK_NAME}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export KEYSTONEDB_TAG=${KEYSTONEDB_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export PROMETHEUS_CADVISOR_TAG=${PROMETHEUS_CADVISOR_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
    echo "export GRAFANA_TAG=${GRAFANA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh

    pushd $OSM_DOCKER_WORK_DIR
    if [ -n "$INSTALL_PLA" ]; then
        # Include the PLA compose file when the optional placement service is requested
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml -c $OSM_DOCKER_WORK_DIR/osm_pla/docker-compose.yaml $OSM_STACK_NAME"
    else
        sg docker -c ". ./osm_ports.sh; docker stack deploy -c $OSM_DOCKER_WORK_DIR/docker-compose.yaml $OSM_STACK_NAME"
    fi
    popd

    echo "Finished deployment of lightweight build"
}
# Pulls the ELK images, deploys the osm_elk stack, then waits for Kibana and
# creates the default filebeat index pattern.
function deploy_elk() {
    echo "Pulling docker images for ELK"
    sg docker -c "docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTIC_VERSION}" || FATAL "cannot get elasticsearch docker image"
    sg docker -c "docker pull docker.elastic.co/beats/metricbeat:${ELASTIC_VERSION}" || FATAL "cannot get metricbeat docker image"
    sg docker -c "docker pull docker.elastic.co/beats/filebeat:${ELASTIC_VERSION}" || FATAL "cannot get filebeat docker image"
    sg docker -c "docker pull docker.elastic.co/kibana/kibana-oss:${ELASTIC_VERSION}" || FATAL "cannot get kibana docker image"
    sg docker -c "docker pull bobrik/curator:${ELASTIC_CURATOR_VERSION}" || FATAL "cannot get curator docker image"
    echo "Finished pulling elk docker images"
    $WORKDIR_SUDO mkdir -p "$OSM_DOCKER_WORK_DIR/osm_elk"
    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/osm_elk/* $OSM_DOCKER_WORK_DIR/osm_elk
    echo "Deploying ELK stack"
    sg docker -c "OSM_NETWORK=net${OSM_STACK_NAME} docker stack deploy -c $OSM_DOCKER_WORK_DIR/osm_elk/docker-compose.yml osm_elk"
    echo "Waiting for ELK stack to be up and running"
    # NOTE(review): poll parameters reconstructed (the initializing lines are
    # missing from this excerpt) — confirm against upstream.
    time=0
    step=5
    timelength=40
    elk_is_up=1
    while [ $time -le $timelength ]; do
        # Kibana answers its status endpoint with 200 once it is ready
        if [[ $(curl -f -XGET http://127.0.0.1:5601/status -I 2>/dev/null | grep "HTTP/1.1 200 OK" | wc -l ) -eq 1 ]]; then
            elk_is_up=0
            break
        fi
        sleep $step
        time=$((time+step))
    done
    if [ $elk_is_up -eq 0 ]; then
        echo "ELK is up and running. Trying to create index pattern..."
        #Create index pattern
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
          -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}" 2>/dev/null
        #Make it the default index
        curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
          "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
          -d"{\"value\":\"filebeat-*\"}" 2>/dev/null
    else
        echo "Cannot connect to Kibana to create index pattern."
        echo "Once Kibana is running, you can use the following instructions to create index pattern:"
        echo 'curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/saved_objects/index-pattern/filebeat-*" \
  -d"{\"attributes\":{\"title\":\"filebeat-*\",\"timeFieldName\":\"@timestamp\"}}"'
        echo 'curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \
  "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" \
  -d"{\"value\":\"filebeat-*\"}"'
    fi
    echo "Finished deployment of ELK stack"
}
# Registers a dummy VIM and attaches the freshly installed local Kubernetes
# cluster to it, so OSM can deploy KNFs on the host cluster.
function add_local_k8scluster() {
    /usr/bin/osm --all-projects vim-create \
          --name _system-osm-vim \
          --account_type dummy \
          --auth_url http://dummy \
          --user osm --password osm --tenant osm \
          --description "dummy" \
          --config '{management_network_name: mgmt}'
    # NOTE(review): the '--version' option and the positional cluster name are
    # reconstructed — those lines are missing from this excerpt; confirm
    # against the upstream installer.
    /usr/bin/osm --all-projects k8scluster-add \
          --creds ${HOME}/.kube/config \
          --vim _system-osm-vim \
          --k8s-nets '{"net1": null}' \
          --version '1.15' \
          --description "OSM Internal Cluster" \
          _system-osm-k8s
}
# Main driver for the lightweight install: prepares the work dir, bootstraps
# juju/LXD (or an external controller), gathers the VCA credentials, then
# deploys OSM either on Kubernetes or on docker swarm.
function install_lightweight() {
    [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
    [ -n "$KUBERNETES" ] && OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && $WORKDIR_SUDO mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    [ -n "$KUBERNETES" ] && $WORKDIR_SUDO cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml

    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."

    if [ -n "$KUBERNETES" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        [ -z "$ASSUME_YES" ] && ! ask_user "The installation will configure LXD, install juju, install docker CE and init a docker swarm, as pre-requirements. Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    fi

    echo "Installing lightweight build of OSM"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    # Determine the interface, IP and MTU of the default route
    DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$DEFAULT_IF" ] && DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]}'`
    [ -z "$DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    DEFAULT_MTU=$(ip addr show ${DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')

    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        need_packages_lw="snapd"
        echo -e "Checking required packages: $need_packages_lw"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "One or several required packages are not installed. Updating apt cache requires root privileges." \
          || sudo apt-get update \
          || FATAL "failed to run apt-get update"
        dpkg -l $need_packages_lw &>/dev/null \
          || ! echo -e "Installing $need_packages_lw requires root privileges." \
          || sudo apt-get install -y $need_packages_lw \
          || FATAL "failed to install $need_packages_lw"
        # NOTE(review): the LXD install/init steps (original lines 1060-1064)
        # are missing from this excerpt — restore from upstream if needed.
    fi

    [ -z "$INSTALL_NOJUJU" ] && install_juju

    if [ -z "$OSM_VCA_HOST" ]; then
        if [ -z "$CONTROLLER_NAME" ]; then
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                OSM_VCA_CLOUDNAME="lxd-cloud"
                juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
                juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
            fi
            juju_createcontroller
        else
            # External controller: register the local LXD as a cloud on it
            OSM_VCA_CLOUDNAME="lxd-cloud"
            if [ -n "$LXD_CLOUD_FILE" ]; then
                [ -z "$LXD_CRED_FILE" ] && FATAL "The installer needs the LXD credential yaml if the LXD is external"
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
            else
                # Generate a client certificate and trust it in the local LXD.
                # NOTE(review): both heredoc bodies are partially reconstructed —
                # several lines are missing from this excerpt; confirm against
                # the upstream installer before relying on them.
                mkdir -p ~/.osm
                cat << EOF > ~/.osm/lxd-cloud.yaml
clouds:
  lxd-cloud:
    type: lxd
    auth-types: [certificate]
    endpoint: "https://$DEFAULT_IP:8443"
    config:
      ssl-hostname-verification: false
EOF
                openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
                local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/        /'`
                local client_cert=`cat ~/.osm/client.crt | sed 's/^/        /'`
                local client_key=`cat ~/.osm/client.key | sed 's/^/        /'`
                cat << EOF > ~/.osm/lxd-credentials.yaml
credentials:
  lxd-cloud:
    lxd-cloud:
      auth-type: certificate
      server-cert: |
$server_cert
      client-cert: |
$client_cert
      client-key: |
$client_key
EOF
                lxc config trust add local: ~/.osm/client.crt
                juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
                juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
            fi
        fi
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_STACK_NAME"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    track juju_controller

    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    if [ -z "$OSM_VCA_APIPROXY" ]; then
        OSM_VCA_APIPROXY=$DEFAULT_IP
        [ -z "$OSM_VCA_APIPROXY" ] && FATAL "Cannot obtain juju api proxy"
    fi

    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # fix: the check previously tested the literal string
        # "OSM_DATABASE_COMMONKEY" (missing '$'), so it could never fire
        [ -z "$OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    fi

    [ -n "$INSTALL_NODOCKER" ] || install_docker_ce

    #Installs Kubernetes and deploys osm services
    if [ -n "$KUBERNETES" ]; then
        # NOTE(review): neighbouring steps (install_kube, kube_config_dir,
        # deploy_cni_provider and their track calls) are missing from this
        # excerpt — restore from upstream.
        init_kubeadm $OSM_DOCKER_WORK_DIR/cluster-config.yaml
    else
        #install_docker_compose
        [ -n "$INSTALL_NODOCKER" ] || init_docker_swarm
    fi

    [ -z "$DOCKER_NOBUILD" ] && generate_docker_images

    generate_docker_env_files

    if [ -n "$KUBERNETES" ]; then
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # uninstall OSM MONITORING
            uninstall_k8s_monitoring
            track uninstall_k8s_monitoring
        fi
        #remove old namespace
        remove_k8s_namespace $OSM_STACK_NAME
        # NOTE(review): the manifest-deployment steps (namespace_vol,
        # kube_secrets, deploy_osm_services) are missing from this excerpt —
        # restore from upstream.
        [ ! $OSM_DOCKER_TAG == "7" ] && parse_yaml $OSM_DOCKER_TAG
        # fix: original read '[ -n "$INSTALL_PLA"]' (no space before ']'),
        # which is a runtime error in test(1)
        if [ -n "$INSTALL_PLA" ]; then
            # optional PLA install
            deploy_osm_pla_service
        fi
        track deploy_osm_services_k8s
        if [ -n "$INSTALL_K8S_MONITOR" ]; then
            # install OSM MONITORING
            install_k8s_monitoring
            track install_k8s_monitoring
        fi
    else
        # docker swarm path
        remove_stack $OSM_STACK_NAME
        create_docker_network
        # NOTE(review): the deploy_lightweight call is missing from this
        # excerpt — restore from upstream.
        generate_osmclient_script
        install_prometheus_nodeexporter
        [ -n "$INSTALL_VIMEMU" ] && install_vimemu && track vimemu
        [ -n "$INSTALL_ELK" ] && deploy_elk && track elk
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient

    echo -e "Checking OSM health state..."
    if [ -n "$KUBERNETES" ]; then
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} -k || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
        track osm_unhealthy
    else
        $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_STACK_NAME} || \
        echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: docker service ls; docker stack ps ${OSM_STACK_NAME}" && \
        track osm_unhealthy
    fi
    track after_healthcheck

    [ -n "$KUBERNETES" ] && add_local_k8scluster
    track add_local_k8scluster

    # Best-effort reachability ping used for install tracking; output discarded
    wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null
}
# Installs OSM on an OpenStack VM via the bundled Ansible playbook.
#   $1: openrc file path, or cloud name from clouds.yaml
#   $2: external network name (required)
#   $3: whether to create/attach a volume ("true"/"false")
function install_to_openstack() {
    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    $WORKDIR_SUDO apt install -y python3-pip
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U pip

    # Install Ansible, OpenStack client and SDK
    $WORKDIR_SUDO -H LC_ALL=C python3 -m pip install -U python-openstackclient "openstacksdk<1" "ansible>=2.9,<3"

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        # NOTE(review): reconstructed — the line that sources the openrc file
        # is missing from this excerpt; confirm against upstream
        . "$1"
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e external_network_name=$2 -e installer_args="\"$OSM_INSTALLER_ARGS\"" \
        -e setup_volume=$3 -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    return $?
}
# Builds and starts the vim-emu (emulated VIM) docker container.
function install_vimemu() {
    # fix: plain echo would print a literal "\n"; -e interprets the escape
    echo -e "\nInstalling vim-emu"
    EMUTEMPDIR="$(mktemp -d -q --tmpdir "installosmvimemu.XXXXXX")"
    trap 'rm -rf "${EMUTEMPDIR}"' EXIT
    # install prerequisites (OVS is a must for the emulator to work)
    sudo apt-get install openvswitch-switch
    # clone vim-emu repository (attention: branch is currently master only)
    echo "Cloning vim-emu repository ..."
    git clone https://osm.etsi.org/gerrit/osm/vim-emu.git $EMUTEMPDIR
    # build vim-emu docker
    echo "Building vim-emu Docker container..."
    sg docker -c "docker build -t vim-emu-img -f $EMUTEMPDIR/Dockerfile --no-cache $EMUTEMPDIR/" || FATAL "cannot build vim-emu-img docker image"
    # start vim-emu container as daemon
    echo "Starting vim-emu Docker container 'vim-emu' ..."
    if [ -n "$INSTALL_LIGHTWEIGHT" ]; then
        # in lightweight mode, the emulator needs to be attached to netOSM
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' --network=net${OSM_STACK_NAME} -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    else
        # classic build mode
        sg docker -c "docker run --name vim-emu -t -d --restart always --privileged --pid='host' -v /var/run/docker.sock:/var/run/docker.sock vim-emu-img python examples/osm_default_daemon_topology_2_pop.py"
    fi
    echo "Waiting for 'vim-emu' container to start ..."
    # NOTE(review): wait length reconstructed (original line missing from excerpt)
    sleep 5
    export VIMEMU_HOSTNAME=$(sg docker -c "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vim-emu")
    echo "vim-emu running at ${VIMEMU_HOSTNAME} ..."
    # print vim-emu connection info
    echo -e "\nYou might be interested in adding the following vim-emu env variables to your .bashrc file:"
    echo "     export VIMEMU_HOSTNAME=${VIMEMU_HOSTNAME}"
    echo -e "To add the emulated VIM to OSM you should do:"
    echo "     osm vim-create --name emu-vim1 --user username --password password --auth_url http://${VIMEMU_HOSTNAME}:6001/v2.0 --tenant tenantName --account_type openstack"
}
# Installs the OSM monitoring stack onto the k8s cluster via the devops helper.
function install_k8s_monitoring() {
    # install OSM monitoring
    $WORKDIR_SUDO chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh
}
# Removes the OSM monitoring stack from the k8s cluster via the devops helper.
function uninstall_k8s_monitoring() {
    # uninstall OSM monitoring
    $WORKDIR_SUDO $OSM_DEVOPS/installers/k8s/uninstall_osm_k8s_monitoring.sh
}
# Prints the effective installer configuration (used by --showopts / debugging).
function dump_vars(){
    echo "DEVELOP=$DEVELOP"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_ELK=$INSTALL_ELK"
    #echo "INSTALL_PERFMON=$INSTALL_PERFMON"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "WORKDIR_SUDO=$WORKDIR_SUDO"
    # fix: this line previously printed $OSM_STACK_NAME under the
    # OSM_WORK_DIR label
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "OSM_STACK_NAME=$OSM_STACK_NAME"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "KUBERNETES=$KUBERNETES"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "Install from specific refspec (-b): $COMMIT_ID"
}
1352 duration
=$
((ctime
- SESSION_ID
))
1353 url
="http://www.woopra.com/track/ce?project=osm.etsi.org&cookie=${SESSION_ID}"
1354 #url="${url}&ce_campaign_name=${CAMPAIGN_NAME}"
1356 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_SOURCE" ] && event_name
="binsrc"
1357 [ -z "$INSTALL_LIGHTWEIGHT" ] && [ -n "$INSTALL_FROM_LXDIMAGES" ] && event_name
="lxd"
1358 [ -n "$INSTALL_LIGHTWEIGHT" ] && event_name
="lw"
1359 event_name
="${event_name}_$1"
1360 url
="${url}&event=${event_name}&ce_duration=${duration}"
1361 wget
-q -O /dev
/null
$url
# ---- Installer defaults (overridable via command-line options) ----
# NOTE(review): a few default assignments present in the full file are missing
# from this excerpt; only the visible ones are reproduced here.
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseEIGHT"
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
INSTALL_K8S_MONITOR=""
INSTALL_NOHOSTCLIENT=""
# Unique id (epoch seconds) used by the usage-tracking calls
SESSION_ID=$(date +%s)
OSM_VCA_CLOUDNAME="localhost"
# Package repository coordinates
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Working directories and host volumes
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="/etc/osm/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
# Docker image coordinates
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
PROMETHEUS_TAG=v2.4.3
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
# ELK stack versions
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
# Kubernetes settings
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Valid pattern for a k8s namespace / docker stack name (-s option)
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
# Parse command-line options; see usage() for the meaning of each flag.
# Fixes vs. previous revision:
#  * the -c arm reported "Invalid argument for -i"; message now names -c.
#  * removed a stray space from the optstring (it registered the space
#    character itself as a valid option).
while getopts ":b:r:c:n:k:u:R:D:o:O:m:N:H:S:s:w:t:U:P:A:l:L:K:-:hy" o; do
    case "${o}" in
        b)
            COMMIT_ID=${OPTARG}
            PULL_IMAGES=""
            ;;
        r)
            REPOSITORY="${OPTARG}"
            REPO_ARGS+=(-r "$REPOSITORY")
            ;;
        c)
            # container orchestrator: swarm (default behaviour) or k8s
            [ "${OPTARG}" == "swarm" ] && continue
            [ "${OPTARG}" == "k8s" ] && KUBERNETES="y" && continue
            echo -e "Invalid argument for -c : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        n)
            # UI flavour: lwui (default) or ngui
            [ "${OPTARG}" == "lwui" ] && continue
            [ "${OPTARG}" == "ngui" ] && NGUI="y" && continue
            echo -e "Invalid argument for -n : ' $OPTARG'\n" >&2
            usage && exit 1
            ;;
        k)
            REPOSITORY_KEY="${OPTARG}"
            REPO_ARGS+=(-k "$REPOSITORY_KEY")
            ;;
        u)
            REPOSITORY_BASE="${OPTARG}"
            REPO_ARGS+=(-u "$REPOSITORY_BASE")
            ;;
        R)
            RELEASE="${OPTARG}"
            REPO_ARGS+=(-R "$RELEASE")
            ;;
        D)
            OSM_DEVOPS="${OPTARG}"
            ;;
        o)
            # install a single optional component on top of an existing OSM
            INSTALL_ONLY="y"
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            ;;
        O)
            # deploy OSM to an OpenStack infrastructure (openrc file or cloud name)
            INSTALL_TO_OPENSTACK="y"
            if [ -n "${OPTARG}" ]; then
                OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
            else
                echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
                usage && exit 1
            fi
            ;;
        N)
            OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
            ;;
        m)
            # accumulate the list of modules to rebuild from source
            [ "${OPTARG}" == "LW-UI" ] && TO_REBUILD="$TO_REBUILD LW-UI" && continue
            [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
            [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
            [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
            [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
            [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
            [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
            [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
            [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
            [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
            [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
            [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
            [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
            [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
            ;;
        H)
            OSM_VCA_HOST="${OPTARG}"
            ;;
        S)
            OSM_VCA_SECRET="${OPTARG}"
            ;;
        s)
            # stack name (swarm) or namespace (k8s); validate k8s namespaces
            OSM_STACK_NAME="${OPTARG}" && [ -n "$KUBERNETES" ] && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
            ;;
        w)
            # when specifying workdir, do not use sudo for access
            WORKDIR_SUDO=
            OSM_WORK_DIR="${OPTARG}"
            ;;
        t)
            OSM_DOCKER_TAG="${OPTARG}"
            REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
            ;;
        U)
            DOCKER_USER="${OPTARG}"
            ;;
        P)
            # read the juju public key from the given file
            OSM_VCA_PUBKEY=$(cat ${OPTARG})
            ;;
        A)
            OSM_VCA_APIPROXY="${OPTARG}"
            ;;
        l)
            LXD_CLOUD_FILE="${OPTARG}"
            ;;
        L)
            LXD_CRED_FILE="${OPTARG}"
            ;;
        K)
            CONTROLLER_NAME="${OPTARG}"
            ;;
        -)
            # long options (--foo), dispatched on the option word
            [ "${OPTARG}" == "help" ] && usage && exit 0
            [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
            [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
            [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
            [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
            [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
            [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
            [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
            [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
            [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
            [ "${OPTARG}" == "lightweight" ] && INSTALL_LIGHTWEIGHT="y" && continue
            [ "${OPTARG}" == "vimemu" ] && INSTALL_VIMEMU="y" && continue
            [ "${OPTARG}" == "elk_stack" ] && INSTALL_ELK="y" && continue
            [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
            [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
            [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="y" && continue
            [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
            [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
            [ "${OPTARG}" == "pullimages" ] && continue
            [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
            [ "${OPTARG}" == "charmed" ] && CHARMED="y" && continue
            [ "${OPTARG}" == "bundle" ] && continue
            [ "${OPTARG}" == "k8s" ] && continue
            [ "${OPTARG}" == "lxd" ] && continue
            [ "${OPTARG}" == "lxd-cred" ] && continue
            [ "${OPTARG}" == "microstack" ] && continue
            [ "${OPTARG}" == "ha" ] && continue
            [ "${OPTARG}" == "tag" ] && continue
            [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
            [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
            echo -e "Invalid option: '--$OPTARG'\n" >&2
            usage && exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            usage && exit 1
            ;;
        \?)
            echo -e "Invalid option: '-$OPTARG'\n" >&2
            usage && exit 1
            ;;
        h)
            usage && exit 0
            ;;
        y)
            ASSUME_YES="y"
            ;;
        *)
            usage && exit 1
            ;;
    esac
done
# Sanity-check the accumulated -m (rebuild) selection.
# "-m NONE" is only meaningful on its own: reject it mixed with other modules.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && [[ "$TO_REBUILD" == *NONE* ]]; then
    FATAL "Incompatible option: -m NONE cannot be used with other -m options"
fi
# Rebuilding PLA only makes sense when the optional PLA module is enabled.
if [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ]; then
    FATAL "Incompatible option: -m PLA cannot be used without --pla option"
fi
# --showopts: dump the effective configuration and exit without installing.
if [ -n "$SHOWOPTS" ]; then
    dump_vars && exit 0
fi
# --charmed: delegate the whole (un)installation to the charmed installer
# scripts, then print osmclient configuration hints and stop.
# NOTE(review): the scripts are passed $DOCKER_TAG, while -t stores into
# OSM_DOCKER_TAG — confirm which variable is intended here.
if [ -n "$CHARMED" ]; then
    if [ -n "$UNINSTALL" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    else
        ${OSM_DEVOPS}/installers/charmed_install.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D /usr/share/osm-devops -t $DOCKER_TAG "$@"
    fi
    echo "Your installation is now complete, follow these steps for configuring the osmclient:"
    echo
    echo "1. Get the NBI IP with the following command:"
    echo
    echo NBI_IP='`juju status --format json | jq -rc '"'"'.applications."nbi-k8s".address'"'"'`'
    echo
    echo "2. Create the OSM_HOSTNAME environment variable with the NBI IP"
    echo
    echo "export OSM_HOSTNAME=\$NBI_IP"
    echo
    echo "3. Add the previous command to your .bashrc for other Shell sessions"
    echo
    echo "echo \"export OSM_HOSTNAME=\$NBI_IP\" >> ~/.bashrc"
    exit 0
fi
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"

# Tools this installer itself depends on (word-split on purpose below).
need_packages="git wget curl tar"

# -O: deploy OSM on OpenStack and stop here.
# Args are intentionally unquoted so empty values drop out of the call —
# presumably relied on when the optional net/volume flags are unset; verify.
[ -n "$INSTALL_TO_OPENSTACK" ] && install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME && echo -e "\nDONE" && exit 0

echo -e "Checking required packages: $need_packages"
# $need_packages must stay unquoted: dpkg -l takes one package per argument.
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "One or several required packages are not installed. Updating apt cache requires root privileges."
    sudo apt-get update || FATAL "failed to run apt-get update"
fi
if ! dpkg -l $need_packages &>/dev/null; then
    echo -e "Installing $need_packages requires root privileges."
    sudo apt-get install -y $need_packages || FATAL "failed to install $need_packages"
fi
sudo snap install jq
# Locate the OSM devops repo that drives the rest of the installation:
# use the local checkout when testing, otherwise clone into a temp dir
# (removed on exit) and check out the requested refspec — or, if none was
# given, the newest stable v* tag.
# Fix vs. previous revision: the tag pattern v[0-9].* was unquoted, so the
# shell could glob-expand it against files in the current directory before
# git ever saw it; it is now quoted. Backticks replaced with $( ).
if [ -z "$OSM_DEVOPS" ]; then
    if [ -n "$TEST_INSTALLER" ]; then
        echo -e "\nUsing local devops repo for OSM installation"
        OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
    else
        echo -e "\nCreating temporary dir for OSM installation"
        OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
        trap 'rm -rf "$OSM_DEVOPS"' EXIT

        git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS

        if [ -z "$COMMIT_ID" ]; then
            echo -e "\nGuessing the current stable release"
            # -V sorts version numbers correctly (v7.1.0 < v7.10.0)
            LATEST_STABLE_DEVOPS=$(git -C $OSM_DEVOPS tag -l "v[0-9].*" | sort -V | tail -n1)
            [ -z "$LATEST_STABLE_DEVOPS" ] && echo "Could not find the current latest stable release" && exit 0

            echo "Latest tag in devops repo: $LATEST_STABLE_DEVOPS"
            COMMIT_ID="tags/$LATEST_STABLE_DEVOPS"
        fi

        echo -e "\nDEVOPS Using commit $COMMIT_ID"
        git -C $OSM_DEVOPS checkout $COMMIT_ID
    fi
fi
# Pull in the shared helper functions (FATAL, ask_user, install_* ...).
. $OSM_DEVOPS/common/all_funcs

# Uninstall request for the lightweight deployment: tear down and stop.
[ -n "$INSTALL_LIGHTWEIGHT" ] && [ -n "$UNINSTALL" ] && uninstall_lightweight && echo -e "\nDONE" && exit 0

# -o <component>: add a single optional component to an existing OSM and stop.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_ELK" ] && deploy_elk
#[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_PERFMON" ] && deploy_perfmon
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_VIMEMU" ] && install_vimemu
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0

#Installation starts here
# Best-effort download-counter ping; all output and errors are discarded.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README.txt &> /dev/null

[ -n "$INSTALL_LIGHTWEIGHT" ] && install_lightweight && echo -e "\nDONE" && exit 0
echo -e "\nInstalling OSM from refspec: $COMMIT_ID"
# A source install is long; ask for confirmation unless -y was given.
if [ -n "$INSTALL_FROM_SOURCE" ] && [ -z "$ASSUME_YES" ]; then
    ! ask_user "The installation will take about 75-90 minutes. Continue (Y/n)? " y && echo "Cancelled!" && exit 1
fi

echo -e "Checking required packages: lxd"
lxd --version &>/dev/null || FATAL "lxd not present, exiting."
[ -n "$INSTALL_LXD" ] && echo -e "\nInstalling and configuring lxd" && install_lxd

# use local devops for containers
export OSM_USE_LOCAL_DEVOPS=true

#Install vim-emu (optional)
[ -n "$INSTALL_VIMEMU" ] && install_docker_ce && install_vimemu

# Second tracking ping marking the end of this phase.
wget -q -O- https://osm-download.etsi.org/ftp/osm-8.0-eight/README2.txt &> /dev/null