3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
# Print the installer's command-line help on stdout.
# NOTE(review): reconstructed from a corrupted source; the original
# "function usage(){" header and two help-text lines were lost and could
# not be recovered with confidence.
function usage(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    # NOTE(review): one help line was lost here in the corrupted source
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    # NOTE(review): one help line was lost here in the corrupted source
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <namespace> namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " --old-sa: install old Service Assurance framework (MON, POL); do not install Airflow and Pushgateway"
    echo -e " --ng-sa: install new Service Assurance framework (Airflow, AlertManager and Pushgateway) (enabled by default)"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
#
# Prints (stdout) the password of controller $1, read from
# ${HOME}/.local/share/juju/accounts.yaml. The sed stage flattens the
# YAML into \034-separated "indent FS key FS value" records; the awk
# stage tracks the nesting path and prints the value of the "password"
# key found under the requested controller.
# NOTE(review): the awk body lines lost in the corrupted source were
# restored from the upstream devops repo.
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key characters, fs: \034 separator
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        # forget deeper path components when the indentation decreases
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Derive the VCA (juju) connection variables unless already provided by
# the user: OSM_VCA_CLOUDNAME, OSM_VCA_HOST, OSM_VCA_SECRET,
# OSM_VCA_PUBKEY and OSM_VCA_CACERT. FATALs when a value cannot be
# obtained from the juju controller.
# NOTE(review): the "fi" terminators and closing brace lost in the
# corrupted source were restored.
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # controller IP: first api-endpoint reported by juju show-controller
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # CA cert is base64-encoded with newlines stripped
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
# Print a random 32-character alphanumeric secret on stdout.
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Ensure the space-separated apt packages listed in $1 are installed,
# installing any that are missing (requires sudo).
# NOTE(review): the package-presence check and "apt-get update" lines
# were lost in the corrupted source; restored as dpkg -l — TODO confirm
# against the upstream devops repo.
function check_packages() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NEEDED_PACKAGES="$1"
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        dpkg -l ${PACKAGE} >/dev/null 2>&1
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the re-ask loop lines lost in the corrupted source were
# restored (while true / done).
function ask_user(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    # keep asking until an accepted answer (or the default) is given
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install the OSM client and Information Model from the configured apt
# repository, plus their python requirements, and print connection
# hints for the user.
# NOTE(review): "else"/"fi" terminators and one statement (presumably
# "sudo apt-get update" after adding the repository) were lost in the
# corrupted source and restored from the upstream devops repo.
function install_osmclient(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # strip the option prefixes left by the argument parser
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev libmagic1
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # legacy (non-lightweight) installs expose the SO/RO container IPs
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=`lxc list | awk '($2=="SO-ub"){print $6}'`
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=`lxc list | awk '($2=="RO"){print $6}'`
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Log in to the docker registry with the credentials parsed from
# DOCKER_REGISTRY_URL (DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD).
# Fix: the original passed both "-p <password>" and "--password-stdin",
# which the docker CLI rejects, and never fed the password to stdin; the
# password is now supplied via stdin only (keeps it out of argv/ps).
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    printf '%s' "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Create the K8s "regcred" docker-registry secret in the osm namespace
# from the current user's docker credentials file.
function create_k8s_secret_regcred() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Creating K8s secret regcred with the docker registry credentials from $HOME/.docker/config.json"
    kubectl -n osm create secret generic regcred \
        --from-file=.dockerconfigjson=$HOME/.docker/config.json \
        --type=kubernetes.io/dockerconfigjson
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Pull the third-party docker images and the OSM module images required
# by the selected installation. TO_REBUILD restricts the set of images;
# MODULE_DOCKER_TAG overrides the tag for modules listed in TO_REBUILD.
# NOTE(review): "fi"/"done"/"else" terminators lost in the corrupted
# source were restored; one statement inside the registry branch
# (presumably a docker login step) could not be recovered — TODO confirm
# against the upstream devops repo.
function pull_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling docker images"

    if [ -n "${DOCKER_REGISTRY_URL}" ]; then
        # NOTE(review): a lost line here presumably logged in to the registry
        create_k8s_secret_registrycreds
    fi

    echo "Pulling non-OSM docker images"
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    # decide which OSM module images to pull: with NG-SA, Airflow/Webhook
    # replace the old POL module
    list_of_modules="MON NBI KEYSTONE RO LCM NG-UI osmclient"
    if [ -n "$INSTALL_NGSA" ]; then
        list_of_modules="${list_of_modules} Airflow Webhook"
    else
        list_of_modules="${list_of_modules} POL"
    fi
    if [ -n "$INSTALL_PLA" ]; then
        list_of_modules="${list_of_modules} PLA"
    fi
    echo "Pulling OSM docker images for the following modules: ${list_of_modules}"
    for module in ${list_of_modules}; do
        module_lower=${module,,}
        module_tag="${OSM_DOCKER_TAG}"
        if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
            module_tag="${MODULE_DOCKER_TAG}"
        fi
        echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
        sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
    done
    echo "Finished pulling docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Build the OSM docker images from source: clone each selected module at
# refspec COMMIT_ID into a temp dir and docker-build it; the osmclient
# image is built from the devops repo with repository build args.
# NOTE(review): "continue"/"fi"/"done" lines lost in the corrupted
# source were restored.
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="latest"
    echo "OSM Docker images generated from $_build_from"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
            module_lower=${module,,}
            # PLA is only built when explicitly requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
            git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
        fi
    done
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient ; then
        BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
        BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
        BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
        BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi
    echo "Finished generation of docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copy file $1 over $2 when they differ; if $2 already exists, ask the
# user for confirmation first (default: no). cp -b keeps a backup.
# Fix: the original tested "if ! $(cmp ...)", which only works by the
# obscure rule that an empty expansion propagates the substitution's
# exit status; replaced with a direct "! cmp" invocation (same result).
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    file1="$1"
    file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
        else
            cp -b ${file1} ${file2}
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploys osm pods and services
# Install the OSM helm chart into namespace $OSM_NAMESPACE: builds the
# osm-values.yaml file and the --set option list from the environment,
# runs "helm install", then records the final values used.
# NOTE(review): the heredoc body/terminator and the OSM_HELM_OPTS
# initialization were lost in the corrupted source and restored from the
# upstream devops repo — confirm before relying on them.
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # helm is already installed as part of install_kubeadm_cluster.sh

    # Generate helm values to be passed with -f osm-values.yaml
    sudo mkdir -p ${OSM_HELM_WORK_DIR}
    sudo bash -c "cat << EOF > ${OSM_HELM_WORK_DIR}/osm-values.yaml
vca:
  pubkey: \"${OSM_VCA_PUBKEY}\"
EOF"

    # Generate helm values to be passed with --set
    OSM_HELM_OPTS=""
    # OSM_HELM_OPTS="${OSM_HELM_OPTS} --set nbi.useOsmSecret=false"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.repositoryBase=${DOCKER_REGISTRY_URL}${DOCKER_USER}"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mysql.dbHostPath=${OSM_NAMESPACE_VOL}"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set mon.config.OS_NOTIFIER_URI=http://${OSM_DEFAULT_IP}:8662"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
    OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
    [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
    # "13" is the chart's default tag, so only override when different
    [ ! "$OSM_DOCKER_TAG" == "13" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.image.tag=${OSM_DOCKER_TAG}"
    [ -n "${INSTALL_NGSA}" ] || OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.oldServiceAssurance=true"

    echo "helm -n $OSM_NAMESPACE install $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm -f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
    helm -n $OSM_NAMESPACE install $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm -f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}

    # Override existing values.yaml with the final values.yaml used to install OSM
    helm -n $OSM_NAMESPACE get values $OSM_NAMESPACE | sudo tee -a ${OSM_HELM_WORK_DIR}/osm-values.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
#deploy charmed services
# Create the OSM juju model on the K8s cloud and deploy mongodb there.
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Install the New Generation Service Assurance stack (Airflow,
# AlertManager, Pushgateway) by delegating to install_ngsa.sh.
function install_osm_ngsa_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NGSA_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
    [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
    $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
        FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Register the local kubernetes cluster in OSM behind a dummy VIM
# account so it can be used as a deployment target.
# NOTE(review): two argument lines (cluster version and cluster name)
# were lost in the corrupted source and restored from the upstream
# devops repo — confirm the version string.
function add_local_k8scluster() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Configure an apt proxy (URL in $1) in /etc/apt/apt.conf.d/osm-apt —
# creating the file or updating the existing Proxy line — then refresh
# the apt cache.
# Fix: the heredoc redirected its output to ${OSM_APT_PROXY} (the proxy
# URL itself) instead of ${OSM_APT_PROXY_FILE}; it now writes to the
# config file. Lost "else"/"fi"/heredoc-terminator lines were restored.
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Ask the user for confirmation before starting the installation; a "no"
# aborts with exit 1. Skipped when -y / ASSUME_YES is set.
# NOTE(review): part of the prompt text was lost in the corrupted
# source; the middle lines below are a best-effort reconstruction —
# confirm against the upstream devops repo.
function ask_proceed() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
2. Install juju
3. Install docker CE
4. Disable swap space
5. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Detect whether the host is behind an HTTP(S) proxy by inspecting the
# usual proxy env variables. Exports OSM_BEHIND_PROXY ("y" or empty)
# and OSM_PROXY_ENV_VARIABLES (list of the variables found), and asks
# for confirmation before proceeding when a proxy is detected.
# Fix: the HTTPS_PROXY line echoed the label "https_proxy" while reading
# ${HTTPS_PROXY}; the label now matches the variable. Lost prompt lines
# and "else"/"fi" terminators were restored (hedged reconstruction).
function check_osm_behind_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    export OSM_BEHIND_PROXY=""
    export OSM_PROXY_ENV_VARIABLES=""
    [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
    [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
    [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
    [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
    [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
    [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"

    echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
    echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"

    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        [ -z "$ASSUME_YES" ] && ! ask_user "
The following env variables have been found for the current user:
${OSM_PROXY_ENV_VARIABLES}.

This suggests that this machine is behind a proxy and a special configuration is required.
The installer will install Docker CE, LXD and Juju to work behind a proxy using those
env variables.

Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
Depending on the program, the env variables to work behind a proxy might be different
(e.g. http_proxy vs HTTP_PROXY).

For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
and HTTPS_PROXY are defined.

Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
those env variables are also set for root user. If you are not sure whether those variables
are configured for the root user, you can stop the installation now.

Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        echo "This machine is not behind a proxy"
    fi

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Locate (or fetch) the devops repository and set OSM_DEVOPS: an
# explicit OSM_DEVOPS wins; with TEST_INSTALLER the local checkout
# containing this script is used; otherwise the repo is cloned into a
# temp dir that is removed on exit.
function find_devops_folder() {
    if [ -z "$OSM_DEVOPS" ]; then
        if [ -n "$TEST_INSTALLER" ]; then
            echo -e "\nUsing local devops repo for OSM installation"
            OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
        else
            echo -e "\nCreating temporary dir for OSM installation"
            OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
            trap 'rm -rf "$OSM_DEVOPS"' EXIT
            git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
        fi
    fi
}
# Main installation driver: checks prerequisites, determines the default
# network interface/IP, installs LXD, docker CE, a kubeadm cluster and
# juju as needed, then deploys the OSM services and the osmclient and
# runs the health checks.
# NOTE(review): many lines (mostly blank separators and "track"
# telemetry calls) were lost in the corrupted source; statements that
# could only be inferred from context are flagged below — confirm
# against the upstream devops repo.
function install_osm() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    # TODO: move this under start
    [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url

    check_osm_behind_proxy
    check_packages "git wget curl tar snapd"
    sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"

    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none

    track checks checkingroot_ok
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track checks noroot_ok

    # NOTE(review): the lost line before this track was presumably ask_proceed
    ask_proceed
    track checks proceed_ok

    echo "Installing OSM"

    echo "Determining IP address of the interface with the default route"
    [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
    [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"

    # configure apt proxy
    [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL

    # if no host is passed in, we need to install lxd, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
        [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
        $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
    fi

    track prereq prereqok_ok

    if [ ! -n "$INSTALL_NODOCKER" ]; then
        DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
        [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
        [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
        $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
    fi

    track docker_ce docker_ce_ok

    echo "Creating folders for installation"
    [ ! -d "$OSM_WORK_DIR" ] && sudo mkdir -p $OSM_WORK_DIR
    sudo cp -b $OSM_DEVOPS/installers/kubeadm-config.yaml $OSM_WORK_DIR/kubeadm-config.yaml

    $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${OSM_DEFAULT_IP} -d ${OSM_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
        FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
    track k8scluster k8scluster_ok

    JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
    [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
    [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
    [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
    [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
    [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
    $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
    # NOTE(review): lost lines here presumably tracked juju and ran set_vca_variables

    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && pull_docker_images
    [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
    track docker_images docker_images_ok

    deploy_charmed_services
    track deploy_osm deploy_mongodb_ok
    # NOTE(review): the lost line before this track was presumably deploy_osm_services
    deploy_osm_services
    track deploy_osm deploy_osm_services_k8s_ok
    if [ -n "$INSTALL_K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track deploy_osm install_k8s_monitoring_ok
    fi
    if [ -n "$INSTALL_NGSA" ]; then
        # optional NGSA install
        install_osm_ngsa_service
        track deploy_osm install_osm_ngsa_ok
    fi

    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient osmclient_ok

    echo -e "Checking OSM health state..."
    $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
        (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
        track healthchecks osm_unhealthy didnotconverge)
    track healthchecks after_healthcheck_ok

    # NOTE(review): the lost line before this track was presumably add_local_k8scluster
    add_local_k8scluster
    track final_ops add_local_k8scluster_ok

    arrange_docker_default_network_policy

    wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null
    track end
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploy OSM into an OpenStack VM via the ansible playbook.
#   $1: openrc file path OR cloud name (clouds.yaml)
#   $2: external/public network name (required)
#   $3: whether to set up a volume for the VM
# NOTE(review): the argument check, the openrc-vs-clouds.yaml branch and
# the venv deactivation were lost in the corrupted source and restored
# from the upstream devops repo — confirm before relying on them.
function install_to_openstack() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function

    if [ -z "$2" ]; then
        FATAL "OpenStack installer requires a valid external network name"
    fi

    # Install Pip for Python3
    sudo apt install -y python3-pip python3-venv
    sudo -H LC_ALL=C python3 -m pip install -U pip

    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV

    source $OPENSTACK_PYTHON_VENV/bin/activate

    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"

    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud

    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"

    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"

    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"

    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    fi

    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    fi

    # Execute the Ansible playbook based on openrc or clouds.yaml
    if [ -e "$1" ]; then
        . $1
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            $OSM_DEVOPS/installers/openstack/site.yml
    else
        ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
            -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    fi

    # Exit from the venv
    deactivate

    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Allow docker and LXD to coexist on the same host: accept traffic in
# the DOCKER-USER chain and persist the resulting firewall rules.
function arrange_docker_default_network_policy() {
    echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
    sudo iptables -I DOCKER-USER -j ACCEPT
    sudo iptables-save | sudo tee /etc/iptables/rules.v4
    sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
}
# Install the OSM kubernetes monitoring stack (prometheus/grafana) via
# the devops helper script.
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Print the value of every installer configuration variable (used by
# --showopts and for debugging).
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "APT_PROXY_URL=$APT_PROXY_URL"
    echo "DEVELOP=$DEVELOP"
    echo "DEBUG_INSTALL=$DEBUG_INSTALL"
    echo "DOCKER_NOBUILD=$DOCKER_NOBUILD"
    echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
    echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
    echo "DOCKER_USER=$DOCKER_USER"
    echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
    echo "INSTALL_FROM_SOURCE=$INSTALL_FROM_SOURCE"
    echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
    echo "INSTALL_LIGHTWEIGHT=$INSTALL_LIGHTWEIGHT"
    echo "INSTALL_LXD=$INSTALL_LXD"
    echo "INSTALL_NGSA=$INSTALL_NGSA"
    echo "INSTALL_NODOCKER=$INSTALL_NODOCKER"
    echo "INSTALL_NOJUJU=$INSTALL_NOJUJU"
    echo "INSTALL_NOLXD=$INSTALL_NOLXD"
    echo "INSTALL_ONLY=$INSTALL_ONLY"
    echo "INSTALL_PLA=$INSTALL_PLA"
    echo "INSTALL_TO_OPENSTACK=$INSTALL_TO_OPENSTACK"
    echo "INSTALL_VIMEMU=$INSTALL_VIMEMU"
    echo "NO_HOST_PORTS=$NO_HOST_PORTS"
    echo "OPENSTACK_PUBLIC_NET_NAME=$OPENSTACK_PUBLIC_NET_NAME"
    echo "OPENSTACK_OPENRC_FILE_OR_CLOUD=$OPENSTACK_OPENRC_FILE_OR_CLOUD"
    echo "OPENSTACK_ATTACH_VOLUME=$OPENSTACK_ATTACH_VOLUME"
    echo "OPENSTACK_SSH_KEY_FILE=$OPENSTACK_SSH_KEY_FILE"
    echo "OPENSTACK_USERDATA_FILE=$OPENSTACK_USERDATA_FILE"
    echo "OPENSTACK_VM_NAME=$OPENSTACK_VM_NAME"
    echo "OSM_DEVOPS=$OSM_DEVOPS"
    echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
    echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
    echo "OSM_NAMESPACE=$OSM_NAMESPACE"
    echo "OSM_VCA_HOST=$OSM_VCA_HOST"
    echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
    echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
    echo "OSM_WORK_DIR=$OSM_WORK_DIR"
    echo "PULL_IMAGES=$PULL_IMAGES"
    echo "RECONFIGURE=$RECONFIGURE"
    echo "RELEASE=$RELEASE"
    echo "REPOSITORY=$REPOSITORY"
    echo "REPOSITORY_BASE=$REPOSITORY_BASE"
    echo "REPOSITORY_KEY=$REPOSITORY_KEY"
    echo "SHOWOPTS=$SHOWOPTS"
    echo "TEST_INSTALLER=$TEST_INSTALLER"
    echo "TO_REBUILD=$TO_REBUILD"
    echo "UNINSTALL=$UNINSTALL"
    echo "UPDATE=$UPDATE"
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Split DOCKER_REGISTRY_URL, expected in the form "user:password@host[:port]",
# into its three parts:
#   DOCKER_REGISTRY_USER     - text before the first ":" of the credential part
#   DOCKER_REGISTRY_PASSWORD - text after that ":" (empty if no ":")
#   DOCKER_REGISTRY_URL      - rewritten in place to hold only the part after "@"
# Note: if the input contains no "@", the whole string lands in the
# user/password fields and DOCKER_REGISTRY_URL ends up empty — callers are
# expected to pass credentials when using -d.
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    DOCKER_REGISTRY_USER=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
734 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
735 echo "** Trapped CTRL-C"
736 FATAL "User stopped the installation"
737 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# ---------------------------------------------------------------------------
# Default values for the installer configuration. Most of these can be
# overridden by the command-line options parsed in the getopts loop below.
# ---------------------------------------------------------------------------
INSTALL_FROM_SOURCE=""
INSTALL_K8S_MONITOR=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
# URL-encoded file name of the OSM release signing key ("%20" = space).
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
OSM_WORK_DIR="/etc/osm"
OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
OSM_HOST_VOL="/var/lib/osm"
# Recomputed later after -s may change OSM_NAMESPACE (see main flow).
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# RFC 1123 DNS-label pattern used to validate the namespace given with -s:
# lowercase alphanumerics and hyphens, starting and ending alphanumeric.
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
OSM_INSTALLATION_TYPE="Default"
# ---------------------------------------------------------------------------
# Command-line parsing. Letters followed by ":" in the getopts spec take an
# argument; the trailing "-:" entry implements GNU-style long options
# ("--foo"), dispatched on ${OPTARG}.
# NOTE(review): the case/esac arms pairing each option letter with its
# handler are not visible in this excerpt; the statements below are the
# handler bodies, grouped by option as documented in usage().
# ---------------------------------------------------------------------------
while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
# -a: apt proxy URL for air-gapped installs
APT_PROXY_URL=${OPTARG}
# -r / -k / -u / -R: repository name, key URL, base URL and release; each is
# also forwarded to sub-installers through the REPO_ARGS array.
REPOSITORY="${OPTARG}"
REPO_ARGS+=(-r "$REPOSITORY")
REPOSITORY_KEY="${OPTARG}"
REPO_ARGS+=(-k "$REPOSITORY_KEY")
REPOSITORY_BASE="${OPTARG}"
REPO_ARGS+=(-u "$REPOSITORY_BASE")
REPO_ARGS+=(-R "$RELEASE")
OSM_DEVOPS="${OPTARG}"
# -o: install only the named component, then exit (see INSTALL_ONLY checks
# in the main flow).
[ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
[ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
# -O: install to OpenStack using an openrc file or a clouds.yaml cloud name.
INSTALL_TO_OPENSTACK="y"
if [ -n "${OPTARG}" ]; then
OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
OPENSTACK_SSH_KEY_FILE="${OPTARG}"
OPENSTACK_USERDATA_FILE="${OPTARG}"
OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
# -m: accumulate the list of modules to rebuild (space-separated words in
# TO_REBUILD); "NONE" is mutually exclusive with the others (checked later).
[ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
[ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
[ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
[ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
[ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
[ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
[ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
[ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
[ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
[ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
[ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
[ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
[ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
[ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
[ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
# -H / -S: VCA (juju) controller host and secret.
OSM_VCA_HOST="${OPTARG}"
OSM_VCA_SECRET="${OPTARG}"
# -s: Kubernetes namespace; validated against the DNS-label regex RE_CHECK.
OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
# -t: docker tag, also forwarded to sub-installers.
OSM_DOCKER_TAG="${OPTARG}"
REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
DOCKER_USER="${OPTARG}"
# -P: the argument is a *file*; its contents become the VCA public key.
OSM_VCA_PUBKEY=$(cat ${OPTARG})
OSM_VCA_APIPROXY="${OPTARG}"
LXD_CLOUD_FILE="${OPTARG}"
LXD_CRED_FILE="${OPTARG}"
CONTROLLER_NAME="${OPTARG}"
# -d / -p: external docker registry (user:pass@url) and docker proxy.
DOCKER_REGISTRY_URL="${OPTARG}"
DOCKER_PROXY_URL="${OPTARG}"
MODULE_DOCKER_TAG="${OPTARG}"
# Long options (--foo), dispatched on the word after "--". Options consumed
# only by sub-installers (bundle, k8s, vca, ha, ...) are accepted here as
# no-ops so they reach those scripts via "$@" without tripping validation.
[ "${OPTARG}" == "help" ] && usage && exit 0
[ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
[ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
[ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
[ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
[ "${OPTARG}" == "update" ] && UPDATE="y" && continue
[ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
[ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
[ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
[ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
[ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
[ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
[ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
[ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
[ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
[ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
[ "${OPTARG}" == "pullimages" ] && continue
[ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
[ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
[ "${OPTARG}" == "bundle" ] && continue
[ "${OPTARG}" == "k8s" ] && continue
[ "${OPTARG}" == "lxd" ] && continue
[ "${OPTARG}" == "lxd-cred" ] && continue
[ "${OPTARG}" == "microstack" ] && continue
[ "${OPTARG}" == "overlay" ] && continue
[ "${OPTARG}" == "only-vca" ] && continue
[ "${OPTARG}" == "small-profile" ] && continue
[ "${OPTARG}" == "vca" ] && continue
[ "${OPTARG}" == "ha" ] && continue
[ "${OPTARG}" == "tag" ] && continue
[ "${OPTARG}" == "registry" ] && continue
[ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
# --old-sa / --ng-sa toggle between the legacy and new Service Assurance
# stacks (NGSA is the default, per usage()).
[ "${OPTARG}" == "old-sa" ] && INSTALL_NGSA="" && continue
[ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
[ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
[ "${OPTARG}" == "nocachelxdimages" ] && continue
[ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
echo -e "Invalid option: '--$OPTARG'\n" >&2
# getopts error arms: missing argument (":") and unknown option ("?").
echo "Option -$OPTARG requires an argument" >&2
echo -e "Invalid option: '-$OPTARG'\n" >&2
# Load shared helpers — presumably defines DEBUG, FATAL, FATAL_TRACK and
# track used below; verify against devops/common/all_funcs.
source $OSM_DEVOPS/common/all_funcs
[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
# --showopts: print the effective configuration and exit without installing.
[ -n "$SHOWOPTS" ] && dump_vars && exit 0
# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
if [ -n "$CHARMED" ]; then
${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
# Charmed installation
if [ -n "$CHARMED" ]; then
sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
# Unique id for installation telemetry: epoch seconds + random suffix.
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
FATAL_TRACK charmed_install "charmed_install.sh failed"
# NOTE(review): output is discarded — this wget looks like an
# installation-counting beacon, not a download; confirm.
wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null
track end installation_type $OSM_INSTALLATION_TYPE
# Installation to Openstack
if [ -n "$INSTALL_TO_OPENSTACK" ]; then
install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
# Community_installer
# -m NONE must be the only -m option; -m PLA requires --pla.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
# Recompute the host volume path now that -s may have changed OSM_NAMESPACE.
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
# --only / -o paths: run the requested component install(s) and exit.
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README.txt &> /dev/null
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"