3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
function usage() {
    # Prints the command-line help for this installer to stdout.
    # Reads: $0, DEBUG_INSTALL. No side effects beyond output.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <namespace> namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " --ng-sa: install Airflow and Pushgateway to get VNF and NS status (experimental)"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    # typo fixed below: "confifured" -> "configured"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and configured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    # wording fixed below: "do not juju" -> "do not install juju"
    echo -e " --nojuju: do not install juju, assumes already installed"
    # missing space after ':' fixed below
    echo -e " --nodockerbuild: do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Takes a juju accounts.yaml file and returns the password specific
# to a controller. Implemented using only bash tools (sed/awk) to
# avoid requiring additional packages.
function parse_juju_password {
    # Extracts the password for the given juju controller from
    # ${HOME}/.local/share/juju/accounts.yaml using only sed+awk.
    # $1: controller name (substring-matched against the YAML path).
    # Outputs: the password on stdout (no trailing newline).
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key chars, fs: \034 field separator
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into "indent<FS>key<FS>value" records, then walk the
    # indent stack in awk to rebuild the full key path for each value.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn, controller) && match($2, "password")) {
                printf("%s", $3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function set_vca_variables() {
    # Derives the VCA (juju) connection settings when not given on the
    # command line: host IP, secret, public key and CA cert.
    # Reads: OSM_VCA_HOST/SECRET/PUBKEY/CACERT, CONTROLLER_NAME, OSM_NAMESPACE.
    # Writes: OSM_VCA_CLOUDNAME and any of the OSM_VCA_* it had to derive.
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # Ask juju for the controller api-endpoints and keep only the IP part
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_NAMESPACE" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_NAMESPACE)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # CA cert is stored base64-encoded on a single line
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function generate_secret() {
    # Prints a random 32-character alphanumeric secret to stdout.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function check_packages() {
    # Ensures the given apt packages are installed, installing any that
    # are missing. $1: space-separated package list.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NEEDED_PACKAGES="$1"
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    # NEEDED_PACKAGES is intentionally unquoted so the list word-splits
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        # NOTE(review): the original presence check was lost in extraction;
        # dpkg -l is the conventional check here — TODO confirm against upstream
        dpkg -l ${PACKAGE} &>/dev/null
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update -y
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Asks the user a question and parses a response among 'y', 'yes', 'n' or 'no'. Case insensitive.
# Params: $1 text to ask; $2 default action, 'y' for yes, 'n' for no, other or empty for no default
# Return: true (0) if the user types 'yes'; false (1) if the user types 'no'
function ask_user(){
    # Prompts the user and loops until a yes/no answer is given.
    # $1: prompt text; $2: default ('y' accepts empty as yes, 'n' as no).
    # Returns 0 for yes, 1 for no.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ]  || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_osmclient(){
    # Installs the OSM client (and IM) from the configured apt repository,
    # plus the python requirements they declare.
    # Reads: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Strip the option prefixes the arg parser leaves in these globals
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    # NOTE(review): an apt cache refresh line was lost in extraction here — TODO confirm
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev libmagic1
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # Legacy (non-lightweight) installs resolve host IPs from lxd containers
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function docker_login() {
    # Logs in to the docker registry with DOCKER_REGISTRY_USER/PASSWORD.
    # Fix: the extracted code passed both '-p <pass>' and '--password-stdin',
    # which the docker CLI rejects as mutually exclusive and which also leaks
    # the password on the command line (visible in ps). Feed it via stdin.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    echo "${DOCKER_REGISTRY_PASSWORD}" | \
        sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function pull_docker_images() {
    # Pulls all third-party and OSM docker images needed by the deployment.
    # Reads: TO_REBUILD (module filter), *_TAG tag variables,
    # DOCKER_REGISTRY_URL, DOCKER_USER, OSM_DOCKER_TAG, MODULE_DOCKER_TAG,
    # INSTALL_PLA. Aborts via FATAL on any failed pull.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling non-OSM docker images"
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    echo "Pulling OSM docker images"
    for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient Airflow; do
        module_lower=${module,,}
        # PLA is optional; skip it unless requested
        if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
            continue
        fi
        module_tag="${OSM_DOCKER_TAG}"
        # Modules selected with -m may carry their own tag (-T)
        if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
            module_tag="${MODULE_DOCKER_TAG}"
        fi
        echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
        sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
    done
    echo "Finished pulling docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function generate_docker_images() {
    # Builds OSM docker images from source (gerrit checkouts of COMMIT_ID).
    # Reads: COMMIT_ID, TO_REBUILD, INSTALL_PLA, DOCKER_USER, OSM_DEVOPS,
    # REPOSITORY*, RELEASE. Aborts via FATAL on a failed build.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Generating docker images"
    _build_from=$COMMIT_ID
    [ -z "$_build_from" ] && _build_from="latest"
    echo "OSM Docker images generated from $_build_from"
    LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
    trap 'rm -rf "${LWTEMPDIR}"' EXIT
    for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
            module_lower=${module,,}
            # PLA is optional; skip it unless requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
            git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
            sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
        fi
    done
    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient ; then
        # Pass repository configuration through to the osmclient Dockerfile
        BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
        BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
        BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
        BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
        sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
    fi
    echo "Finished generation of docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function cmp_overwrite() {
    # Copies $1 over $2 when their contents differ; asks for confirmation
    # first when $2 already exists. $1: source file; $2: destination file.
    # Fix: the extracted code used 'if ! $(cmp ...)', which executes cmp's
    # (empty) output as a command instead of testing cmp's exit status
    # directly — it only worked by accident. Test the command itself.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local file1="$1"
    local file2="$2"
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function generate_k8s_manifest_files() {
    # Copies the stock Kubernetes manifests (osm_pods) into the work dir,
    # keeping backups of anything overwritten (cp -b).
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function generate_docker_env_files() {
    # Generates (or updates in place) the per-service env files under
    # OSM_DOCKER_WORK_DIR. Pattern throughout: append the variable if the
    # file/key is missing, otherwise rewrite the existing line with sed.
    # Reads: OSM_DOCKER_WORK_DIR, OSM_DATABASE_COMMONKEY, OSM_VCA_*,
    # OSM_BEHIND_PROXY + proxy vars, INSTALL_NGSA, OSM_DEFAULT_IP.
    # Writes: MYSQL_ROOT_PASSWORD, KEYSTONE_DB_PASSWORD, SERVICE_PASSWORD.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
    if [ -n "${INSTALL_NGSA}" ]; then
        sudo cp $OSM_DOCKER_WORK_DIR/ngsa.env{,~}
        sudo cp $OSM_DOCKER_WORK_DIR/webhook-translator.env{,~}
    fi

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi
    # Commented-out defaults, written once so operators can uncomment them
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        if ! grep -Fq "HTTP_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "HTTP_PROXY=${HTTP_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|HTTP_PROXY.*|HTTP_PROXY=${HTTP_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
        if ! grep -Fq "HTTPS_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "HTTPS_PROXY=${HTTPS_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|HTTPS_PROXY.*|HTTPS_PROXY=${HTTPS_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
        if ! grep -Fq "NO_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "NO_PROXY=${NO_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|NO_PROXY.*|NO_PROXY=${NO_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${OSM_DEFAULT_IP}:8662" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$OSM_DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # NG-SA
    if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/ngsa.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ngsa.env
    fi

    # Webhook translator
    if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/webhook-translator.env ]; then
        echo "AIRFLOW_HOST=airflow-webserver" | sudo tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
        echo "AIRFLOW_PORT=8080" | sudo tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
        echo "AIRFLOW_USER=admin" | sudo tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
        echo "AIRFLOW_PASS=admin" | sudo tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
493 #creates secrets from env files which will be used by containers
function kube_secrets(){
    # Creates the OSM namespace and one k8s secret per service, sourced
    # from the env files generated by generate_docker_env_files.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_NAMESPACE
    kubectl create secret generic lcm-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    if [ -n "${INSTALL_NGSA}" ]; then
        kubectl create secret generic ngsa-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ngsa.env
        kubectl create secret generic webhook-translator-secret -n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/webhook-translator.env
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
511 #deploys osm pods and services
function deploy_osm_services() {
    # Applies every manifest in OSM_K8S_WORK_DIR to the OSM namespace.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_NAMESPACE -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
518 #deploy charmed services
function deploy_charmed_services() {
    # Creates the juju model on the K8s cloud and deploys the charmed
    # services (currently mongodb-k8s from Charmhub).
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    juju add-model $OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_NAMESPACE
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function deploy_osm_pla_service() {
    # Applies the PLA manifests to the OSM namespace
    # (corresponding to deploy_osm_services).
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl apply -n $OSM_NAMESPACE -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function install_osm_ngsa_service() {
    # Delegates NG-SA (Airflow/Pushgateway) installation to install_ngsa.sh,
    # forwarding work dir, devops path, tag, docker user and debug flag.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NGSA_OPTS="-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} -U ${DOCKER_USER} ${DEBUG_INSTALL}"
    [ -n "${DOCKER_REGISTRY_URL}" ] && NGSA_OPTS="${NGSA_OPTS} -r ${DOCKER_REGISTRY_URL}"
    # NGSA_OPTS is intentionally unquoted: it carries multiple options
    $OSM_DEVOPS/installers/install_ngsa.sh ${NGSA_OPTS} || \
        FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_yaml() {
    # Rewrites the image references in the K8s manifests from the default
    # opensourcemano/<image>:* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<image>:<TAG>.
    # $1: docker tag; remaining args: list of modules to update.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): argument-parsing lines were lost in extraction; the body
    # below uses TAG (from $1) and iterates the remaining args — TODO confirm
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA lives in its own manifest dir and is optional
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/pla:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/pla:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            # ng-* manifests reference the base image name — TODO confirm mapping
            image=${module}
            if [ "$module" == "ng-prometheus" ]; then
                image="prometheus"
            elif [ "$module" == "ng-mon" ]; then
                image="mon"
            fi
            echo "Updating K8s manifest file from opensourcemano\/${image}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${image}:${TAG}"
            sudo sed -i "s#opensourcemano/${image}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${image}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function update_manifest_files() {
    # Builds the list of services whose manifests must be re-tagged (those NOT
    # listed in TO_REBUILD) and removes manifests that do not apply to the
    # selected installation options, then delegates to parse_yaml.
    # NOTE(review): this excerpt is missing the initialization of
    # list_of_services/list_of_services_to_rebuild, an apparent 'else' before
    # the ng-* removals, and the closing fi/done lines.
    osm_services="nbi lcm ro pol mon ng-mon ng-ui keystone pla prometheus ng-prometheus"
    for module in $osm_services; do
        # ${module^^} = module name upper-cased, to match TO_REBUILD entries.
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
    if [ ! "$OSM_DOCKER_TAG" == "13" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    # The manifest for prometheus is prometheus.yaml or ng-prometheus.yaml, depending on the installation option
    # If NG-SA is installed, it will include ng-mon (only mon-dashboarder), ng-prometheus and webhook translator. It won't include pol, mon and prometheus
    if [ -n "$INSTALL_NGSA" ]; then
        sudo rm -f ${OSM_K8S_WORK_DIR}/prometheus.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/mon.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/pol.yaml
    # (presumably the 'else' branch — NG-SA not installed: drop NG-SA-only manifests)
        sudo rm -f ${OSM_K8S_WORK_DIR}/ng-mon.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/ng-prometheus.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/webhook-translator.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function namespace_vol() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # List of services with a volume mounted in path /var/lib/osm
    # Redirects each per-service hostPath volume to the namespace-specific
    # directory ($OSM_NAMESPACE_VOL) so multiple namespaces don't collide.
    # NOTE(review): the assignment of osm_services for this loop and the
    # closing fi/done lines are missing from this excerpt.
    for osm in $osm_services; do
        if [ -f "$OSM_K8S_WORK_DIR/$osm.yaml" ] ; then
            sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function add_local_k8scluster() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Registers a dummy VIM and then the installer's own K8s cluster in OSM,
    # so KNFs can be deployed on the local cluster out of the box.
    # NOTE(review): two continuation lines of the k8scluster-add command are
    # missing from this excerpt (likely the --version flag and the cluster
    # name), which is why the last continuation backslash appears dangling.
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --description "OSM Internal Cluster" \
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Configure apt to use the proxy URL passed as $1 by creating or updating
    # /etc/apt/apt.conf.d/osm-apt, then verify apt still works.
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        # BUGFIX: the heredoc previously redirected to ${OSM_APT_PROXY} (the
        # proxy URL itself) instead of ${OSM_APT_PROXY_FILE} (the config file).
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        # File already exists: rewrite the Proxy line in place.
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function ask_proceed() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Ask the user for confirmation before starting, unless -y (ASSUME_YES)
    # was given; a negative answer aborts the installation.
    # NOTE(review): some lines of the confirmation message (steps 2-3 and
    # part of the list) are missing from this excerpt.
    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
1. Install and configure LXD
4. Disable swap space
5. Install and initialize Kubernetes
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function check_osm_behind_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Detect proxy-related env variables; if any is set, mark the host as
    # being behind a proxy (OSM_BEHIND_PROXY) and collect the variable names
    # (OSM_PROXY_ENV_VARIABLES), then ask the user to confirm.
    export OSM_BEHIND_PROXY=""
    export OSM_PROXY_ENV_VARIABLES=""
    [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
    [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
    [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
    # NOTE(review): the next line echoes the label "https_proxy=" for the
    # HTTPS_PROXY variable — looks like a copy/paste slip; confirm upstream.
    [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
    [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
    [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
    echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
    echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        # NOTE(review): parts of this confirmation message and the else/fi
        # closing lines are missing from this excerpt.
        [ -z "$ASSUME_YES" ] && ! ask_user "
The following env variables have been found for the current user:
${OSM_PROXY_ENV_VARIABLES}.
This suggests that this machine is behind a proxy and a special configuration is required.
The installer will install Docker CE, LXD and Juju to work behind a proxy using those
Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
Depending on the program, the env variables to work behind a proxy might be different
(e.g. http_proxy vs HTTP_PROXY).
For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
and HTTPS_PROXY are defined.
Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
those env variables are also set for root user. If you are not sure whether those variables
are configured for the root user, you can stop the installation now.
Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
        # (belongs to the missing 'else' branch)
        echo "This machine is not behind a proxy"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function find_devops_folder() {
    # Locate the devops repo: when --test is set, use the local checkout
    # (parent of this script); otherwise clone upstream devops into a
    # temporary directory that is removed on exit.
    # NOTE(review): the 'else' line and the closing fi/fi/} are missing from
    # this excerpt.
    if [ -z "$OSM_DEVOPS" ]; then
        if [ -n "$TEST_INSTALLER" ]; then
            echo -e "\nUsing local devops repo for OSM installation"
            OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
        # (belongs to the missing 'else' branch)
            echo -e "\nCreating temporary dir for OSM installation"
            OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
            # Remove the temporary clone when the installer exits.
            trap 'rm -rf "$OSM_DEVOPS"' EXIT
            git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
function install_osm() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Main community-installer entry point: checks prerequisites, installs
    # LXD/Docker/Kubernetes/Juju as needed, then deploys the OSM services.
    # NOTE(review): this excerpt has gaps — several original lines are
    # missing (e.g. the calls matching the kube_secrets / namespace_vol /
    # deploy_osm_services / add_local_k8scluster track events, and most
    # closing 'fi' lines). Structure below is therefore incomplete.
    # TODO: move this under start
    [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
    check_osm_behind_proxy
    track checks proxy_ok
    check_packages "git wget curl tar snapd"
    sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
    track checks checkingroot_ok
    [ "$USER" == "root" ] && FATAL "You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
    track checks noroot_ok
    track checks proceed_ok
    echo "Installing OSM"
    echo "Determining IP address of the interface with the default route"
    [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
    [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
    [ -z "$OSM_DEFAULT_IF" ] && FATAL "Not possible to determine the interface with the default route 0.0.0.0"
    OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
    [ -z "$OSM_DEFAULT_IP" ] && FATAL "Not possible to determine the IP address of the interface with the default route"
    # configure apt proxy
    [ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
    # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
    if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
        LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
        [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
        $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
    track prereq prereqok_ok
    if [ ! -n "$INSTALL_NODOCKER" ]; then
        DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
        [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
        [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -P"
        $OSM_DEVOPS/installers/install_docker_ce.sh ${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce "install_docker_ce.sh failed"
    track docker_ce docker_ce_ok
    echo "Creating folders for installation"
    [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR
    [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir -p $OSM_DOCKER_WORK_DIR/osm_pla
    sudo cp -b $OSM_DEVOPS/installers/docker/cluster-config.yaml $OSM_DOCKER_WORK_DIR/cluster-config.yaml
    $OSM_DEVOPS/installers/install_kubeadm_cluster.sh -i ${OSM_DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
        FATAL_TRACK k8scluster "install_kubeadm_cluster.sh failed"
    track k8scluster k8scluster_ok
    JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
    [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
    [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
    [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
    [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
    [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
    $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
    if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
        OSM_DATABASE_COMMONKEY=$(generate_secret)
        # NOTE(review): the test below checks the literal string
        # "OSM_DATABASE_COMMONKEY" (missing '$'), so it is always non-empty
        # and the FATAL branch is dead code — confirm and fix upstream.
        [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL "Cannot generate common db secret"
    # Deploy OSM services
    [ -z "$DOCKER_NOBUILD" ] && pull_docker_images
    [ -z "$DOCKER_NOBUILD" ] && [ -z "$PULL_IMAGES" ] && generate_docker_images
    track docker_images docker_images_ok
    generate_k8s_manifest_files
    track osm_files manifest_files_ok
    generate_docker_env_files
    track osm_files env_files_ok
    deploy_charmed_services
    track deploy_osm deploy_charmed_services_ok
    track deploy_osm kube_secrets_ok
    update_manifest_files
    track deploy_osm update_manifest_files_ok
    track deploy_osm namespace_vol_ok
    track deploy_osm deploy_osm_services_k8s_ok
    if [ -n "$INSTALL_PLA" ]; then
        # optional PLA install
        deploy_osm_pla_service
        track deploy_osm deploy_osm_pla_ok
    if [ -n "$INSTALL_K8S_MONITOR" ]; then
        # install OSM MONITORING
        install_k8s_monitoring
        track deploy_osm install_k8s_monitoring_ok
    if [ -n "$INSTALL_NGSA" ]; then
        # optional NG-SA install (original comment said "PLA" — copy/paste slip)
        install_osm_ngsa_service
        track deploy_osm install_osm_ngsa_ok
    [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
    track osmclient osmclient_ok
    echo -e "Checking OSM health state..."
    $OSM_DEVOPS/installers/osm_health.sh -s ${OSM_NAMESPACE} -k || \
        (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
        echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
        track healthchecks osm_unhealthy didnotconverge)
    track healthchecks after_healthcheck_ok
    track final_ops add_local_k8scluster_ok
    arrange_docker_default_network_policy
    # Fire-and-forget download, apparently used only as an install beacon.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function install_to_openstack() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Deploys an OSM VM on OpenStack via Ansible.
    #   $1: openrc file path or cloud name
    #   $2: external network name
    #   $3: whether to attach a volume ("true"/"false")
    # NOTE(review): this excerpt is missing the argument validation guarding
    # the FATAL below, the closing 'fi' lines, and the if/else that selects
    # between the openrc-file and cloud-name playbook invocations.
    FATAL "OpenStack installer requires a valid external network name"
    # Install Pip for Python3
    sudo apt install -y python3-pip python3-venv
    sudo -H LC_ALL=C python3 -m pip install -U pip
    # Create a venv to avoid conflicts with the host installation
    python3 -m venv $OPENSTACK_PYTHON_VENV
    source $OPENSTACK_PYTHON_VENV/bin/activate
    # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
    python -m pip install -U wheel
    python -m pip install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
    # Install the Openstack cloud module (ansible>=2.10)
    ansible-galaxy collection install openstack.cloud
    export ANSIBLE_CONFIG="$OSM_DEVOPS/installers/openstack/ansible.cfg"
    OSM_INSTALLER_ARGS="${REPO_ARGS[@]}"
    ANSIBLE_VARS="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
    if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
        ANSIBLE_VARS+=" key_file=$OPENSTACK_SSH_KEY_FILE"
    if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
        ANSIBLE_VARS+=" userdata_file=$OPENSTACK_USERDATA_FILE"
    # Execute the Ansible playbook based on openrc or clouds.yaml
    ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        $OSM_DEVOPS/installers/openstack/site.yml
    # (cloud-name variant of the same playbook invocation)
    ansible-playbook -e installer_args="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
        -e cloud_name=$1 $OSM_DEVOPS/installers/openstack/site.yml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
function arrange_docker_default_network_policy() {
    # Insert an ACCEPT rule into the DOCKER-USER chain so docker's firewall
    # rules do not block LXD traffic on the same host, then persist both the
    # IPv4 and IPv6 rule sets so the fix survives reboots.
    echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
    sudo iptables -I DOCKER-USER -j ACCEPT
    sudo iptables-save | sudo tee /etc/iptables/rules.v4
    sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
}
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring
    # Make the helper scripts executable, then delegate to the monitoring
    # installer. $OSM_DEVOPS is quoted; the *.sh glob stays unquoted on purpose.
    sudo chmod +x "$OSM_DEVOPS"/installers/k8s/*.sh
    sudo "$OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh" || \
        FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Print every installer option in NAME=VALUE form (one per line, order
    # preserved from the original hand-written echo list), followed by the
    # refspec line. Uses indirect expansion to avoid 48 near-identical echos.
    local var
    for var in \
        APT_PROXY_URL DEVELOP DEBUG_INSTALL DOCKER_NOBUILD DOCKER_PROXY_URL \
        DOCKER_REGISTRY_URL DOCKER_USER INSTALL_CACHELXDIMAGES INSTALL_FROM_SOURCE \
        INSTALL_K8S_MONITOR INSTALL_LIGHTWEIGHT INSTALL_LXD INSTALL_NGSA \
        INSTALL_NODOCKER INSTALL_NOJUJU INSTALL_NOLXD INSTALL_ONLY INSTALL_PLA \
        INSTALL_TO_OPENSTACK INSTALL_VIMEMU NO_HOST_PORTS OPENSTACK_PUBLIC_NET_NAME \
        OPENSTACK_OPENRC_FILE_OR_CLOUD OPENSTACK_ATTACH_VOLUME OPENSTACK_SSH_KEY_FILE \
        OPENSTACK_USERDATA_FILE OPENSTACK_VM_NAME OSM_DEVOPS OSM_DOCKER_TAG \
        OSM_DOCKER_WORK_DIR OSM_HELM_WORK_DIR OSM_K8S_WORK_DIR OSM_NAMESPACE \
        OSM_VCA_HOST OSM_VCA_PUBKEY OSM_VCA_SECRET OSM_WORK_DIR PULL_IMAGES \
        RECONFIGURE RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY SHOWOPTS \
        TEST_INSTALLER TO_REBUILD UNINSTALL UPDATE; do
        echo "${var}=${!var}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Splits DOCKER_REGISTRY_URL, expected as "<user>:<password>@<host[:port]>",
    # into DOCKER_REGISTRY_USER / DOCKER_REGISTRY_PASSWORD and rewrites
    # DOCKER_REGISTRY_URL to hold only the registry host part.
    local credentials registry
    credentials=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[1]}')
    registry=$(echo "$DOCKER_REGISTRY_URL" | awk '{split($1,a,"@"); print a[2]}')
    DOCKER_REGISTRY_USER=$(echo "$credentials" | awk '{split($1,b,":"); print b[1]}')
    DOCKER_REGISTRY_PASSWORD=$(echo "$credentials" | awk '{split($1,b,":"); print b[2]}')
    DOCKER_REGISTRY_URL=$registry
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
    # NOTE(review): the enclosing function-definition line is missing from
    # this excerpt; judging by the message this is the body of the Ctrl-C
    # trap handler — confirm against the full file.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "** Trapped CTRL-C"
    # Abort the installation when the user interrupts it.
    FATAL "User stopped the installation"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# --- Installer defaults (overridden by the command-line options below) ---
# NOTE(review): some default assignments (e.g. OSM_NAMESPACE) fall on lines
# missing from this excerpt; only the visible ones are listed here.
INSTALL_FROM_SOURCE=""
RELEASE="ReleaseTEN"
INSTALL_K8S_MONITOR=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Work directories: /etc/osm holds docker/, osm_pods/ and helm/ subtrees.
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="${OSM_WORK_DIR}/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Namespace names must be lowercase alphanumerics/hyphens (K8s DNS label).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
OSM_INSTALLATION_TYPE="Default"
# --- Command-line option parsing ---
# NOTE(review): this excerpt lost the 'case "${o}" in' header, the individual
# option labels (a), b), r), ...), the ';;' separators and 'esac'/'done'.
# Only the bodies of the case arms survive; each leading comment marks which
# option the body appears to belong to (inferred from the getopts spec and
# the usage text — confirm against the full file).
while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o; do
    # -a <apt proxy url>
    APT_PROXY_URL=${OPTARG}
    # -r <repo>
    REPOSITORY="${OPTARG}"
    REPO_ARGS+=(-r "$REPOSITORY")
    # -k <repo key>
    REPOSITORY_KEY="${OPTARG}"
    REPO_ARGS+=(-k "$REPOSITORY_KEY")
    # -u <repo base>
    REPOSITORY_BASE="${OPTARG}"
    REPO_ARGS+=(-u "$REPOSITORY_BASE")
    # -R <release> (the RELEASE="${OPTARG}" assignment is missing from this excerpt)
    REPO_ARGS+=(-R "$RELEASE")
    # -D <devops path>
    OSM_DEVOPS="${OPTARG}"
    # -o <install-only option>
    [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
    [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
    # -O <openrc file path / cloud name>: install to OpenStack
    INSTALL_TO_OPENSTACK="y"
    if [ -n "${OPTARG}" ]; then
        OPENSTACK_OPENRC_FILE_OR_CLOUD="${OPTARG}"
        echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
    # (OpenStack SSH key file option)
    OPENSTACK_SSH_KEY_FILE="${OPTARG}"
    # (OpenStack userdata file option)
    OPENSTACK_USERDATA_FILE="${OPTARG}"
    # (OpenStack public network name option)
    OPENSTACK_PUBLIC_NET_NAME="${OPTARG}"
    # -m <MODULE>: restrict rebuild/pull to the listed docker images
    [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD="$TO_REBUILD NG-UI" && continue
    [ "${OPTARG}" == "NBI" ] && TO_REBUILD="$TO_REBUILD NBI" && continue
    [ "${OPTARG}" == "LCM" ] && TO_REBUILD="$TO_REBUILD LCM" && continue
    [ "${OPTARG}" == "RO" ] && TO_REBUILD="$TO_REBUILD RO" && continue
    [ "${OPTARG}" == "MON" ] && TO_REBUILD="$TO_REBUILD MON" && continue
    [ "${OPTARG}" == "POL" ] && TO_REBUILD="$TO_REBUILD POL" && continue
    [ "${OPTARG}" == "PLA" ] && TO_REBUILD="$TO_REBUILD PLA" && continue
    [ "${OPTARG}" == "osmclient" ] && TO_REBUILD="$TO_REBUILD osmclient" && continue
    [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD="$TO_REBUILD KAFKA" && continue
    [ "${OPTARG}" == "MONGO" ] && TO_REBUILD="$TO_REBUILD MONGO" && continue
    [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS" && continue
    [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
    [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD="$TO_REBUILD KEYSTONE-DB" && continue
    [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD="$TO_REBUILD GRAFANA" && continue
    [ "${OPTARG}" == "NONE" ] && TO_REBUILD="$TO_REBUILD NONE" && continue
    # -H <VCA host>
    OSM_VCA_HOST="${OPTARG}"
    # -S <VCA secret>
    OSM_VCA_SECRET="${OPTARG}"
    # -s <namespace>, validated against RE_CHECK (K8s DNS-label regex)
    OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
    # -t <docker tag>
    OSM_DOCKER_TAG="${OPTARG}"
    REPO_ARGS+=(-t "$OSM_DOCKER_TAG")
    # -U <docker user>
    DOCKER_USER="${OPTARG}"
    # -P <VCA pubkey file> (reads the file content)
    OSM_VCA_PUBKEY=$(cat ${OPTARG})
    # -A <VCA apiproxy>
    OSM_VCA_APIPROXY="${OPTARG}"
    # -l <lxd cloud file>
    LXD_CLOUD_FILE="${OPTARG}"
    # -L <lxd credentials file>
    LXD_CRED_FILE="${OPTARG}"
    # -K <juju controller name>
    CONTROLLER_NAME="${OPTARG}"
    # -d <docker registry url>
    DOCKER_REGISTRY_URL="${OPTARG}"
    # -p <docker proxy url>
    DOCKER_PROXY_URL="${OPTARG}"
    # -T <module docker tag>
    MODULE_DOCKER_TAG="${OPTARG}"
    # --<long option> arm: each recognized word sets its flag; several are
    # accepted-and-ignored for compatibility with the charmed installer.
    [ "${OPTARG}" == "help" ] && usage && exit 0
    [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE="y" && PULL_IMAGES="" && continue
    [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
    [ "${OPTARG}" == "develop" ] && DEVELOP="y" && continue
    [ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
    [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
    [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
    [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
    [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
    [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD="y" && continue
    [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER="y" && continue
    [ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
    [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS="y" && continue
    [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU="--nojuju" && continue
    [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD="y" && continue
    [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
    [ "${OPTARG}" == "pullimages" ] && continue
    [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
    [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
    [ "${OPTARG}" == "bundle" ] && continue
    [ "${OPTARG}" == "k8s" ] && continue
    [ "${OPTARG}" == "lxd" ] && continue
    [ "${OPTARG}" == "lxd-cred" ] && continue
    [ "${OPTARG}" == "microstack" ] && continue
    [ "${OPTARG}" == "overlay" ] && continue
    [ "${OPTARG}" == "only-vca" ] && continue
    [ "${OPTARG}" == "small-profile" ] && continue
    [ "${OPTARG}" == "vca" ] && continue
    [ "${OPTARG}" == "ha" ] && continue
    [ "${OPTARG}" == "tag" ] && continue
    [ "${OPTARG}" == "registry" ] && continue
    [ "${OPTARG}" == "pla" ] && INSTALL_PLA="y" && continue
    [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA="y" && continue
    [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME="true" && continue
    [ "${OPTARG}" == "nocachelxdimages" ] && continue
    [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
    echo -e "Invalid option: '--$OPTARG'\n" >&2
    # (':' arm — missing option argument)
    echo "Option -$OPTARG requires an argument" >&2
    # ('\?' arm — unknown option)
    echo -e "Invalid option: '-$OPTARG'\n" >&2
# --- Top-level installer flow ---
# NOTE(review): several lines are missing from this excerpt ('else'/'fi'
# closers, the exits after the uninstall/charmed/openstack branches, and
# assorted blank lines); structure below is incomplete.
# Load shared helper functions (FATAL, track, ask_user, ...).
source $OSM_DEVOPS/common/all_funcs
[ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
[ -n "$SHOWOPTS" ] && dump_vars && exit 0
# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
    if [ -n "$CHARMED" ]; then
        ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
            FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
    # (belongs to the missing 'else' branch — community uninstall)
        ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
            FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
# Charmed installation
if [ -n "$CHARMED" ]; then
    sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
    # Random per-installation id used by the telemetry 'track' calls.
    export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
    track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none
    ${OSM_DEVOPS}/installers/charmed_install.sh --tag $OSM_DOCKER_TAG "$@" || \
        FATAL_TRACK charmed_install "charmed_install.sh failed"
    # Fire-and-forget download, apparently used only as an install beacon.
    wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README2.txt &> /dev/null
    track end installation_type $OSM_INSTALLATION_TYPE
# Installation to Openstack
if [ -n "$INSTALL_TO_OPENSTACK" ]; then
    install_to_openstack $OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
# Community_installer
# Sanity checks on the -m (TO_REBUILD) option combinations.
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD | grep -q NONE && FATAL "Incompatible option: -m NONE cannot be used with other -m options"
[ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL "Incompatible option: -m PLA cannot be used without --pla option"
# if develop, we force master
[ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID="master"
# Recompute paths that depend on options parsed above (-s namespace).
OSM_K8S_WORK_DIR="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
[ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
[ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
#Installation starts here
wget -q -O- https://osm-download.etsi.org/ftp/osm-13.0-thirteen/README.txt &> /dev/null
export OSM_TRACK_INSTALLATION_ID="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"