# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
18 echo -e "usage: $0 [OPTIONS]"
19 echo -e "Install OSM from binaries or source code (by default, from binaries)"
21 echo -e " -h / --help: print this help"
22 echo -e " -y: do not prompt for confirmation, assumes yes"
23 echo -e " -r <repo>: use specified repository name for osm packages"
24 echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
25 echo -e " -u <repo base>: use specified repository url for osm packages"
26 echo -e " -k <repo key>: use specified repository public key url"
27 echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
28 echo -e " -b master (main dev branch)"
29 echo -e " -b v2.0 (v2.0 branch)"
30 echo -e " -b tags/v1.1.0 (a specific tag)"
32 echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
33 echo -e " -s <namespace> namespace when installed using k8s, default is osm"
34 echo -e " -H <VCA host> use specific juju host controller IP"
35 echo -e " -S <VCA secret> use VCA/juju secret key"
36 echo -e " -P <VCA pubkey> use VCA/juju public key file"
37 echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
38 echo -e " --pla: install the PLA module for placement support"
39 echo -e " --ng-sa: install Airflow and Pushgateway to get VNF and NS status (experimental)"
40 echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
41 echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
42 echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
43 echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
44 echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
45 echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
46 echo -e " -D <devops path> use local devops installation path"
47 echo -e " -w <work dir> Location to store runtime installation"
48 echo -e " -t <docker tag> specify osm docker tag (default is latest)"
49 echo -e " -l: LXD cloud yaml file"
50 echo -e " -L: LXD credentials yaml file"
51 echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
52 echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
53 echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
54 echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
55 echo -e " --debug: debug mode"
56 echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
57 echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
58 echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
59 echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
60 echo -e " --nojuju: do not juju, assumes already installed"
61 echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
62 echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
63 echo -e " --nohostclient: do not install the osmclient"
64 echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
65 echo -e " --source: install OSM from source code using the latest stable tag"
66 echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
67 echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
68 echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
69 echo -e " --volume: create a VM volume when installing to OpenStack"
70 echo -e " --showopts: print chosen options and exit (only for debugging)"
71 echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
72 echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
73 echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
74 echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
75 echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
76 echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
77 echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
78 echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
79 echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
80 echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
81 echo -e " [--tag]: Docker image tag. (--charmed option)"
82 echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
83 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
86 # takes a juju/accounts.yaml file and returns the password specific
87 # for a controller. I wrote this using only bash tools to minimize
88 # additions of other packages
89 function parse_juju_password
{
90 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
91 password_file
="${HOME}/.local/share/juju/accounts.yaml"
92 local controller_name
=$1
93 local s
='[[:space:]]*' w
='[a-zA-Z0-9_-]*' fs
=$
(echo @|
tr @
'\034')
94 sed -ne "s|^\($s\):|\1|" \
95 -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
96 -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
97 awk -F$fs -v controller
=$controller_name '{
98 indent = length($1)/2;
100 for (i in vname) {if (i > indent) {delete vname[i]}}
101 if (length($3) > 0) {
102 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
103 if (match(vn,controller) && match($2,"password")) {
108 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
111 function set_vca_variables
() {
112 OSM_VCA_CLOUDNAME
="lxd-cloud"
113 [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME
="localhost"
114 if [ -z "$OSM_VCA_HOST" ]; then
115 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
116 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST
=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
117 [ -z "$OSM_VCA_HOST" ] && FATAL
"Cannot obtain juju controller IP address"
119 if [ -z "$OSM_VCA_SECRET" ]; then
120 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$OSM_NAMESPACE)
121 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET
=$
(parse_juju_password
$CONTROLLER_NAME)
122 [ -z "$OSM_VCA_SECRET" ] && FATAL
"Cannot obtain juju secret"
124 if [ -z "$OSM_VCA_PUBKEY" ]; then
125 OSM_VCA_PUBKEY
=$
(cat $HOME/.local
/share
/juju
/ssh
/juju_id_rsa.pub
)
126 [ -z "$OSM_VCA_PUBKEY" ] && FATAL
"Cannot obtain juju public key"
128 if [ -z "$OSM_VCA_CACERT" ]; then
129 [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$OSM_NAMESPACE '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
130 [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT
=$
(juju controllers
--format json | jq
-r --arg controller
$CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 |
tr -d \\n
)
131 [ -z "$OSM_VCA_CACERT" ] && FATAL
"Cannot obtain juju CA certificate"
135 function generate_secret
() {
136 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
137 head /dev
/urandom |
tr -dc A-Za-z0-9 |
head -c 32
138 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
141 function check_packages
() {
143 echo -e "Checking required packages: ${NEEDED_PACKAGES}"
144 for PACKAGE
in ${NEEDED_PACKAGES} ; do
146 if [ $?
-ne 0 ]; then
147 echo -e "Package ${PACKAGE} is not installed."
148 echo -e "Updating apt-cache ..."
150 echo -e "Installing ${PACKAGE} ..."
151 sudo apt-get
install -y ${PACKAGE} || FATAL
"failed to install ${PACKAGE}"
154 echo -e "Required packages are present: ${NEEDED_PACKAGES}"
158 # ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
159 # Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
160 # Return: true(0) if user type 'yes'; false (1) if user type 'no'
161 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
162 read -e -p "$1" USER_CONFIRMATION
164 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
165 [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
166 [ "${USER_CONFIRMATION,,}" == "yes" ] ||
[ "${USER_CONFIRMATION,,}" == "y" ] && return 0
167 [ "${USER_CONFIRMATION,,}" == "no" ] ||
[ "${USER_CONFIRMATION,,}" == "n" ] && return 1
168 read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION
170 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
173 function install_osmclient
(){
174 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
175 CLIENT_RELEASE
=${RELEASE#"-R "}
176 CLIENT_REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
177 CLIENT_REPOSITORY
=${REPOSITORY#"-r "}
178 CLIENT_REPOSITORY_BASE
=${REPOSITORY_BASE#"-u "}
179 key_location
=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
180 curl
$key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE
=1 apt-key add
-
181 sudo add-apt-repository
-y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
183 sudo apt-get
install -y python3-pip
184 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
185 sudo
-H LC_ALL
=C python3
-m pip
install -U python-magic pyangbind verboselogs
186 sudo apt-get
install -y python3-osm-im python3-osmclient
187 if [ -f /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
]; then
188 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osm_im
/requirements.txt
190 if [ -f /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
]; then
191 sudo apt-get
install -y libcurl4-openssl-dev libssl-dev libmagic1
192 python3
-m pip
install -r /usr
/lib
/python
3/dist-packages
/osmclient
/requirements.txt
194 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME
=`lxc list | awk '($2=="SO-ub"){print $6}'`
195 [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME
=`lxc list | awk '($2=="RO"){print $6}'`
196 echo -e "\nOSM client installed"
197 if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
198 echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
199 echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
200 echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
202 echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
203 echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
204 echo " export OSM_HOSTNAME=<OSM_host>"
206 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
210 function docker_login
() {
211 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
213 [ -z "${DEBUG_INSTALL}" ] || DEBUG
"Docker registry user: ${DOCKER_REGISTRY_USER}"
214 sg docker
-c "docker login -u ${DOCKER_REGISTRY_USER} -p ${DOCKER_REGISTRY_PASSWORD} --password-stdin"
215 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
218 function generate_docker_images
() {
219 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
220 echo "Pulling and generating docker images"
221 [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login
223 echo "Pulling docker images"
225 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q KAFKA
; then
226 sg docker
-c "docker pull wurstmeister/zookeeper" || FATAL
"cannot get zookeeper docker image"
227 sg docker
-c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL
"cannot get kafka docker image"
230 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS
; then
231 sg docker
-c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL
"cannot get prometheus docker image"
234 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q PROMETHEUS-CADVISOR
; then
235 sg docker
-c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL
"cannot get prometheus cadvisor docker image"
238 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q GRAFANA
; then
239 sg docker
-c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL
"cannot get grafana docker image"
240 sg docker
-c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL
"cannot get kiwigrid k8s-sidecar docker image"
243 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q NBI ||
echo $TO_REBUILD |
grep -q KEYSTONE-DB
; then
244 sg docker
-c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL
"cannot get keystone-db docker image"
247 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q RO
; then
248 sg docker
-c "docker pull mysql:5" || FATAL
"cannot get mysql docker image"
251 if [ -n "$PULL_IMAGES" ]; then
252 echo "Pulling OSM docker images"
253 for module
in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient
; do
254 module_lower
=${module,,}
255 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
258 module_tag
="${OSM_DOCKER_TAG}"
259 if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD |
grep -q $module ; then
260 module_tag
="${MODULE_DOCKER_TAG}"
262 echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
263 sg docker
-c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL
"cannot pull $module docker image"
266 _build_from
=$COMMIT_ID
267 [ -z "$_build_from" ] && _build_from
="latest"
268 echo "OSM Docker images generated from $_build_from"
269 LWTEMPDIR
="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX
")"
270 trap 'rm -rf "${LWTEMPDIR}"' EXIT
271 for module
in MON POL NBI KEYSTONE RO LCM NG-UI PLA
; do
272 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q ${module} ; then
273 module_lower
=${module,,}
274 if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
277 git
-C ${LWTEMPDIR} clone https
://osm.etsi.org
/gerrit
/osm
/$module
278 git
-C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
279 sg docker
-c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
282 if [ -z "$TO_REBUILD" ] ||
echo $TO_REBUILD |
grep -q osmclient
; then
283 BUILD_ARGS
+=(--build-arg REPOSITORY
="$REPOSITORY")
284 BUILD_ARGS
+=(--build-arg RELEASE
="$RELEASE")
285 BUILD_ARGS
+=(--build-arg REPOSITORY_KEY
="$REPOSITORY_KEY")
286 BUILD_ARGS
+=(--build-arg REPOSITORY_BASE
="$REPOSITORY_BASE")
287 sg docker
-c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
289 echo "Finished generation of docker images"
292 echo "Finished pulling and generating docker images"
293 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
296 function cmp_overwrite
() {
297 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
300 if ! $
(cmp "${file1}" "${file2}" >/dev
/null
2>&1); then
301 if [ -f "${file2}" ]; then
302 ask_user
"The file ${file2} already exists. Overwrite (y/N)? " n && cp -b ${file1} ${file2}
304 cp -b ${file1} ${file2}
307 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
310 function generate_k8s_manifest_files() {
311 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
312 #Kubernetes resources
313 sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
314 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
317 function generate_docker_env_files() {
318 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
319 echo "Doing a backup of existing env files
"
320 sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
321 sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
322 sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
323 sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
324 sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
325 sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
326 sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
327 sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
328 if [ -n "${INSTALL_NGSA}" ]; then
329 sudo cp $OSM_DOCKER_WORK_DIR/ngsa.env{,~}
330 sudo cp $OSM_DOCKER_WORK_DIR/webhook-translator.env{,~}
333 echo "Generating docker env files
"
335 if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
336 echo "OSMLCM_DATABASE_COMMONKEY
=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
339 if ! grep -Fq "OSMLCM_VCA_HOST
" $OSM_DOCKER_WORK_DIR/lcm.env; then
340 echo "OSMLCM_VCA_HOST
=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
342 sudo sed -i "s|OSMLCM_VCA_HOST.
*|OSMLCM_VCA_HOST
=$OSM_VCA_HOST|g
" $OSM_DOCKER_WORK_DIR/lcm.env
345 if ! grep -Fq "OSMLCM_VCA_SECRET
" $OSM_DOCKER_WORK_DIR/lcm.env; then
346 echo "OSMLCM_VCA_SECRET
=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
348 sudo sed -i "s|OSMLCM_VCA_SECRET.
*|OSMLCM_VCA_SECRET
=$OSM_VCA_SECRET|g
" $OSM_DOCKER_WORK_DIR/lcm.env
351 if ! grep -Fq "OSMLCM_VCA_PUBKEY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
352 echo "OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
354 sudo sed -i "s|OSMLCM_VCA_PUBKEY.
*|OSMLCM_VCA_PUBKEY
=${OSM_VCA_PUBKEY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
357 if ! grep -Fq "OSMLCM_VCA_CACERT
" $OSM_DOCKER_WORK_DIR/lcm.env; then
358 echo "OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
360 sudo sed -i "s|OSMLCM_VCA_CACERT.
*|OSMLCM_VCA_CACERT
=${OSM_VCA_CACERT}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
363 if [ -n "$OSM_VCA_APIPROXY" ]; then
364 if ! grep -Fq "OSMLCM_VCA_APIPROXY
" $OSM_DOCKER_WORK_DIR/lcm.env; then
365 echo "OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
367 sudo sed -i "s|OSMLCM_VCA_APIPROXY.
*|OSMLCM_VCA_APIPROXY
=${OSM_VCA_APIPROXY}|g
" $OSM_DOCKER_WORK_DIR/lcm.env
371 if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE
" $OSM_DOCKER_WORK_DIR/lcm.env; then
372 echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
375 if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env
; then
376 echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
379 if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
380 echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
382 sudo
sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
385 if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env
; then
386 echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
388 sudo
sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
390 if [ -n "${OSM_BEHIND_PROXY}" ]; then
391 if ! grep -Fq "HTTP_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env
; then
392 echo "HTTP_PROXY=${HTTP_PROXY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
394 sudo
sed -i "s|HTTP_PROXY.*|HTTP_PROXY=${HTTP_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
396 if ! grep -Fq "HTTPS_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env
; then
397 echo "HTTPS_PROXY=${HTTPS_PROXY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
399 sudo
sed -i "s|HTTPS_PROXY.*|HTTPS_PROXY=${HTTPS_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
401 if ! grep -Fq "NO_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env
; then
402 echo "NO_PROXY=${NO_PROXY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/lcm.env
404 sudo
sed -i "s|NO_PROXY.*|NO_PROXY=${NO_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
409 MYSQL_ROOT_PASSWORD
=$
(generate_secret
)
410 if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env
]; then
411 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/ro-db.env
413 if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env
]; then
414 echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/ro.env
416 if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env
; then
417 echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/ro.env
421 KEYSTONE_DB_PASSWORD
=$
(generate_secret
)
422 SERVICE_PASSWORD
=$
(generate_secret
)
423 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env
]; then
424 echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/keystone-db.env
426 if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env
]; then
427 echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/keystone.env
428 echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo
tee -a $OSM_DOCKER_WORK_DIR/keystone.env
429 echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo
tee -a $OSM_DOCKER_WORK_DIR/keystone.env
433 if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env
]; then
434 echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo
tee $OSM_DOCKER_WORK_DIR/nbi.env
435 echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/nbi.env
439 if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env
]; then
440 echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
441 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
442 echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
445 if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env
; then
446 echo "OS_NOTIFIER_URI=http://${OSM_DEFAULT_IP}:8662" |sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
448 sudo
sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$OSM_DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
451 if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env
; then
452 echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
454 sudo
sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
457 if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env
; then
458 echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
460 sudo
sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
463 if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env
; then
464 echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/mon.env
466 sudo
sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
470 if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env
]; then
471 echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo
tee -a $OSM_DOCKER_WORK_DIR/pol.env
475 if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/ngsa.env
]; then
476 echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo
tee -a $OSM_DOCKER_WORK_DIR/ngsa.env
480 if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/webhook-translator.env
]; then
481 echo "AIRFLOW_HOST=airflow-webserver" | sudo
tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
482 echo "AIRFLOW_PORT=8080" | sudo
tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
483 echo "AIRFLOW_USER=admin" | sudo
tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
484 echo "AIRFLOW_PASS=admin" | sudo
tee -a $OSM_DOCKER_WORK_DIR/webhook-translator.env
487 echo "Finished generation of docker env files"
488 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
491 #creates secrets from env files which will be used by containers
492 function kube_secrets
(){
493 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
494 kubectl create ns
$OSM_NAMESPACE
495 kubectl create secret generic lcm-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
496 kubectl create secret generic mon-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
497 kubectl create secret generic nbi-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
498 kubectl create secret generic ro-db-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
499 kubectl create secret generic ro-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
500 kubectl create secret generic keystone-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
501 kubectl create secret generic pol-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
502 if [ -n "${INSTALL_NGSA}" ]; then
503 kubectl create secret generic ngsa-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/ngsa.env
504 kubectl create secret generic webhook-translator-secret
-n $OSM_NAMESPACE --from-env-file=$OSM_DOCKER_WORK_DIR/webhook-translator.env
506 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
509 #deploys osm pods and services
510 function deploy_osm_services
() {
511 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
512 kubectl apply
-n $OSM_NAMESPACE -f $OSM_K8S_WORK_DIR
513 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
516 #deploy charmed services
517 function deploy_charmed_services
() {
518 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
519 juju add-model
$OSM_NAMESPACE $OSM_VCA_K8S_CLOUDNAME
520 juju deploy ch
:mongodb-k8s
-m $OSM_NAMESPACE
521 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
524 function deploy_osm_pla_service
() {
525 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
526 # corresponding to deploy_osm_services
527 kubectl apply
-n $OSM_NAMESPACE -f $OSM_DOCKER_WORK_DIR/osm_pla
528 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
531 function install_osm_ngsa_service
() {
532 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
533 $OSM_DEVOPS/installers
/install_ngsa.sh
-d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} ${DEBUG_INSTALL} || \
534 FATAL_TRACK install_osm_ngsa_service
"install_ngsa.sh failed"
535 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function parse_yaml() {
    # Rewrite the container image references in the K8s manifests so every listed
    # module points at ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<image>:<TAG>.
    #   $1 - docker tag to substitute
    #   $@ - (after shift) list of module names whose manifests must be updated
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): the next three assignments fill a gap in the mangled source —
    # confirm against the upstream installer.
    TAG=$1
    shift
    services=$@
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA manifests live in their own folder and only exist with --pla.
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/pla:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/pla:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
            fi
        else
            # NOTE(review): gap-filled reconstruction — the ng-* manifests appear to
            # reference the base image name (ng-prometheus -> prometheus,
            # ng-mon -> mon); confirm against the upstream installer.
            image=${module}
            if [ "$module" == "ng-prometheus" ]; then
                image="prometheus"
            elif [ "$module" == "ng-mon" ]; then
                image="mon"
            fi
            echo "Updating K8s manifest file from opensourcemano\/${image}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${image}:${TAG}"
            sudo sed -i "s#opensourcemano/${image}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${image}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function update_manifest_files() {
    # Retag the K8s manifests for all services not selected for rebuild, then drop
    # the manifests that do not apply to the chosen monitoring setup (NG-SA vs classic).
    osm_services="nbi lcm ro pol mon ng-mon ng-ui keystone pla prometheus ng-prometheus"
    list_of_services=""
    # Collect the services whose manifests must be retagged (those NOT named in -m).
    for module in $osm_services; do
        module_upper="${module^^}"
        if ! echo $TO_REBUILD | grep -q $module_upper ; then
            list_of_services="$list_of_services $module"
        fi
    done
    if [ ! "$OSM_DOCKER_TAG" == "13" ]; then
        parse_yaml $OSM_DOCKER_TAG $list_of_services
    fi
    if [ -n "$MODULE_DOCKER_TAG" ]; then
        parse_yaml $MODULE_DOCKER_TAG $list_of_services_to_rebuild
    fi
    # The manifest for prometheus is prometheus.yaml or ng-prometheus.yaml, depending on the installation option
    # If NG-SA is installed, it will include ng-mon (only mon-dashboarder), ng-prometheus and webhook translator. It won't include pol, mon and prometheus
    if [ -n "$INSTALL_NGSA" ]; then
        sudo rm -f ${OSM_K8S_WORK_DIR}/prometheus.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/mon.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/pol.yaml
    else
        # NOTE(review): this 'else' fills a gap in the mangled source — confirm upstream.
        sudo rm -f ${OSM_K8S_WORK_DIR}/ng-mon.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/ng-prometheus.yaml
        sudo rm -f ${OSM_K8S_WORK_DIR}/webhook-translator.yaml
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function namespace_vol() {
    # Re-root every hostPath volume from /var/lib/osm to the per-namespace
    # directory ${OSM_NAMESPACE_VOL} in the manifests that declare one.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # List of services with a volume mounted in path /var/lib/osm
    # NOTE(review): this assignment fills a gap in the mangled source — confirm the
    # exact service list against the upstream installer.
    osm_services="mysql lcm ro mon pol prometheus kafka mongo"
    for osm in $osm_services; do
        if [ -f "$OSM_K8S_WORK_DIR/$osm.yaml" ] ; then
            sudo sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
        fi
    done
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function add_local_k8scluster() {
    # Register the local Kubernetes cluster in OSM behind a dummy VIM account so
    # KNFs can be deployed on this same cluster.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    /usr/bin/osm --all-projects vim-create \
        --name _system-osm-vim \
        --account_type dummy \
        --auth_url http://dummy \
        --user osm --password osm --tenant osm \
        --description "dummy" \
        --config '{management_network_name: mgmt}'
    # NOTE(review): two arguments of this command were lost in the mangled source
    # (likely --version and the trailing cluster name _system-osm-k8s) — confirm
    # against the upstream installer.
    /usr/bin/osm --all-projects k8scluster-add \
        --creds ${HOME}/.kube/config \
        --vim _system-osm-vim \
        --k8s-nets '{"net1": null}' \
        --version '1.15' \
        --description "OSM Internal Cluster" \
        _system-osm-k8s
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function configure_apt_proxy() {
    # Configure apt to use the given proxy URL and refresh the package index.
    #   $1 - proxy URL (e.g. http://proxy.example.com:3128)
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): this assignment fills a gap in the mangled source; the heredoc
    # below clearly reads ${OSM_APT_PROXY}.
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f ${OSM_APT_PROXY_FILE} ]; then
        # BUGFIX: the heredoc used to be redirected to ${OSM_APT_PROXY} (the proxy
        # URL itself) instead of ${OSM_APT_PROXY_FILE} (the apt config file).
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" ${OSM_APT_PROXY_FILE}
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function ask_proceed() {
    # Ask the user for confirmation before starting the installation, unless -y
    # (ASSUME_YES) was given. Exits the installer on a negative answer.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): items 2-3 and the closing line of this prompt were lost in the
    # mangled source and have been reconstructed — confirm the exact wording upstream.
    [ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
    1. Install and configure LXD
    2. Install juju
    3. Install docker CE
    4. Disable swap space
    5. Install and initialize Kubernetes
    as pre-requirements.
    Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function check_osm_behind_proxy() {
    # Detect proxy-related environment variables, export OSM_BEHIND_PROXY and
    # OSM_PROXY_ENV_VARIABLES for the rest of the installer, and ask the user to
    # confirm a proxy-aware installation when a proxy is detected.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    export OSM_BEHIND_PROXY=""
    export OSM_PROXY_ENV_VARIABLES=""
    [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
    [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
    [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
    # BUGFIX: the label printed for HTTPS_PROXY used to read "https_proxy=...",
    # mislabelling which variable was detected.
    [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
    [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
    [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
    echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
    echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        # NOTE(review): some lines of this prompt were lost in the mangled source
        # and have been reconstructed — confirm the exact wording upstream.
        [ -z "$ASSUME_YES" ] && ! ask_user "
The following env variables have been found for the current user:
${OSM_PROXY_ENV_VARIABLES}.

This suggests that this machine is behind a proxy and a special configuration is required.
The installer will install Docker CE, LXD and Juju to work behind a proxy using those
env variables.

Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
Depending on the program, the env variables to work behind a proxy might be different
(e.g. http_proxy vs HTTP_PROXY).

For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
and HTTPS_PROXY are defined.

Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
those env variables are also set for root user. If you are not sure whether those variables
are configured for the root user, you can stop the installation now.

Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        echo "This machine is not behind a proxy"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function find_devops_folder() {
    # Point OSM_DEVOPS at a devops checkout: reuse the local repo containing this
    # script when TEST_INSTALLER is set, otherwise clone into a temporary dir
    # that is removed on exit.
    if [ -z "$OSM_DEVOPS" ]; then
        if [ -n "$TEST_INSTALLER" ]; then
            echo -e "\nUsing local devops repo for OSM installation"
            OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
        else
            echo -e "\nCreating temporary dir for OSM installation"
            OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
            trap 'rm -rf "$OSM_DEVOPS"' EXIT
            git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
        fi
    fi
}
712 function install_osm
() {
# NOTE(review): this function body is line-mangled in this dump (statements are
# split across physical lines and several original lines are missing, including
# most closing fi/done). Comments below annotate the visible flow; restore the
# formatting from the upstream installer before editing logic.
713 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
717 # TODO: move this under start
718 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
# Proxy detection, prerequisite packages and telemetry checkpoints.
720 check_osm_behind_proxy
721 track checks proxy_ok
723 check_packages
"git wget curl tar snapd"
725 sudo snap
install jq || FATAL
"Could not install jq (snap package). Make sure that snap works"
729 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
# Refuse to run as root: the installer expects a sudo-capable normal user.
731 track checks checkingroot_ok
732 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
733 track checks noroot_ok
736 track checks proceed_ok
738 echo "Installing OSM"
# Determine the default-route interface and its IPv4 address; both are required.
740 echo "Determining IP address of the interface with the default route"
741 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
742 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
743 [ -z "$OSM_DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
744 OSM_DEFAULT_IP
=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
745 [ -z "$OSM_DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
747 # configure apt proxy
748 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy
$APT_PROXY_URL
750 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
751 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
752 LXD_INSTALL_OPTS
="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
753 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS
="${LXD_INSTALL_OPTS} -P"
754 $OSM_DEVOPS/installers
/install_lxd.sh
${LXD_INSTALL_OPTS} || FATAL_TRACK lxd
"install_lxd.sh failed"
757 track prereq prereqok_ok
# Docker CE installation (skipped with --nodocker), proxy flags forwarded.
759 if [ ! -n "$INSTALL_NODOCKER" ]; then
760 DOCKER_CE_OPTS
="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
761 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
762 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS
="${DOCKER_CE_OPTS} -P"
763 $OSM_DEVOPS/installers
/install_docker_ce.sh
${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce
"install_docker_ce.sh failed"
766 track docker_ce docker_ce_ok
768 echo "Creating folders for installation"
769 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR
770 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
771 sudo
cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
# Bring up the kubeadm-based K8s cluster, then Juju on top of it.
773 $OSM_DEVOPS/installers
/install_kubeadm_cluster.sh
-i ${OSM_DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
774 FATAL_TRACK k8scluster
"install_kubeadm_cluster.sh failed"
775 track k8scluster k8scluster_ok
777 JUJU_OPTS
="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
778 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS
="$JUJU_OPTS -H ${OSM_VCA_HOST}"
779 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
780 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -L ${LXD_CRED_FILE}"
781 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS
="$JUJU_OPTS -K ${CONTROLLER_NAME}"
782 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS
="${JUJU_OPTS} -P"
783 $OSM_DEVOPS/installers
/install_juju.sh
${JUJU_OPTS} || FATAL_TRACK juju
"install_juju.sh failed"
787 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
788 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
# NOTE(review): BUG — the test below compares the literal string
# "OSM_DATABASE_COMMONKEY" (missing '$'), so it is always false and can never
# detect a failed secret generation.
789 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
792 # Deploy OSM services
793 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
794 track docker_images docker_images_ok
796 generate_k8s_manifest_files
797 track osm_files manifest_files_ok
798 generate_docker_env_files
799 track osm_files env_files_ok
801 deploy_charmed_services
802 track deploy_osm deploy_charmed_services_ok
804 track deploy_osm kube_secrets_ok
805 update_manifest_files
806 track deploy_osm update_manifest_files_ok
808 track deploy_osm namespace_vol_ok
810 track deploy_osm deploy_osm_services_k8s_ok
811 if [ -n "$INSTALL_PLA" ]; then
812 # optional PLA install
813 deploy_osm_pla_service
814 track deploy_osm deploy_osm_pla_ok
816 if [ -n "$INSTALL_K8S_MONITOR" ]; then
817 # install OSM MONITORING
818 install_k8s_monitoring
819 track deploy_osm install_k8s_monitoring_ok
821 if [ -n "$INSTALL_NGSA" ]; then
822 # optional PLA install
# NOTE(review): the comment above says PLA but this branch installs NG-SA
# (copy-paste from the block at line 812).
823 install_osm_ngsa_service
824 track deploy_osm install_osm_ngsa_ok
827 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
828 track osmclient osmclient_ok
830 echo -e "Checking OSM health state..."
831 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_NAMESPACE} -k || \
832 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
833 echo -e "Check OSM status with: kubectl -n ${OSM_NAMESPACE} get all" && \
834 track healthchecks osm_unhealthy didnotconverge
)
835 track healthchecks after_healthcheck_ok
838 track final_ops add_local_k8scluster_ok
840 arrange_docker_default_network_policy
# NOTE(review): presumably an installation-tracking ping (output discarded) —
# confirm its purpose upstream.
842 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README2.txt
&> /dev
/null
845 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
849 function install_to_openstack
() {
# NOTE(review): body is line-mangled and several original lines are missing
# (argument validation, the openrc-vs-clouds.yaml branch, venv deactivation).
# Expected arguments per the visible uses: $1 openrc file or cloud name,
# $2 external network name, $3 attach-volume flag.
850 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
853 FATAL
"OpenStack installer requires a valid external network name"
856 # Install Pip for Python3
857 sudo apt
install -y python3-pip python3-venv
858 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
860 # Create a venv to avoid conflicts with the host installation
861 python3
-m venv
$OPENSTACK_PYTHON_VENV
863 source $OPENSTACK_PYTHON_VENV/bin
/activate
865 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
866 python
-m pip
install -U wheel
867 python
-m pip
install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
869 # Install the Openstack cloud module (ansible>=2.10)
870 ansible-galaxy collection
install openstack.cloud
872 export ANSIBLE_CONFIG
="$OSM_DEVOPS/installers/openstack/ansible.cfg"
874 OSM_INSTALLER_ARGS
="${REPO_ARGS[@]}"
876 ANSIBLE_VARS
="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
878 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
879 ANSIBLE_VARS
+=" key_file=$OPENSTACK_SSH_KEY_FILE"
882 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
883 ANSIBLE_VARS
+=" userdata_file=$OPENSTACK_USERDATA_FILE"
886 # Execute the Ansible playbook based on openrc or clouds.yaml
# Branch 1 (visible): run the playbook after sourcing an openrc file.
889 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
890 $OSM_DEVOPS/installers
/openstack
/site.yml
# Branch 2 (visible): a clouds.yaml cloud name was given; pass it as cloud_name.
892 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
893 -e cloud_name
=$1 $OSM_DEVOPS/installers
/openstack
/site.yml
899 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function arrange_docker_default_network_policy() {
    # Insert an ACCEPT rule at the head of the DOCKER-USER chain and persist the
    # current firewall state so docker and LXD can coexist on this host.
    echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
    sudo iptables -I DOCKER-USER -j ACCEPT
    sudo iptables-save | sudo tee /etc/iptables/rules.v4
    sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
}
function install_k8s_monitoring() {
    # install OSM monitoring: make the k8s helper scripts executable and run the
    # monitoring installer, aborting the whole installation on failure.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function dump_vars(){
    # Print every installer configuration variable as NAME=value (used by --showopts).
    # The list below preserves the original print order exactly.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local var
    for var in APT_PROXY_URL DEVELOP DEBUG_INSTALL DOCKER_NOBUILD DOCKER_PROXY_URL \
        DOCKER_REGISTRY_URL DOCKER_USER INSTALL_CACHELXDIMAGES INSTALL_FROM_SOURCE \
        INSTALL_K8S_MONITOR INSTALL_LIGHTWEIGHT INSTALL_LXD INSTALL_NGSA \
        INSTALL_NODOCKER INSTALL_NOJUJU INSTALL_NOLXD INSTALL_ONLY INSTALL_PLA \
        INSTALL_TO_OPENSTACK INSTALL_VIMEMU NO_HOST_PORTS OPENSTACK_PUBLIC_NET_NAME \
        OPENSTACK_OPENRC_FILE_OR_CLOUD OPENSTACK_ATTACH_VOLUME OPENSTACK_SSH_KEY_FILE \
        OPENSTACK_USERDATA_FILE OPENSTACK_VM_NAME OSM_DEVOPS OSM_DOCKER_TAG \
        OSM_DOCKER_WORK_DIR OSM_HELM_WORK_DIR OSM_K8S_WORK_DIR OSM_NAMESPACE \
        OSM_VCA_HOST OSM_VCA_PUBKEY OSM_VCA_SECRET OSM_WORK_DIR PULL_IMAGES \
        RECONFIGURE RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY SHOWOPTS \
        TEST_INSTALLER TO_REBUILD UNINSTALL UPDATE; do
        # ${!var} is bash indirect expansion: the value of the variable named by $var.
        echo "${var}=${!var}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_docker_registry_url() {
    # Split a DOCKER_REGISTRY_URL of the form "user:password@host[:port]" into
    # DOCKER_REGISTRY_USER, DOCKER_REGISTRY_PASSWORD and the bare registry URL
    # (DOCKER_REGISTRY_URL is overwritten with the part after the '@').
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$DOCKER_REGISTRY_URL")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$DOCKER_REGISTRY_URL")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$DOCKER_REGISTRY_URL")
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): the function header line is missing from the mangled source; the
# name 'ctrl_c' is reconstructed from the conventional SIGINT trap handler —
# confirm against the upstream installer and its 'trap ... INT' registration.
function ctrl_c() {
    # SIGINT (CTRL-C) trap handler: report the interruption and abort the install.
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "** Trapped CTRL-C"
    FATAL "User stopped the installation"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# ---- Installer defaults (overridden by the getopts loop below) ----
INSTALL_FROM_SOURCE=""
INSTALL_K8S_MONITOR=""
LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
LXD_REPOSITORY_PATH=""
INSTALL_LIGHTWEIGHT="y"
# OpenStack-target installation defaults.
INSTALL_TO_OPENSTACK=""
OPENSTACK_OPENRC_FILE_OR_CLOUD=""
OPENSTACK_PUBLIC_NET_NAME=""
OPENSTACK_ATTACH_VOLUME="false"
OPENSTACK_SSH_KEY_FILE=""
OPENSTACK_USERDATA_FILE=""
OPENSTACK_VM_NAME="server-osm"
OPENSTACK_PYTHON_VENV="$HOME/.virtual-envs/osm"
INSTALL_NOHOSTCLIENT=""
INSTALL_CACHELXDIMAGES=""
# VCA (Juju) cloud names.
OSM_VCA_CLOUDNAME="localhost"
OSM_VCA_K8S_CLOUDNAME="k8scloud"
# Package repository defaults.
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
# Working directories used throughout the installer.
OSM_WORK_DIR="/etc/osm"
OSM_DOCKER_WORK_DIR="${OSM_WORK_DIR}/docker"
OSM_K8S_WORK_DIR="${OSM_DOCKER_WORK_DIR}/osm_pods"
OSM_HELM_WORK_DIR="${OSM_WORK_DIR}/helm"
OSM_HOST_VOL="/var/lib/osm"
OSM_NAMESPACE_VOL="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
# Docker image defaults.
OSM_DOCKER_TAG=latest
DOCKER_USER=opensourcemano
KAFKA_TAG=2.11-1.0.2
KIWIGRID_K8S_SIDECAR_TAG="1.15.6"
PROMETHEUS_TAG=v2.28.1
PROMETHEUS_NODE_EXPORTER_TAG=0.18.1
PROMETHEUS_CADVISOR_TAG=latest
OSM_DATABASE_COMMONKEY=
ELASTIC_VERSION=6.4.2
ELASTIC_CURATOR_VERSION=5.5.4
POD_NETWORK_CIDR=10.244.0.0/16
K8S_MANIFEST_DIR="/etc/kubernetes/manifests"
# Regex used to validate the -s namespace argument (RFC 1123 label).
RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
DOCKER_REGISTRY_URL=
OSM_INSTALLATION_TYPE="Default"
1061 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o
; do
# NOTE(review): the 'case "${o}" in' dispatch, the option labels (a), r), ... and
# the closing esac/done are missing from this mangled dump; each group below is
# the body of one option, identifiable by the variable it sets.
1064 APT_PROXY_URL
=${OPTARG}
1071 REPOSITORY
="${OPTARG}"
1072 REPO_ARGS
+=(-r "$REPOSITORY")
1075 REPOSITORY_KEY
="${OPTARG}"
1076 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
1079 REPOSITORY_BASE
="${OPTARG}"
1080 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
1084 REPO_ARGS
+=(-R "$RELEASE")
1087 OSM_DEVOPS
="${OPTARG}"
# -o: install only the named optional component.
1091 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1092 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA
="y" && continue
# -O: install to OpenStack using an openrc file or clouds.yaml cloud name.
1095 INSTALL_TO_OPENSTACK
="y"
1096 if [ -n "${OPTARG}" ]; then
1097 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
1099 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1104 OPENSTACK_SSH_KEY_FILE
="${OPTARG}"
1107 OPENSTACK_USERDATA_FILE
="${OPTARG}"
1110 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
# -m: restrict the build/pull to the named module(s).
1113 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD
="$TO_REBUILD NG-UI" && continue
1114 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1115 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1116 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1117 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1118 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1119 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
1120 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD
="$TO_REBUILD osmclient" && continue
1121 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1122 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1123 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1124 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1125 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1126 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1127 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1130 OSM_VCA_HOST
="${OPTARG}"
1133 OSM_VCA_SECRET
="${OPTARG}"
# -s: namespace, validated against the RFC 1123 label regex RE_CHECK.
1136 OSM_NAMESPACE
="${OPTARG}" && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1139 OSM_DOCKER_TAG
="${OPTARG}"
1140 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
1143 DOCKER_USER
="${OPTARG}"
1146 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1149 OSM_VCA_APIPROXY
="${OPTARG}"
1152 LXD_CLOUD_FILE
="${OPTARG}"
1155 LXD_CRED_FILE
="${OPTARG}"
1158 CONTROLLER_NAME
="${OPTARG}"
1161 DOCKER_REGISTRY_URL
="${OPTARG}"
1164 DOCKER_PROXY_URL
="${OPTARG}"
1167 MODULE_DOCKER_TAG
="${OPTARG}"
# --: GNU-style long options, dispatched on ${OPTARG}. Options listed with a
# bare 'continue' are consumed here but handled by delegated installers.
1170 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1171 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1172 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL
="--debug" && continue
1173 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1174 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1175 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1176 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1177 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1178 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1179 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1180 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1181 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1182 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1183 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="--nojuju" && continue
1184 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1185 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1186 [ "${OPTARG}" == "pullimages" ] && continue
1187 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1188 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && OSM_INSTALLATION_TYPE
="Charmed" && continue
1189 [ "${OPTARG}" == "bundle" ] && continue
1190 [ "${OPTARG}" == "k8s" ] && continue
1191 [ "${OPTARG}" == "lxd" ] && continue
1192 [ "${OPTARG}" == "lxd-cred" ] && continue
1193 [ "${OPTARG}" == "microstack" ] && continue
1194 [ "${OPTARG}" == "overlay" ] && continue
1195 [ "${OPTARG}" == "only-vca" ] && continue
1196 [ "${OPTARG}" == "small-profile" ] && continue
1197 [ "${OPTARG}" == "vca" ] && continue
1198 [ "${OPTARG}" == "ha" ] && continue
1199 [ "${OPTARG}" == "tag" ] && continue
1200 [ "${OPTARG}" == "registry" ] && continue
1201 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1202 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA
="y" && continue
1203 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1204 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1205 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES
="--cachelxdimages" && continue
1206 echo -e "Invalid option: '--$OPTARG'\n" >&2
# Error arms of the case: missing argument (:) and unknown short option (\?).
1210 echo "Option -$OPTARG requires an argument" >&2
1214 echo -e "Invalid option: '-$OPTARG'\n" >&2
# Top-level flow after option parsing: load shared helpers, then dispatch to
# uninstall / charmed / OpenStack / community installation.
# NOTE(review): this region is line-mangled and several original lines (closing
# fi, exit statements between branches) are missing from the dump.
1229 source $OSM_DEVOPS/common
/all_funcs
1231 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1232 [ -n "$SHOWOPTS" ] && dump_vars
&& exit 0
1234 # Uninstall if "--uninstall"
1235 if [ -n "$UNINSTALL" ]; then
1236 if [ -n "$CHARMED" ]; then
1237 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
1238 FATAL_TRACK charmed_uninstall
"charmed_uninstall.sh failed"
1240 ${OSM_DEVOPS}/installers
/uninstall_osm.sh
"$@" || \
1241 FATAL_TRACK community_uninstall
"uninstall_osm.sh failed"
1247 # Charmed installation
1248 if [ -n "$CHARMED" ]; then
1249 sudo snap
install jq || FATAL
"Could not install jq (snap package). Make sure that snap works"
1250 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1251 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
1252 ${OSM_DEVOPS}/installers
/charmed_install.sh
--tag $OSM_DOCKER_TAG "$@" || \
1253 FATAL_TRACK charmed_install
"charmed_install.sh failed"
# NOTE(review): presumably an installation-tracking ping (output discarded).
1254 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README2.txt
&> /dev
/null
1255 track end installation_type
$OSM_INSTALLATION_TYPE
1260 # Installation to Openstack
1261 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1262 install_to_openstack
$OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1267 # Community_installer
# Sanity checks on the -m module list before starting.
1269 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1270 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1271 # if develop, we force master
1272 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
1273 OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_NAMESPACE}"
# --install-only shortcuts: run just the requested add-on and exit.
1274 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1275 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1276 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1278 #Installation starts here
1279 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README.txt
&> /dev
/null
1280 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"