3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
# Prints the installer's command-line help text.
# NOTE(review): the 'function usage() {' header and the closing brace were lost
# in this mangled copy and have been restored; a few help lines (original
# lines 20 and 31) may also have been dropped — verify against upstream devops.
function usage() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo -e "usage: $0 [OPTIONS]"
    echo -e "Install OSM from binaries or source code (by default, from binaries)"
    echo -e " -h / --help: print this help"
    echo -e " -y: do not prompt for confirmation, assumes yes"
    echo -e " -r <repo>: use specified repository name for osm packages"
    echo -e " -R <release>: use specified release for osm binaries (deb packages, lxd images, ...)"
    echo -e " -u <repo base>: use specified repository url for osm packages"
    echo -e " -k <repo key>: use specified repository public key url"
    echo -e " -b <refspec>: install OSM from source code using a specific branch (master, v2.0, ...) or tag"
    echo -e " -b master (main dev branch)"
    echo -e " -b v2.0 (v2.0 branch)"
    echo -e " -b tags/v1.1.0 (a specific tag)"
    echo -e " -a <apt proxy url>: use this apt proxy url when downloading apt packages (air-gapped installation)"
    echo -e " -s <stack name> or <namespace> user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
    echo -e " -H <VCA host> use specific juju host controller IP"
    echo -e " -S <VCA secret> use VCA/juju secret key"
    echo -e " -P <VCA pubkey> use VCA/juju public key file"
    echo -e " -A <VCA apiproxy> use VCA/juju API proxy"
    echo -e " --pla: install the PLA module for placement support"
    echo -e " --ng-sa: install Airflow and Pushgateway to get VNF and NS status (experimental)"
    echo -e " -m <MODULE>: install OSM but only rebuild or pull the specified docker images (NG-UI, NBI, LCM, RO, MON, POL, PLA, KAFKA, MONGO, PROMETHEUS, PROMETHEUS-CADVISOR, KEYSTONE-DB, NONE)"
    echo -e " -o <ADDON>: ONLY (un)installs one of the addons (k8s_monitor, ng-sa)"
    echo -e " -O <openrc file path/cloud name>: Install OSM to an OpenStack infrastructure. <openrc file/cloud name> is required. If a <cloud name> is used, the clouds.yaml file should be under ~/.config/openstack/ or /etc/openstack/"
    echo -e " -N <openstack public network name/ID>: Public network name required to setup OSM to OpenStack"
    echo -e " -f <path to SSH public key>: Public SSH key to use to deploy OSM to OpenStack"
    echo -e " -F <path to cloud-init file>: Cloud-Init userdata file to deploy OSM to OpenStack"
    echo -e " -D <devops path> use local devops installation path"
    echo -e " -w <work dir> Location to store runtime installation"
    echo -e " -t <docker tag> specify osm docker tag (default is latest)"
    echo -e " -l: LXD cloud yaml file"
    echo -e " -L: LXD credentials yaml file"
    echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
    echo -e " -d <docker registry URL> use docker registry URL instead of dockerhub"
    echo -e " -p <docker proxy URL> set docker proxy URL as part of docker CE configuration"
    echo -e " -T <docker tag> specify docker tag for the modules specified with option -m"
    echo -e " --debug: debug mode"
    echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
    echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
    echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
    echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
    echo -e " --nojuju: do not juju, assumes already installed"
    echo -e " --nodockerbuild:do not build docker images (use existing locally cached images)"
    echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
    echo -e " --nohostclient: do not install the osmclient"
    echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
    echo -e " --source: install OSM from source code using the latest stable tag"
    echo -e " --develop: (deprecated, use '-b master') install OSM from source code using the master branch"
    echo -e " --pullimages: pull/run osm images from docker.io/opensourcemano"
    echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
    echo -e " --volume: create a VM volume when installing to OpenStack"
    echo -e " --showopts: print chosen options and exit (only for debugging)"
    echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
    echo -e " [--bundle <bundle path>]: Specify with which bundle to deploy OSM with charms (--charmed option)"
    echo -e " [--k8s <kubeconfig path>]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
    echo -e " [--vca <name>]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
    echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
    echo -e " [--lxd <yaml path>]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
    echo -e " [--lxd-cred <yaml path>]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
    echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
    echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
    echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
    echo -e " [--tag]: Docker image tag. (--charmed option)"
    echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# takes a juju/accounts.yaml file and returns the password specific
# for a controller. I wrote this using only bash tools to minimize
# additions of other packages
# Arguments: $1 - controller name
# Outputs:   prints the password on stdout (no trailing newline)
# NOTE(review): the awk statements 'vname[indent] = $2;' and the final
# 'printf("%s",$3);' plus the closing braces were missing from this mangled
# copy and have been reconstructed — verify against the upstream devops repo.
function parse_juju_password {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    password_file="${HOME}/.local/share/juju/accounts.yaml"
    local controller_name=$1
    # s: optional whitespace, w: YAML key characters, fs: an improbable
    # field separator (ASCII 0x1C) used to delimit the flattened records
    local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
    # Flatten the YAML into 'indent<FS>key<FS>value' records, then walk them
    # tracking the key path; print the value whose path contains the
    # controller name and whose key matches 'password'.
    sed -ne "s|^\($s\):|\1|" \
        -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
    awk -F$fs -v controller=$controller_name '{
        indent = length($1)/2;
        vname[indent] = $2;
        for (i in vname) {if (i > indent) {delete vname[i]}}
        if (length($3) > 0) {
            vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
            if (match(vn,controller) && match($2,"password")) {
                printf("%s",$3);
            }
        }
    }'
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Determines the VCA (juju) connection settings used by the rest of the
# installer: cloud name, controller IP, secret, public key and CA certificate.
# Values already present in the environment are kept; missing ones are read
# from the local juju installation (controller $OSM_STACK_NAME, or
# $CONTROLLER_NAME when an external controller is used).
# NOTE(review): the 'fi' terminators and the closing brace were missing from
# this mangled copy and have been reconstructed.
function set_vca_variables() {
    OSM_VCA_CLOUDNAME="lxd-cloud"
    [ -n "$OSM_VCA_HOST" ] && OSM_VCA_CLOUDNAME="localhost"
    if [ -z "$OSM_VCA_HOST" ]; then
        # The controller IP is the host part of the first api-endpoint
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(sg lxd -c "juju show-controller $OSM_STACK_NAME" | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=$(juju show-controller $CONTROLLER_NAME | grep api-endpoints | awk -F\' '{print $2}' | awk -F\: '{print $1}')
        [ -z "$OSM_VCA_HOST" ] && FATAL "Cannot obtain juju controller IP address"
    fi
    if [ -z "$OSM_VCA_SECRET" ]; then
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $OSM_STACK_NAME)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_SECRET=$(parse_juju_password $CONTROLLER_NAME)
        [ -z "$OSM_VCA_SECRET" ] && FATAL "Cannot obtain juju secret"
    fi
    if [ -z "$OSM_VCA_PUBKEY" ]; then
        OSM_VCA_PUBKEY=$(cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub)
        [ -z "$OSM_VCA_PUBKEY" ] && FATAL "Cannot obtain juju public key"
    fi
    if [ -z "$OSM_VCA_CACERT" ]; then
        # base64-encode the CA cert as a single line for the env files
        [ -z "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $OSM_STACK_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -n "$CONTROLLER_NAME" ] && OSM_VCA_CACERT=$(juju controllers --format json | jq -r --arg controller $CONTROLLER_NAME '.controllers[$controller]["ca-cert"]' | base64 | tr -d \\n)
        [ -z "$OSM_VCA_CACERT" ] && FATAL "Cannot obtain juju CA certificate"
    fi
}
# Emits a random 32-character alphanumeric secret on stdout (no newline).
function generate_secret() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Keep only letters/digits from urandom and truncate to 32 characters
    head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Verifies that the required apt packages are installed; installs any that
# are missing (refreshing the apt cache first).
# Arguments: $1 - space-separated package list (falls back to the existing
#                 $NEEDED_PACKAGES global when no argument is given)
# NOTE(review): the package-presence check, 'sudo apt-get update' and the
# fi/done terminators were missing from this mangled copy and have been
# reconstructed — verify against the upstream devops repository.
function check_packages() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    NEEDED_PACKAGES="${1:-${NEEDED_PACKAGES}}"
    echo -e "Checking required packages: ${NEEDED_PACKAGES}"
    for PACKAGE in ${NEEDED_PACKAGES} ; do
        dpkg -l ${PACKAGE} >/dev/null 2>&1
        if [ $? -ne 0 ]; then
            echo -e "Package ${PACKAGE} is not installed."
            echo -e "Updating apt-cache ..."
            sudo apt-get update
            echo -e "Installing ${PACKAGE} ..."
            sudo apt-get install -y ${PACKAGE} || FATAL "failed to install ${PACKAGE}"
        fi
    done
    echo -e "Required packages are present: ${NEEDED_PACKAGES}"
}
# ask to the user and parse a response among 'y', 'yes', 'n' or 'no'. Case insensitive
# Params: $1 text to ask; $2 Action by default, can be 'y' for yes, 'n' for no, other or empty for not allowed
# Return: true(0) if user type 'yes'; false (1) if user type 'no'
# NOTE(review): the 'function ask_user' header and the retry-loop delimiters
# were missing from this mangled copy and have been reconstructed (the
# function is called from cmp_overwrite).
function ask_user() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    read -e -p "$1" USER_CONFIRMATION
    while true ; do
        # Empty answer: apply the default action, if one was given
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'y' ] && return 0
        [ -z "$USER_CONFIRMATION" ] && [ "$2" == 'n' ] && return 1
        [ "${USER_CONFIRMATION,,}" == "yes" ] || [ "${USER_CONFIRMATION,,}" == "y" ] && return 0
        [ "${USER_CONFIRMATION,,}" == "no" ] || [ "${USER_CONFIRMATION,,}" == "n" ] && return 1
        # On EOF (e.g. redirected input) stop retrying instead of looping forever
        read -e -p "Please type 'yes' or 'no': " USER_CONFIRMATION || return 1
    done
}
# Installs the OSM client (and the OSM Information Model) from the configured
# apt repository, plus the python requirements both packages declare.
# Globals read: RELEASE, REPOSITORY, REPOSITORY_BASE, INSTALL_LIGHTWEIGHT
# Globals written/exported: OSM_HOSTNAME, OSM_RO_HOSTNAME (non-lightweight only)
# NOTE(review): the fi/else terminators, 'sudo apt-get update' and the closing
# brace were missing from this mangled copy and have been reconstructed.
function install_osmclient() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Strip the option prefixes the CLI parser left in these globals
    CLIENT_RELEASE=${RELEASE#"-R "}
    CLIENT_REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
    CLIENT_REPOSITORY=${REPOSITORY#"-r "}
    CLIENT_REPOSITORY_BASE=${REPOSITORY_BASE#"-u "}
    key_location=$CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE/$CLIENT_REPOSITORY_KEY
    curl $key_location | sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] $CLIENT_REPOSITORY_BASE/$CLIENT_RELEASE $CLIENT_REPOSITORY osmclient IM"
    sudo apt-get update
    sudo apt-get install -y python3-pip
    sudo -H LC_ALL=C python3 -m pip install -U pip
    sudo -H LC_ALL=C python3 -m pip install -U python-magic pyangbind verboselogs
    sudo apt-get install -y python3-osm-im python3-osmclient
    if [ -f /usr/lib/python3/dist-packages/osm_im/requirements.txt ]; then
        python3 -m pip install -r /usr/lib/python3/dist-packages/osm_im/requirements.txt
    fi
    if [ -f /usr/lib/python3/dist-packages/osmclient/requirements.txt ]; then
        # osmclient needs native libs to build some of its python requirements
        sudo apt-get install -y libcurl4-openssl-dev libssl-dev libmagic1
        python3 -m pip install -r /usr/lib/python3/dist-packages/osmclient/requirements.txt
    fi
    # Classic (non-lightweight) installs expose the SO/RO container addresses
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_HOSTNAME=$(lxc list | awk '($2=="SO-ub"){print $6}')
    [ -z "$INSTALL_LIGHTWEIGHT" ] && export OSM_RO_HOSTNAME=$(lxc list | awk '($2=="RO"){print $6}')
    echo -e "\nOSM client installed"
    if [ -z "$INSTALL_LIGHTWEIGHT" ]; then
        echo -e "You might be interested in adding the following OSM client env variables to your .bashrc file:"
        echo " export OSM_HOSTNAME=${OSM_HOSTNAME}"
        echo " export OSM_RO_HOSTNAME=${OSM_RO_HOSTNAME}"
    else
        echo -e "OSM client assumes that OSM host is running in localhost (127.0.0.1)."
        echo -e "In case you want to interact with a different OSM host, you will have to configure this env variable in your .bashrc file:"
        echo " export OSM_HOSTNAME=<OSM_host>"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Logs the docker group session in to the configured registry.
# Fix: the original passed the password both with '-p' and '--password-stdin',
# which docker rejects as conflicting options, and it also exposed the
# password on the command line (visible in 'ps' and shell history). The
# password is now fed exclusively through stdin.
function docker_login() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    [ -z "${DEBUG_INSTALL}" ] || DEBUG "Docker registry user: ${DOCKER_REGISTRY_USER}"
    echo "${DOCKER_REGISTRY_PASSWORD}" | sg docker -c "docker login -u ${DOCKER_REGISTRY_USER} --password-stdin"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Pulls the third-party docker images OSM depends on and then either pulls the
# OSM module images from the registry (PULL_IMAGES set) or builds them from
# source (checked out from gerrit at $COMMIT_ID).
# Globals read: TO_REBUILD, PULL_IMAGES, INSTALL_PLA, DOCKER_REGISTRY_URL,
#               DOCKER_USER, OSM_DOCKER_TAG, MODULE_DOCKER_TAG, COMMIT_ID,
#               *_TAG image versions, REPOSITORY*, OSM_DEVOPS
# NOTE(review): the continue/fi/done/else delimiters were missing from this
# mangled copy and have been reconstructed.
function generate_docker_images() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Pulling and generating docker images"
    [ -n "${DOCKER_REGISTRY_URL}" ] && docker_login

    echo "Pulling docker images"

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
        sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
        sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS-CADVISOR ; then
        sg docker -c "docker pull google/cadvisor:${PROMETHEUS_CADVISOR_TAG}" || FATAL "cannot get prometheus cadvisor docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q GRAFANA ; then
        sg docker -c "docker pull grafana/grafana:${GRAFANA_TAG}" || FATAL "cannot get grafana docker image"
        sg docker -c "docker pull kiwigrid/k8s-sidecar:${KIWIGRID_K8S_SIDECAR_TAG}" || FATAL "cannot get kiwigrid k8s-sidecar docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q NBI || echo $TO_REBUILD | grep -q KEYSTONE-DB ; then
        sg docker -c "docker pull mariadb:${KEYSTONEDB_TAG}" || FATAL "cannot get keystone-db docker image"
    fi

    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q RO ; then
        sg docker -c "docker pull mysql:5" || FATAL "cannot get mysql docker image"
    fi

    if [ -n "$PULL_IMAGES" ]; then
        echo "Pulling OSM docker images"
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA osmclient; do
            module_lower=${module,,}
            # PLA is optional: skip it unless explicitly requested
            if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                continue
            fi
            module_tag="${OSM_DOCKER_TAG}"
            # Modules listed in -m may carry their own tag (-T)
            if [ -n "${MODULE_DOCKER_TAG}" ] && echo $TO_REBUILD | grep -q $module ; then
                module_tag="${MODULE_DOCKER_TAG}"
            fi
            echo "Pulling ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag} docker image"
            sg docker -c "docker pull ${DOCKER_REGISTRY_URL}${DOCKER_USER}/${module_lower}:${module_tag}" || FATAL "cannot pull $module docker image"
        done
    else
        _build_from=$COMMIT_ID
        [ -z "$_build_from" ] && _build_from="latest"
        echo "OSM Docker images generated from $_build_from"
        LWTEMPDIR="$(mktemp -d -q --tmpdir "installosmlight.XXXXXX")"
        trap 'rm -rf "${LWTEMPDIR}"' EXIT
        for module in MON POL NBI KEYSTONE RO LCM NG-UI PLA; do
            if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q ${module} ; then
                module_lower=${module,,}
                if [ $module == "PLA" -a ! -n "$INSTALL_PLA" ]; then
                    continue
                fi
                git -C ${LWTEMPDIR} clone https://osm.etsi.org/gerrit/osm/$module
                git -C ${LWTEMPDIR}/${module} checkout ${COMMIT_ID}
                sg docker -c "docker build ${LWTEMPDIR}/${module} -f ${LWTEMPDIR}/${module}/docker/Dockerfile -t ${DOCKER_USER}/${module_lower} --no-cache" || FATAL "cannot build ${module} docker image"
            fi
        done
        if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q osmclient; then
            BUILD_ARGS+=(--build-arg REPOSITORY="$REPOSITORY")
            BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
            BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
            BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
            sg docker -c "docker build -t ${DOCKER_USER}/osmclient ${BUILD_ARGS[@]} -f $OSM_DEVOPS/docker/osmclient ."
        fi
        echo "Finished generation of docker images"
    fi

    echo "Finished pulling and generating docker images"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copies $1 over $2 unless both files are already identical. When $2 exists
# and differs, the user is asked before overwriting; 'cp -b' keeps a backup
# of the overwritten file.
# NOTE(review): the file1/file2 assignments and the else/fi delimiters were
# missing from this mangled copy and have been reconstructed.
function cmp_overwrite() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    local file1="$1"
    local file2="$2"
    # Run cmp directly: the original wrapped it in $(...), which only worked
    # by accident (the substitution expands to nothing, so its exit status
    # was reused as the command's status).
    if ! cmp "${file1}" "${file2}" >/dev/null 2>&1; then
        if [ -f "${file2}" ]; then
            ask_user "The file ${file2} already exists. Overwrite (y/N)? " n && cp -b "${file1}" "${file2}"
        else
            cp -b "${file1}" "${file2}"
        fi
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Copies the OSM pod manifests shipped with devops into the working directory
# ('cp -b' backs up any files being replaced).
function generate_k8s_manifest_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Kubernetes resources
    sudo cp -bR ${OSM_DEVOPS}/installers/docker/osm_pods $OSM_DOCKER_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# (Re)generates the per-service env files under $OSM_DOCKER_WORK_DIR that are
# later turned into K8s secrets (see kube_secrets). Existing files are backed
# up first; keys already present are updated in place with sed, missing keys
# are appended. Fresh secrets are generated for the databases and keystone on
# a first install.
# NOTE(review): the else/fi delimiters of the grep/sed update blocks were
# missing from this mangled copy and have been reconstructed.
function generate_docker_env_files() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    echo "Doing a backup of existing env files"
    sudo cp $OSM_DOCKER_WORK_DIR/keystone-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/keystone.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/lcm.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/mon.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/nbi.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/pol.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro-db.env{,~}
    sudo cp $OSM_DOCKER_WORK_DIR/ro.env{,~}
    if [ -n "${INSTALL_NGSA}" ]; then
        sudo cp $OSM_DOCKER_WORK_DIR/ngsa.env{,~}
    fi

    echo "Generating docker env files"
    # LCM
    if [ ! -f $OSM_DOCKER_WORK_DIR/lcm.env ]; then
        echo "OSMLCM_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_HOST" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_HOST.*|OSMLCM_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_SECRET" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_SECRET.*|OSMLCM_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_PUBKEY" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_PUBKEY.*|OSMLCM_VCA_PUBKEY=${OSM_VCA_PUBKEY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CACERT" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CACERT.*|OSMLCM_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if [ -n "$OSM_VCA_APIPROXY" ]; then
        if ! grep -Fq "OSMLCM_VCA_APIPROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|OSMLCM_VCA_APIPROXY.*|OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi
    # Optional settings are only seeded (commented out), never overwritten
    if ! grep -Fq "OSMLCM_VCA_ENABLEOSUPGRADE" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_ENABLEOSUPGRADE=false" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_APTMIRROR" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "# OSMLCM_VCA_APTMIRROR=http://archive.ubuntu.com/ubuntu/" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_CLOUD.*|OSMLCM_VCA_CLOUD=${OSM_VCA_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if ! grep -Fq "OSMLCM_VCA_K8S_CLOUD" $OSM_DOCKER_WORK_DIR/lcm.env; then
        echo "OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
    else
        sudo sed -i "s|OSMLCM_VCA_K8S_CLOUD.*|OSMLCM_VCA_K8S_CLOUD=${OSM_VCA_K8S_CLOUDNAME}|g" $OSM_DOCKER_WORK_DIR/lcm.env
    fi
    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        if ! grep -Fq "HTTP_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "HTTP_PROXY=${HTTP_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|HTTP_PROXY.*|HTTP_PROXY=${HTTP_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
        if ! grep -Fq "HTTPS_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "HTTPS_PROXY=${HTTPS_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|HTTPS_PROXY.*|HTTPS_PROXY=${HTTPS_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
        if ! grep -Fq "NO_PROXY" $OSM_DOCKER_WORK_DIR/lcm.env; then
            echo "NO_PROXY=${NO_PROXY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/lcm.env
        else
            sudo sed -i "s|NO_PROXY.*|NO_PROXY=${NO_PROXY}|g" $OSM_DOCKER_WORK_DIR/lcm.env
        fi
    fi

    # RO
    MYSQL_ROOT_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/ro.env ]; then
        echo "RO_DB_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/ro.env
    fi
    if ! grep -Fq "OSMRO_DATABASE_COMMONKEY" $OSM_DOCKER_WORK_DIR/ro.env; then
        echo "OSMRO_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ro.env
    fi

    # Keystone
    KEYSTONE_DB_PASSWORD=$(generate_secret)
    SERVICE_PASSWORD=$(generate_secret)
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone-db.env ]; then
        echo "MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone-db.env
    fi
    if [ ! -f $OSM_DOCKER_WORK_DIR/keystone.env ]; then
        echo "ROOT_DB_PASSWORD=${MYSQL_ROOT_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/keystone.env
        echo "KEYSTONE_DB_PASSWORD=${KEYSTONE_DB_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
        echo "SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee -a $OSM_DOCKER_WORK_DIR/keystone.env
    fi

    # NBI
    if [ ! -f $OSM_DOCKER_WORK_DIR/nbi.env ]; then
        echo "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD=${SERVICE_PASSWORD}" |sudo tee $OSM_DOCKER_WORK_DIR/nbi.env
        echo "OSMNBI_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/nbi.env
    fi

    # MON
    if [ ! -f $OSM_DOCKER_WORK_DIR/mon.env ]; then
        echo "OSMMON_KEYSTONE_SERVICE_PASSWORD=${SERVICE_PASSWORD}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
        echo "OSMMON_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/mon" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OS_NOTIFIER_URI" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OS_NOTIFIER_URI=http://${OSM_DEFAULT_IP}:8662" |sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OS_NOTIFIER_URI.*|OS_NOTIFIER_URI=http://$OSM_DEFAULT_IP:8662|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_HOST" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_HOST=${OSM_VCA_HOST}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_HOST.*|OSMMON_VCA_HOST=$OSM_VCA_HOST|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_SECRET" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_SECRET=${OSM_VCA_SECRET}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_SECRET.*|OSMMON_VCA_SECRET=$OSM_VCA_SECRET|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi
    if ! grep -Fq "OSMMON_VCA_CACERT" $OSM_DOCKER_WORK_DIR/mon.env; then
        echo "OSMMON_VCA_CACERT=${OSM_VCA_CACERT}" | sudo tee -a $OSM_DOCKER_WORK_DIR/mon.env
    else
        sudo sed -i "s|OSMMON_VCA_CACERT.*|OSMMON_VCA_CACERT=${OSM_VCA_CACERT}|g" $OSM_DOCKER_WORK_DIR/mon.env
    fi

    # POL
    if [ ! -f $OSM_DOCKER_WORK_DIR/pol.env ]; then
        echo "OSMPOL_SQL_DATABASE_URI=mysql://root:${MYSQL_ROOT_PASSWORD}@mysql:3306/pol" | sudo tee -a $OSM_DOCKER_WORK_DIR/pol.env
    fi

    # NGSA
    if [ -n "${INSTALL_NGSA}" ] && [ ! -f $OSM_DOCKER_WORK_DIR/ngsa.env ]; then
        echo "OSMMON_DATABASE_COMMONKEY=${OSM_DATABASE_COMMONKEY}" | sudo tee -a $OSM_DOCKER_WORK_DIR/ngsa.env
    fi

    echo "Finished generation of docker env files"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# creates secrets from env files which will be used by containers
function kube_secrets() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    kubectl create ns $OSM_STACK_NAME
    kubectl create secret generic lcm-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/lcm.env
    kubectl create secret generic mon-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/mon.env
    kubectl create secret generic nbi-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/nbi.env
    kubectl create secret generic ro-db-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro-db.env
    kubectl create secret generic ro-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ro.env
    kubectl create secret generic keystone-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/keystone.env
    kubectl create secret generic pol-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/pol.env
    # The NGSA secret only exists when the new service assurance stack is installed
    if [ -n "${INSTALL_NGSA}" ]; then
        kubectl create secret generic ngsa-secret -n $OSM_STACK_NAME --from-env-file=$OSM_DOCKER_WORK_DIR/ngsa.env
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# deploys osm pods and services
function deploy_osm_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Apply every manifest in the K8s working directory into the OSM namespace
    kubectl apply -n $OSM_STACK_NAME -f $OSM_K8S_WORK_DIR
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# deploy charmed services
function deploy_charmed_services() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Create the juju model on the K8s cloud and deploy mongodb into it
    juju add-model $OSM_STACK_NAME $OSM_VCA_K8S_CLOUDNAME
    juju deploy ch:mongodb-k8s -m $OSM_STACK_NAME
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Deploys the optional PLA (placement) module pods.
function deploy_osm_pla_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # corresponding to deploy_osm_services
    kubectl apply -n $OSM_STACK_NAME -f $OSM_DOCKER_WORK_DIR/osm_pla
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Installs the New Generation Service Assurance stack (Airflow, Pushgateway)
# by delegating to the devops install_ngsa.sh helper script.
function install_osm_ngsa_service() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    $OSM_DEVOPS/installers/install_ngsa.sh -d ${OSM_HELM_WORK_DIR} -D ${OSM_DEVOPS} -t ${OSM_DOCKER_TAG} ${DEBUG_INSTALL} || \
    FATAL_TRACK install_osm_ngsa_service "install_ngsa.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Rewrites the K8s manifest files of the given services so that the default
# 'opensourcemano/<image>' references point at
# ${DOCKER_REGISTRY_URL}${DOCKER_USER}/<image>:${TAG} instead.
# NOTE(review): several lines are missing from this mangled copy — the
# initialization of $TAG and $services from the function arguments (original
# lines 530-532), the 'else' side of the pla branch, the per-image loop that
# sets $image for ng-prometheus (original lines 542-543), and the closing
# fi/done/brace. The surviving statements are kept verbatim below; restore
# the structure from the upstream devops repository before running.
function parse_yaml() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    for module in $services; do
        if [ "$module" == "pla" ]; then
            # PLA lives in its own directory, and only when -­-pla was requested
            if [ -n "$INSTALL_PLA" ]; then
                echo "Updating K8s manifest file from opensourcemano\/pla:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/pla:${TAG}"
                sudo sed -i "s#opensourcemano/pla:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/pla:${TAG}#g" ${OSM_DOCKER_WORK_DIR}/osm_pla/pla.yaml
        if [ "$module" == "ng-prometheus" ]; then
            # NOTE(review): $image is presumably set by a dropped inner loop
            echo "Updating K8s manifest file from opensourcemano\/${image}:.* to ${DOCKER_REGISTRY_URL}${DOCKER_USER}\/${image}:${TAG}"
            sudo sed -i "s#opensourcemano/${image}:.*#${DOCKER_REGISTRY_URL}${DOCKER_USER}/${image}:${TAG}#g" ${OSM_K8S_WORK_DIR}/${module}.yaml
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
# Builds the list of services whose manifests must be retagged (skipping any
# module listed in $TO_REBUILD, which gets $MODULE_DOCKER_TAG instead) and
# invokes parse_yaml accordingly; finally removes whichever prometheus
# manifest does not apply to this installation mode.
# NOTE(review): fi/done closers and a few lines (orig. 553, 558-559, 562,
# 565, 569, 571, 573) are missing from this copy — confirm in VCS.
551 function update_manifest_files
() {
# Full set of OSM services shipping a K8s manifest.
552 osm_services
="nbi lcm ro pol mon ng-ui keystone pla prometheus ng-prometheus"
554 for module
in $osm_services; do
# ${module^^} — uppercase, to match the TO_REBUILD spelling (-m NBI, ...).
555 module_upper
="${module^^}"
556 if ! echo $TO_REBUILD |
grep -q $module_upper ; then
557 list_of_services
="$list_of_services $module"
# "13" appears to be the tag that needs no patching — TODO confirm why.
560 if [ ! "$OSM_DOCKER_TAG" == "13" ]; then
561 parse_yaml
$OSM_DOCKER_TAG $list_of_services
563 if [ -n "$MODULE_DOCKER_TAG" ]; then
564 parse_yaml
$MODULE_DOCKER_TAG $list_of_services_to_rebuild
566 # The manifest for prometheus is prometheus.yaml or ng-prometheus.yaml, depending on the installation option
567 if [ -n "$INSTALL_NGSA" ]; then
568 sudo
rm -f ${OSM_K8S_WORK_DIR}/prometheus.yaml
570 sudo
rm -f ${OSM_K8S_WORK_DIR}/ng-prometheus.yaml
572 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# Repoints each service's hostPath volume from the default /var/lib/osm to
# the per-namespace volume directory ($OSM_NAMESPACE_VOL) in its manifest.
# NOTE(review): the line defining $osm_services for this loop (orig. 578)
# and the fi/done/} closers are missing from this copy — confirm in VCS.
575 function namespace_vol
() {
576 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
577 # List of services with a volume mounted in path /var/lib/osm
579 for osm
in $osm_services; do
580 if [ -f "$OSM_K8S_WORK_DIR/$osm.yaml" ] ; then
581 sudo
sed -i "s#path: /var/lib/osm#path: $OSM_NAMESPACE_VOL#g" $OSM_K8S_WORK_DIR/$osm.yaml
584 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# Registers the local kubeadm cluster in OSM itself: first creates a dummy
# VIM account (a K8s cluster must be attached to a VIM), then adds the
# cluster using the current user's kubeconfig.
# NOTE(review): two argument lines of the k8scluster-add call (orig. 600 and
# 602 — presumably --version and the cluster name) are missing from this
# copy; comments are placed only outside the backslash continuations so the
# command text below stays byte-identical. Confirm in VCS.
587 function add_local_k8scluster
() {
588 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
589 /usr
/bin
/osm
--all-projects vim-create \
590 --name _system-osm-vim \
591 --account_type dummy \
592 --auth_url http
://dummy \
593 --user osm
--password osm
--tenant osm \
594 --description "dummy" \
595 --config '{management_network_name: mgmt}'
596 /usr
/bin
/osm
--all-projects k8scluster-add \
597 --creds ${HOME}/.kube
/config \
598 --vim _system-osm-vim \
599 --k8s-nets '{"net1": null}' \
601 --description "OSM Internal Cluster" \
603 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function configure_apt_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # NOTE(review): an elided line here presumably assigned OSM_APT_PROXY=$1
    # (install_osm calls this function with $APT_PROXY_URL) — confirm in VCS.
    OSM_APT_PROXY=$1
    OSM_APT_PROXY_FILE="/etc/apt/apt.conf.d/osm-apt"
    echo "Configuring apt proxy in file ${OSM_APT_PROXY_FILE}"
    if [ ! -f "${OSM_APT_PROXY_FILE}" ]; then
        # BUGFIX: the heredoc must be redirected to the config FILE, not to
        # the proxy URL itself (previous text read "> ${OSM_APT_PROXY}").
        sudo bash -c "cat <<EOF > ${OSM_APT_PROXY_FILE}
Acquire::http { Proxy \"${OSM_APT_PROXY}\"; }
EOF"
    else
        # File already exists: update the Proxy line in place instead.
        sudo sed -i "s|Proxy.*|Proxy \"${OSM_APT_PROXY}\"; }|" "${OSM_APT_PROXY_FILE}"
    fi
    sudo apt-get update || FATAL "Configured apt proxy, but couldn't run 'apt-get update'. Check ${OSM_APT_PROXY_FILE}"
    track prereq apt_proxy_configured_ok
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# Final confirmation prompt before the installation mutates the host.
# Skipped when -y (ASSUME_YES) was given; exits 1 if the user declines.
# NOTE(review): steps 2-3 of the printed list (orig. 628-629) and the
# closing } are missing from this copy; no comments are inserted inside the
# multi-line prompt string below so it stays byte-identical. Confirm in VCS.
623 function ask_proceed
() {
624 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
626 [ -z "$ASSUME_YES" ] && ! ask_user
"The installation will do the following
627 1. Install and configure LXD
630 4. Disable swap space
631 5. Install and initialize Kubernetes
633 Do you want to proceed (Y/n)? " y
&& echo "Cancelled!" && exit 1
635 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function check_osm_behind_proxy() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Detect the usual proxy env variables (lower- and upper-case forms).
    # Any http(s) variable being set marks this host as behind a proxy and is
    # recorded in OSM_PROXY_ENV_VARIABLES for later installers to propagate.
    export OSM_BEHIND_PROXY=""
    export OSM_PROXY_ENV_VARIABLES=""
    [ -n "${http_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "http_proxy=${http_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} http_proxy"
    [ -n "${https_proxy}" ] && OSM_BEHIND_PROXY="y" && echo "https_proxy=${https_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} https_proxy"
    [ -n "${HTTP_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTP_PROXY=${HTTP_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTP_PROXY"
    # BUGFIX: this line reported HTTPS_PROXY under the label "https_proxy=";
    # print the variable that is actually being reported.
    [ -n "${HTTPS_PROXY}" ] && OSM_BEHIND_PROXY="y" && echo "HTTPS_PROXY=${HTTPS_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} HTTPS_PROXY"
    [ -n "${no_proxy}" ] && echo "no_proxy=${no_proxy}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} no_proxy"
    [ -n "${NO_PROXY}" ] && echo "NO_PROXY=${NO_PROXY}" && OSM_PROXY_ENV_VARIABLES="${OSM_PROXY_ENV_VARIABLES} NO_PROXY"
    echo "OSM_BEHIND_PROXY=${OSM_BEHIND_PROXY}"
    echo "OSM_PROXY_ENV_VARIABLES=${OSM_PROXY_ENV_VARIABLES}"
    if [ -n "${OSM_BEHIND_PROXY}" ]; then
        # NOTE(review): parts of this prompt were unreadable in the reviewed
        # copy; the elided lines were restored minimally — confirm wording
        # against version control.
        [ -z "$ASSUME_YES" ] && ! ask_user "
The following env variables have been found for the current user:
${OSM_PROXY_ENV_VARIABLES}.

This suggests that this machine is behind a proxy and a special configuration is required.
The installer will install Docker CE, LXD and Juju to work behind a proxy using those
env variables.

Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
Depending on the program, the env variables to work behind a proxy might be different
(e.g. http_proxy vs HTTP_PROXY).

For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
and HTTPS_PROXY are defined.

Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
those env variables are also set for root user. If you are not sure whether those variables
are configured for the root user, you can stop the installation now.

Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
    else
        echo "This machine is not behind a proxy"
    fi
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function find_devops_folder() {
    # Locate (or fetch) the devops repo containing the helper installers.
    # Honors a pre-set OSM_DEVOPS; with TEST_INSTALLER uses the local checkout
    # this script lives in; otherwise clones upstream into a temp dir.
    if [ -z "$OSM_DEVOPS" ]; then
        if [ -n "$TEST_INSTALLER" ]; then
            echo -e "\nUsing local devops repo for OSM installation"
            # Parent directory of the directory holding this script.
            OSM_DEVOPS="$(dirname "$(realpath "$(dirname "$0")")")"
        else
            echo -e "\nCreating temporary dir for OSM installation"
            OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
            # NOTE: this overwrites any previously registered EXIT trap.
            trap 'rm -rf "$OSM_DEVOPS"' EXIT
            # Clone target quoted (SC2086 fix).
            # NOTE(review): an elided line after the clone may have checked
            # out $COMMIT_ID — confirm against version control.
            git clone https://osm.etsi.org/gerrit/osm/devops.git "$OSM_DEVOPS"
        fi
    fi
}
# Top-level driver of the default (community, K8s-based) installation:
# pre-checks, prerequisites (LXD / Docker CE / kubeadm cluster / Juju),
# manifest and env-file generation, service deployment, health check and
# telemetry tracking at each step.
# NOTE(review): many original lines (fi closers, some steps) are missing
# from this copy — confirm against version control before editing.
695 function install_osm
() {
696 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
700 # TODO: move this under start
701 [ -n "$DOCKER_REGISTRY_URL" ] && parse_docker_registry_url
703 check_osm_behind_proxy
704 track checks proxy_ok
706 check_packages
"git wget curl tar snapd"
708 sudo snap
install jq || FATAL
"Could not install jq (snap package). Make sure that snap works"
712 # TODO: the use of stacks come from docker-compose. We should probably remove
713 [ "${OSM_STACK_NAME}" == "osm" ] || OSM_DOCKER_WORK_DIR
="$OSM_WORK_DIR/stack/$OSM_STACK_NAME"
715 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
717 track checks checkingroot_ok
718 [ "$USER" == "root" ] && FATAL
"You are running the installer as root. The installer is prepared to be executed as a normal user with sudo privileges."
719 track checks noroot_ok
722 track checks proceed_ok
724 echo "Installing OSM"
# Work out the default-route interface and its IPv4 address; both are
# required by the LXD/kubeadm installers below.
726 echo "Determining IP address of the interface with the default route"
727 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF
=$
(ip route list|
awk '$1=="default" {print $5; exit}')
728 [ -z "$OSM_DEFAULT_IF" ] && OSM_DEFAULT_IF
=$
(route
-n |
awk '$1~/^0.0.0.0/ {print $8; exit}')
729 [ -z "$OSM_DEFAULT_IF" ] && FATAL
"Not possible to determine the interface with the default route 0.0.0.0"
730 OSM_DEFAULT_IP
=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
731 [ -z "$OSM_DEFAULT_IP" ] && FATAL
"Not possible to determine the IP address of the interface with the default route"
733 # configure apt proxy
734 [ -n "$APT_PROXY_URL" ] && configure_apt_proxy
$APT_PROXY_URL
736 # if no host is passed in, we need to install lxd/juju, unless explicilty asked not to
737 if [ -z "$OSM_VCA_HOST" ] && [ -z "$INSTALL_NOLXD" ] && [ -z "$LXD_CLOUD_FILE" ]; then
738 LXD_INSTALL_OPTS
="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
739 [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS
="${LXD_INSTALL_OPTS} -P"
740 $OSM_DEVOPS/installers
/install_lxd.sh
${LXD_INSTALL_OPTS} || FATAL_TRACK lxd
"install_lxd.sh failed"
743 track prereq prereqok_ok
745 if [ ! -n "$INSTALL_NODOCKER" ]; then
746 DOCKER_CE_OPTS
="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
747 [ -n "${DOCKER_PROXY_URL}" ] && DOCKER_CE_OPTS="${DOCKER_CE_OPTS} -p ${DOCKER_PROXY_URL}"
748 [ -n "${OSM_BEHIND_PROXY}" ] && DOCKER_CE_OPTS
="${DOCKER_CE_OPTS} -P"
749 $OSM_DEVOPS/installers
/install_docker_ce.sh
${DOCKER_CE_OPTS} || FATAL_TRACK docker_ce
"install_docker_ce.sh failed"
752 track docker_ce docker_ce_ok
754 echo "Creating folders for installation"
755 [ ! -d "$OSM_DOCKER_WORK_DIR" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR
756 [ ! -d "$OSM_DOCKER_WORK_DIR/osm_pla" -a -n "$INSTALL_PLA" ] && sudo mkdir
-p $OSM_DOCKER_WORK_DIR/osm_pla
757 sudo
cp -b $OSM_DEVOPS/installers
/docker
/cluster-config.yaml
$OSM_DOCKER_WORK_DIR/cluster-config.yaml
759 $OSM_DEVOPS/installers
/install_kubeadm_cluster.sh
-i ${OSM_DEFAULT_IP} -d ${OSM_DOCKER_WORK_DIR} -D ${OSM_DEVOPS} ${DEBUG_INSTALL} || \
760 FATAL_TRACK k8scluster
"install_kubeadm_cluster.sh failed"
761 track k8scluster k8scluster_ok
# Build the option string for the Juju installer from the optional inputs.
763 JUJU_OPTS
="-D ${OSM_DEVOPS} -s ${OSM_STACK_NAME} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_NOJUJU} ${INSTALL_CACHELXDIMAGES}"
764 [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS
="$JUJU_OPTS -H ${OSM_VCA_HOST}"
765 [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
766 [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS
="$JUJU_OPTS -L ${LXD_CRED_FILE}"
767 [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS
="$JUJU_OPTS -K ${CONTROLLER_NAME}"
768 [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS
="${JUJU_OPTS} -P"
769 $OSM_DEVOPS/installers
/install_juju.sh
${JUJU_OPTS} || FATAL_TRACK juju
"install_juju.sh failed"
773 if [ -z "$OSM_DATABASE_COMMONKEY" ]; then
774 OSM_DATABASE_COMMONKEY
=$
(generate_secret
)
# NOTE(review): the test below checks the literal string
# "OSM_DATABASE_COMMONKEY" (missing '$'), so it can never trigger the
# FATAL — confirm and fix in a follow-up change.
775 [ -z "OSM_DATABASE_COMMONKEY" ] && FATAL
"Cannot generate common db secret"
778 # Deploy OSM services
779 [ -z "$DOCKER_NOBUILD" ] && generate_docker_images
780 track docker_images docker_images_ok
782 generate_k8s_manifest_files
783 track osm_files manifest_files_ok
784 generate_docker_env_files
785 track osm_files env_files_ok
787 deploy_charmed_services
788 track deploy_osm deploy_charmed_services_ok
790 track deploy_osm kube_secrets_ok
791 update_manifest_files
792 track deploy_osm update_manifest_files_ok
794 track deploy_osm namespace_vol_ok
796 track deploy_osm deploy_osm_services_k8s_ok
797 if [ -n "$INSTALL_PLA" ]; then
798 # optional PLA install
799 deploy_osm_pla_service
800 track deploy_osm deploy_osm_pla_ok
802 if [ -n "$INSTALL_K8S_MONITOR" ]; then
803 # install OSM MONITORING
804 install_k8s_monitoring
805 track deploy_osm install_k8s_monitoring_ok
807 if [ -n "$INSTALL_NGSA" ]; then
808 # optional PLA install
809 install_osm_ngsa_service
810 track deploy_osm install_osm_ngsa_ok
813 [ -z "$INSTALL_NOHOSTCLIENT" ] && install_osmclient
814 track osmclient osmclient_ok
# Health check is best-effort: a failure only warns and tracks, it does not
# abort the installation.
816 echo -e "Checking OSM health state..."
817 $OSM_DEVOPS/installers
/osm_health.sh
-s ${OSM_STACK_NAME} -k || \
818 (echo -e "OSM is not healthy, but will probably converge to a healthy state soon." && \
819 echo -e "Check OSM status with: kubectl -n ${OSM_STACK_NAME} get all" && \
820 track healthchecks osm_unhealthy didnotconverge
)
821 track healthchecks after_healthcheck_ok
824 track final_ops add_local_k8scluster_ok
826 arrange_docker_default_network_policy
# Telemetry beacon: fetching this README marks the installation as finished.
828 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README2.txt
&> /dev
/null
831 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# Deploys OSM on an OpenStack VM via Ansible: installs the OpenStack client
# and Ansible into a dedicated venv, then runs the site.yml playbook.
# Arguments: $1 openrc file or cloud name, $2 external network name,
# $3 whether to attach a volume (per the call site in this file).
# NOTE(review): the guard that triggers the FATAL below (orig. 837-838) and
# the if/else selecting between the two ansible-playbook invocations
# (openrc vs clouds.yaml, orig. 873-884) are missing from this copy.
835 function install_to_openstack
() {
836 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
839 FATAL
"OpenStack installer requires a valid external network name"
842 # Install Pip for Python3
843 sudo apt
install -y python3-pip python3-venv
844 sudo
-H LC_ALL
=C python3
-m pip
install -U pip
846 # Create a venv to avoid conflicts with the host installation
847 python3
-m venv
$OPENSTACK_PYTHON_VENV
849 source $OPENSTACK_PYTHON_VENV/bin
/activate
851 # Install Ansible, OpenStack client and SDK, latest openstack version supported is Train
852 python
-m pip
install -U wheel
853 python
-m pip
install -U "python-openstackclient<=4.0.2" "openstacksdk>=0.12.0,<=0.36.2" "ansible>=2.10,<2.11"
855 # Install the Openstack cloud module (ansible>=2.10)
856 ansible-galaxy collection
install openstack.cloud
858 export ANSIBLE_CONFIG
="$OSM_DEVOPS/installers/openstack/ansible.cfg"
860 OSM_INSTALLER_ARGS
="${REPO_ARGS[@]}"
862 ANSIBLE_VARS
="external_network_name=$2 setup_volume=$3 server_name=$OPENSTACK_VM_NAME"
864 if [ -n "$OPENSTACK_SSH_KEY_FILE" ]; then
865 ANSIBLE_VARS
+=" key_file=$OPENSTACK_SSH_KEY_FILE"
868 if [ -n "$OPENSTACK_USERDATA_FILE" ]; then
869 ANSIBLE_VARS
+=" userdata_file=$OPENSTACK_USERDATA_FILE"
872 # Execute the Ansible playbook based on openrc or clouds.yaml
875 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
876 $OSM_DEVOPS/installers
/openstack
/site.yml
878 ansible-playbook
-e installer_args
="\"$OSM_INSTALLER_ARGS\"" -e "$ANSIBLE_VARS" \
879 -e cloud_name
=$1 $OSM_DEVOPS/installers
/openstack
/site.yml
885 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
function arrange_docker_default_network_policy() {
    # Docker's default DOCKER-USER chain can drop traffic LXD needs; insert
    # an unconditional ACCEPT so both can share the host, then persist the
    # resulting IPv4 and IPv6 rule sets.
    echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
    sudo iptables -I DOCKER-USER -j ACCEPT
    sudo iptables-save | sudo tee /etc/iptables/rules.v4
    sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
}
function install_k8s_monitoring() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # install OSM monitoring: make the bundled k8s helper scripts executable
    # and hand off to the monitoring installer; any failure is fatal and
    # reported through the tracking endpoint.
    sudo chmod +x $OSM_DEVOPS/installers/k8s/*.sh
    sudo $OSM_DEVOPS/installers/k8s/install_osm_k8s_monitoring.sh || FATAL_TRACK install_k8s_monitoring "k8s/install_osm_k8s_monitoring.sh failed"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function dump_vars(){
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Print every installer option as "NAME=value", one per line, in a fixed
    # order. A table-driven loop with indirect expansion replaces the long
    # run of individual echo statements; output is identical.
    local var
    for var in \
        APT_PROXY_URL DEVELOP DEBUG_INSTALL DOCKER_NOBUILD DOCKER_PROXY_URL \
        DOCKER_REGISTRY_URL DOCKER_USER INSTALL_CACHELXDIMAGES INSTALL_FROM_SOURCE \
        INSTALL_K8S_MONITOR INSTALL_LIGHTWEIGHT INSTALL_LXD INSTALL_NGSA \
        INSTALL_NODOCKER INSTALL_NOJUJU INSTALL_NOLXD INSTALL_ONLY INSTALL_PLA \
        INSTALL_TO_OPENSTACK INSTALL_VIMEMU NO_HOST_PORTS OPENSTACK_PUBLIC_NET_NAME \
        OPENSTACK_OPENRC_FILE_OR_CLOUD OPENSTACK_ATTACH_VOLUME OPENSTACK_SSH_KEY_FILE \
        OPENSTACK_USERDATA_FILE OPENSTACK_VM_NAME OSM_DEVOPS OSM_DOCKER_TAG \
        OSM_DOCKER_WORK_DIR OSM_HELM_WORK_DIR OSM_K8S_WORK_DIR OSM_STACK_NAME \
        OSM_VCA_HOST OSM_VCA_PUBKEY OSM_VCA_SECRET OSM_WORK_DIR PULL_IMAGES \
        RECONFIGURE RELEASE REPOSITORY REPOSITORY_BASE REPOSITORY_KEY SHOWOPTS \
        TEST_INSTALLER TO_REBUILD UNINSTALL UPDATE; do
        echo "${var}=${!var}"
    done
    echo "Install from specific refspec (-b): $COMMIT_ID"
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
function parse_docker_registry_url() {
    [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
    # Split DOCKER_REGISTRY_URL of the form "user:password@host[:port]" into
    # its three global components. Same awk field logic as before, fed via
    # here-strings instead of echo pipelines. Note the original edge-case
    # behavior is preserved: with no "@" the whole string lands in USER and
    # the URL becomes empty.
    DOCKER_REGISTRY_USER=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[1]}' <<<"$DOCKER_REGISTRY_URL")
    DOCKER_REGISTRY_PASSWORD=$(awk '{split($1,a,"@"); split(a[1],b,":"); print b[2]}' <<<"$DOCKER_REGISTRY_URL")
    DOCKER_REGISTRY_URL=$(awk '{split($1,a,"@"); print a[2]}' <<<"$DOCKER_REGISTRY_URL")
    [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
# NOTE(review): body of the Ctrl-C (SIGINT) trap handler; its function
# header line (orig. 966) is missing from this copy — confirm the function
# name and trap registration in version control. Aborts the installation
# via FATAL when the user interrupts.
967 [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of
function
968 echo "** Trapped CTRL-C"
969 FATAL
"User stopped the installation"
970 [ -z "${DEBUG_INSTALL}" ] || DEBUG end of
function
# ---------------------------------------------------------------------------
# Default values for the installer options, overridden later by getopts.
# NOTE(review): several assignments are missing from this copy (the embedded
# original line numbers jump, e.g. 984-986, 1002-1006) — confirm in VCS.
# ---------------------------------------------------------------------------
983 INSTALL_FROM_SOURCE
=""
987 INSTALL_K8S_MONITOR
=""
991 LXD_REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/lxd"
992 LXD_REPOSITORY_PATH
=""
993 INSTALL_LIGHTWEIGHT
="y"
994 INSTALL_TO_OPENSTACK
=""
995 OPENSTACK_OPENRC_FILE_OR_CLOUD
=""
996 OPENSTACK_PUBLIC_NET_NAME
=""
997 OPENSTACK_ATTACH_VOLUME
="false"
998 OPENSTACK_SSH_KEY_FILE
=""
999 OPENSTACK_USERDATA_FILE
=""
1000 OPENSTACK_VM_NAME
="server-osm"
1001 OPENSTACK_PYTHON_VENV
="$HOME/.virtual-envs/osm"
1007 INSTALL_NOHOSTCLIENT
=""
1008 INSTALL_CACHELXDIMAGES
=""
# Juju/VCA cloud names used when bootstrapping controllers.
1013 OSM_VCA_CLOUDNAME
="localhost"
1014 OSM_VCA_K8S_CLOUDNAME
="k8scloud"
# Package repository defaults (overridable with -k / -u).
1018 REPOSITORY_KEY
="OSM%20ETSI%20Release%20Key.gpg"
1019 REPOSITORY_BASE
="https://osm-download.etsi.org/repository/osm/debian"
# Work directories used by the generated manifests and helm values.
1020 OSM_WORK_DIR
="/etc/osm"
1021 OSM_DOCKER_WORK_DIR
="${OSM_WORK_DIR}/docker"
1022 OSM_K8S_WORK_DIR
="${OSM_DOCKER_WORK_DIR}/osm_pods"
1023 OSM_HELM_WORK_DIR
="${OSM_WORK_DIR}/helm"
1024 OSM_HOST_VOL
="/var/lib/osm"
1025 OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1026 OSM_DOCKER_TAG
=latest
1027 DOCKER_USER
=opensourcemano
# Pinned third-party image tags.
1029 KAFKA_TAG
=2.11-1.0
.2
1030 KIWIGRID_K8S_SIDECAR_TAG
="1.15.6"
1031 PROMETHEUS_TAG
=v2.28
.1
1033 PROMETHEUS_NODE_EXPORTER_TAG
=0.18.1
1034 PROMETHEUS_CADVISOR_TAG
=latest
1036 OSM_DATABASE_COMMONKEY
=
1037 ELASTIC_VERSION
=6.4.2
1038 ELASTIC_CURATOR_VERSION
=5.5.4
1039 POD_NETWORK_CIDR
=10.244.0.0/16
1040 K8S_MANIFEST_DIR
="/etc/kubernetes/manifests"
# Validation regex for the namespace/stack name passed with -s.
1041 RE_CHECK
='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
1042 DOCKER_REGISTRY_URL
=
1045 OSM_INSTALLATION_TYPE
="Default"
# ---------------------------------------------------------------------------
# Command-line option parsing (see usage() for the documented flags).
# NOTE(review): the case statement's labels ("a)", "b)", ... , esac/done) are
# missing from this copy — only the per-option bodies survive. Confirm the
# full structure in version control before editing.
# ---------------------------------------------------------------------------
1047 while getopts ":a:b:r:n:k:u:R:D:o:O:m:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:-: hy" o
; do
1050 APT_PROXY_URL
=${OPTARG}
1057 REPOSITORY
="${OPTARG}"
1058 REPO_ARGS
+=(-r "$REPOSITORY")
1061 REPOSITORY_KEY
="${OPTARG}"
1062 REPO_ARGS
+=(-k "$REPOSITORY_KEY")
1065 REPOSITORY_BASE
="${OPTARG}"
1066 REPO_ARGS
+=(-u "$REPOSITORY_BASE")
1070 REPO_ARGS
+=(-R "$RELEASE")
1073 OSM_DEVOPS
="${OPTARG}"
# -o: install only the given component.
1077 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1078 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA
="y" && continue
# -O: install to OpenStack using an openrc file or cloud name.
1081 INSTALL_TO_OPENSTACK
="y"
1082 if [ -n "${OPTARG}" ]; then
1083 OPENSTACK_OPENRC_FILE_OR_CLOUD
="${OPTARG}"
1085 echo -e "Invalid argument for -O : ' $OPTARG'\n" >&2
1090 OPENSTACK_SSH_KEY_FILE
="${OPTARG}"
1093 OPENSTACK_USERDATA_FILE
="${OPTARG}"
1096 OPENSTACK_PUBLIC_NET_NAME
="${OPTARG}"
# -m: accumulate modules to rebuild from source in TO_REBUILD.
1099 [ "${OPTARG}" == "NG-UI" ] && TO_REBUILD
="$TO_REBUILD NG-UI" && continue
1100 [ "${OPTARG}" == "NBI" ] && TO_REBUILD
="$TO_REBUILD NBI" && continue
1101 [ "${OPTARG}" == "LCM" ] && TO_REBUILD
="$TO_REBUILD LCM" && continue
1102 [ "${OPTARG}" == "RO" ] && TO_REBUILD
="$TO_REBUILD RO" && continue
1103 [ "${OPTARG}" == "MON" ] && TO_REBUILD
="$TO_REBUILD MON" && continue
1104 [ "${OPTARG}" == "POL" ] && TO_REBUILD
="$TO_REBUILD POL" && continue
1105 [ "${OPTARG}" == "PLA" ] && TO_REBUILD
="$TO_REBUILD PLA" && continue
1106 [ "${OPTARG}" == "osmclient" ] && TO_REBUILD
="$TO_REBUILD osmclient" && continue
1107 [ "${OPTARG}" == "KAFKA" ] && TO_REBUILD
="$TO_REBUILD KAFKA" && continue
1108 [ "${OPTARG}" == "MONGO" ] && TO_REBUILD
="$TO_REBUILD MONGO" && continue
1109 [ "${OPTARG}" == "PROMETHEUS" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS" && continue
1110 [ "${OPTARG}" == "PROMETHEUS-CADVISOR" ] && TO_REBUILD
="$TO_REBUILD PROMETHEUS-CADVISOR" && continue
1111 [ "${OPTARG}" == "KEYSTONE-DB" ] && TO_REBUILD
="$TO_REBUILD KEYSTONE-DB" && continue
1112 [ "${OPTARG}" == "GRAFANA" ] && TO_REBUILD
="$TO_REBUILD GRAFANA" && continue
1113 [ "${OPTARG}" == "NONE" ] && TO_REBUILD
="$TO_REBUILD NONE" && continue
1116 OSM_VCA_HOST
="${OPTARG}"
1119 OSM_VCA_SECRET
="${OPTARG}"
# -s: stack name / namespace; validated against RE_CHECK.
1122 OSM_STACK_NAME
="${OPTARG}" && [[ ! "${OPTARG}" =~
$RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
1125 OSM_DOCKER_TAG
="${OPTARG}"
1126 REPO_ARGS
+=(-t "$OSM_DOCKER_TAG")
1129 DOCKER_USER
="${OPTARG}"
1132 OSM_VCA_PUBKEY
=$
(cat ${OPTARG})
1135 OSM_VCA_APIPROXY
="${OPTARG}"
1138 LXD_CLOUD_FILE
="${OPTARG}"
1141 LXD_CRED_FILE
="${OPTARG}"
1144 CONTROLLER_NAME
="${OPTARG}"
1147 DOCKER_REGISTRY_URL
="${OPTARG}"
1150 DOCKER_PROXY_URL
="${OPTARG}"
1153 MODULE_DOCKER_TAG
="${OPTARG}"
# Long options (--foo), dispatched on OPTARG for the "-" getopts case;
# several are accepted but ignored here (handled by the charmed installer).
1156 [ "${OPTARG}" == "help" ] && usage
&& exit 0
1157 [ "${OPTARG}" == "source" ] && INSTALL_FROM_SOURCE
="y" && PULL_IMAGES
="" && continue
1158 [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL
="--debug" && continue
1159 [ "${OPTARG}" == "develop" ] && DEVELOP
="y" && continue
1160 [ "${OPTARG}" == "uninstall" ] && UNINSTALL
="y" && continue
1161 [ "${OPTARG}" == "update" ] && UPDATE
="y" && continue
1162 [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE
="y" && continue
1163 [ "${OPTARG}" == "test" ] && TEST_INSTALLER
="y" && continue
1164 [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD
="y" && continue
1165 [ "${OPTARG}" == "nolxd" ] && INSTALL_NOLXD
="y" && continue
1166 [ "${OPTARG}" == "nodocker" ] && INSTALL_NODOCKER
="y" && continue
1167 [ "${OPTARG}" == "showopts" ] && SHOWOPTS
="y" && continue
1168 [ "${OPTARG}" == "nohostports" ] && NO_HOST_PORTS
="y" && continue
1169 [ "${OPTARG}" == "nojuju" ] && INSTALL_NOJUJU
="--nojuju" && continue
1170 [ "${OPTARG}" == "nodockerbuild" ] && DOCKER_NOBUILD
="y" && continue
1171 [ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT
="y" && continue
1172 [ "${OPTARG}" == "pullimages" ] && continue
1173 [ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR
="y" && continue
1174 [ "${OPTARG}" == "charmed" ] && CHARMED
="y" && OSM_INSTALLATION_TYPE
="Charmed" && continue
1175 [ "${OPTARG}" == "bundle" ] && continue
1176 [ "${OPTARG}" == "k8s" ] && continue
1177 [ "${OPTARG}" == "lxd" ] && continue
1178 [ "${OPTARG}" == "lxd-cred" ] && continue
1179 [ "${OPTARG}" == "microstack" ] && continue
1180 [ "${OPTARG}" == "overlay" ] && continue
1181 [ "${OPTARG}" == "only-vca" ] && continue
1182 [ "${OPTARG}" == "small-profile" ] && continue
1183 [ "${OPTARG}" == "vca" ] && continue
1184 [ "${OPTARG}" == "ha" ] && continue
1185 [ "${OPTARG}" == "tag" ] && continue
1186 [ "${OPTARG}" == "registry" ] && continue
1187 [ "${OPTARG}" == "pla" ] && INSTALL_PLA
="y" && continue
1188 [ "${OPTARG}" == "ng-sa" ] && INSTALL_NGSA
="y" && continue
1189 [ "${OPTARG}" == "volume" ] && OPENSTACK_ATTACH_VOLUME
="true" && continue
1190 [ "${OPTARG}" == "nocachelxdimages" ] && continue
1191 [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES
="--cachelxdimages" && continue
1192 echo -e "Invalid option: '--$OPTARG'\n" >&2
1196 echo "Option -$OPTARG requires an argument" >&2
1200 echo -e "Invalid option: '-$OPTARG'\n" >&2
# ---------------------------------------------------------------------------
# Main flow after option parsing: load shared helpers, then dispatch to
# uninstall / charmed / OpenStack / community installation.
# NOTE(review): fi closers and several exit/track lines are missing from
# this copy — confirm the full structure in version control.
# ---------------------------------------------------------------------------
# Shared helper functions (FATAL, DEBUG, track, ask_user, ...) come from the
# devops repo.
1215 source $OSM_DEVOPS/common
/all_funcs
1217 [ -z "${DEBUG_INSTALL}" ] || DEBUG Debug is on
1218 [ -n "$SHOWOPTS" ] && dump_vars
&& exit 0
1220 # Uninstall if "--uninstall"
1221 if [ -n "$UNINSTALL" ]; then
1222 if [ -n "$CHARMED" ]; then
1223 ${OSM_DEVOPS}/installers
/charmed_uninstall.sh
-R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
1224 FATAL_TRACK charmed_uninstall
"charmed_uninstall.sh failed"
1226 ${OSM_DEVOPS}/installers
/uninstall_osm.sh
"$@" || \
1227 FATAL_TRACK community_uninstall
"uninstall_osm.sh failed"
1233 # Charmed installation
1234 if [ -n "$CHARMED" ]; then
1235 sudo snap
install jq || FATAL
"Could not install jq (snap package). Make sure that snap works"
1236 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"
1237 track start release
$RELEASE none none docker_tag
$OSM_DOCKER_TAG none none installation_type
$OSM_INSTALLATION_TYPE none none
1238 ${OSM_DEVOPS}/installers
/charmed_install.sh
--tag $OSM_DOCKER_TAG "$@" || \
1239 FATAL_TRACK charmed_install
"charmed_install.sh failed"
# Telemetry beacon marking a finished charmed installation.
1240 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README2.txt
&> /dev
/null
1241 track end installation_type
$OSM_INSTALLATION_TYPE
1246 # Installation to Openstack
1247 if [ -n "$INSTALL_TO_OPENSTACK" ]; then
1248 install_to_openstack
$OPENSTACK_OPENRC_FILE_OR_CLOUD $OPENSTACK_PUBLIC_NET_NAME $OPENSTACK_ATTACH_VOLUME
1253 # Community_installer
# Option sanity checks: -m NONE excludes other -m values; -m PLA needs --pla.
1255 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" != " NONE" ] && echo $TO_REBUILD |
grep -q NONE
&& FATAL
"Incompatible option: -m NONE cannot be used with other -m options"
1256 [ -n "$TO_REBUILD" ] && [ "$TO_REBUILD" == " PLA" ] && [ -z "$INSTALL_PLA" ] && FATAL
"Incompatible option: -m PLA cannot be used without --pla option"
1257 # if develop, we force master
1258 [ -z "$COMMIT_ID" ] && [ -n "$DEVELOP" ] && COMMIT_ID
="master"
# Recompute paths that depend on the (possibly overridden) stack name.
1259 OSM_K8S_WORK_DIR
="$OSM_DOCKER_WORK_DIR/osm_pods" && OSM_NAMESPACE_VOL
="${OSM_HOST_VOL}/${OSM_STACK_NAME}"
1260 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_K8S_MONITOR" ] && install_k8s_monitoring
1261 [ -n "$INSTALL_ONLY" ] && [ -n "$INSTALL_NGSA" ] && install_osm_ngsa_service
1262 [ -n "$INSTALL_ONLY" ] && echo -e "\nDONE" && exit 0
1264 #Installation starts here
# Telemetry beacon marking the start of a community installation.
1265 wget
-q -O- https
://osm-download.etsi.org
/ftp
/osm-13.0
-thirteen/README.txt
&> /dev
/null
1266 export OSM_TRACK_INSTALLATION_ID
="$(date +%s)-$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16)"