From 99df54fa016eaba860d2672a914dc2260b785d52 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 8 Apr 2022 14:47:47 +0200 Subject: [PATCH] Add OAI charms --- oai/.gitignore | 5 + oai/README.md | 102 ++++++ oai/build.sh | 17 + oai/bundle.yaml | 65 ++++ oai/charmcraft.yaml | 1 + oai/deploy.sh | 3 + oai/oai-amf-operator/.flake8 | 9 + oai/oai-amf-operator/.gitignore | 7 + oai/oai-amf-operator/.jujuignore | 3 + oai/oai-amf-operator/CONTRIBUTING.md | 34 ++ oai/oai-amf-operator/LICENSE | 202 ++++++++++ oai/oai-amf-operator/README.md | 1 + oai/oai-amf-operator/charmcraft.yaml | 10 + oai/oai-amf-operator/config.yaml | 12 + oai/oai-amf-operator/metadata.yaml | 31 ++ oai/oai-amf-operator/requirements-dev.txt | 3 + oai/oai-amf-operator/requirements.txt | 2 + oai/oai-amf-operator/run_tests | 17 + oai/oai-amf-operator/src/charm.py | 346 ++++++++++++++++++ oai/oai-amf-operator/src/utils.py | 325 ++++++++++++++++ oai/oai-amf-operator/tests/__init__.py | 0 oai/oai-amf-operator/tests/test_charm.py | 68 ++++ oai/oai-db-operator/.flake8 | 9 + oai/oai-db-operator/.gitignore | 7 + oai/oai-db-operator/.jujuignore | 3 + oai/oai-db-operator/CONTRIBUTING.md | 34 ++ oai/oai-db-operator/LICENSE | 202 ++++++++++ oai/oai-db-operator/README.md | 1 + oai/oai-db-operator/charmcraft.yaml | 10 + oai/oai-db-operator/config.yaml | 6 + oai/oai-db-operator/metadata.yaml | 21 ++ oai/oai-db-operator/requirements-dev.txt | 3 + oai/oai-db-operator/requirements.txt | 2 + oai/oai-db-operator/run_tests | 17 + oai/oai-db-operator/src/charm.py | 158 ++++++++ oai/oai-db-operator/src/utils.py | 325 ++++++++++++++++ oai/oai-db-operator/templates/db.sql | 213 +++++++++++ oai/oai-db-operator/tests/__init__.py | 0 oai/oai-db-operator/tests/test_charm.py | 68 ++++ oai/oai-gnb-operator/.flake8 | 9 + oai/oai-gnb-operator/.gitignore | 7 + oai/oai-gnb-operator/.jujuignore | 3 + oai/oai-gnb-operator/CONTRIBUTING.md | 34 ++ oai/oai-gnb-operator/LICENSE | 202 ++++++++++ oai/oai-gnb-operator/README.md | 24 ++ oai/oai-gnb-operator/charmcraft.yaml | 10 + oai/oai-gnb-operator/config.yaml | 12 + oai/oai-gnb-operator/metadata.yaml | 33 ++ oai/oai-gnb-operator/requirements-dev.txt | 3 + oai/oai-gnb-operator/requirements.txt | 2 + oai/oai-gnb-operator/run_tests | 17 + oai/oai-gnb-operator/src/charm.py | 303 +++++++++++++++ oai/oai-gnb-operator/src/utils.py | 325 ++++++++++++++++ oai/oai-gnb-operator/tests/__init__.py | 0 oai/oai-gnb-operator/tests/test_charm.py | 68 ++++ oai/oai-nr-ue-operator/.flake8 | 9 + oai/oai-nr-ue-operator/.gitignore | 7 + oai/oai-nr-ue-operator/.jujuignore | 3 + oai/oai-nr-ue-operator/CONTRIBUTING.md | 34 ++ oai/oai-nr-ue-operator/LICENSE | 202 ++++++++++ oai/oai-nr-ue-operator/README.md | 24 ++ oai/oai-nr-ue-operator/actions.yaml | 4 + oai/oai-nr-ue-operator/charmcraft.yaml | 10 + oai/oai-nr-ue-operator/config.yaml | 12 + oai/oai-nr-ue-operator/metadata.yaml | 27 ++ oai/oai-nr-ue-operator/requirements-dev.txt | 3 + oai/oai-nr-ue-operator/requirements.txt | 2 + oai/oai-nr-ue-operator/run_tests | 17 + oai/oai-nr-ue-operator/src/charm.py | 253 +++++++++++++ oai/oai-nr-ue-operator/src/utils.py | 325 ++++++++++++++++ oai/oai-nr-ue-operator/tests/__init__.py | 0 oai/oai-nr-ue-operator/tests/test_charm.py | 68 ++++ oai/oai-nrf-operator/.flake8 | 9 + oai/oai-nrf-operator/.gitignore | 7 + oai/oai-nrf-operator/.jujuignore | 3 + oai/oai-nrf-operator/CONTRIBUTING.md | 34 ++ oai/oai-nrf-operator/LICENSE | 202 ++++++++++ oai/oai-nrf-operator/README.md | 1 + oai/oai-nrf-operator/charmcraft.yaml | 10 + oai/oai-nrf-operator/config.yaml | 12 + 
oai/oai-nrf-operator/metadata.yaml | 26 ++ oai/oai-nrf-operator/requirements-dev.txt | 3 + oai/oai-nrf-operator/requirements.txt | 2 + oai/oai-nrf-operator/run_tests | 17 + oai/oai-nrf-operator/src/charm.py | 146 ++++++++ oai/oai-nrf-operator/src/utils.py | 325 ++++++++++++++++ oai/oai-nrf-operator/tests/__init__.py | 0 oai/oai-nrf-operator/tests/test_charm.py | 68 ++++ oai/oai-smf-operator/.flake8 | 9 + oai/oai-smf-operator/.gitignore | 7 + oai/oai-smf-operator/.jujuignore | 3 + oai/oai-smf-operator/CONTRIBUTING.md | 34 ++ oai/oai-smf-operator/LICENSE | 202 ++++++++++ oai/oai-smf-operator/README.md | 1 + oai/oai-smf-operator/charmcraft.yaml | 10 + oai/oai-smf-operator/config.yaml | 12 + oai/oai-smf-operator/metadata.yaml | 33 ++ oai/oai-smf-operator/requirements-dev.txt | 3 + oai/oai-smf-operator/requirements.txt | 2 + oai/oai-smf-operator/run_tests | 17 + oai/oai-smf-operator/src/charm.py | 272 ++++++++++++++ oai/oai-smf-operator/src/utils.py | 312 ++++++++++++++++ oai/oai-smf-operator/tests/__init__.py | 0 oai/oai-smf-operator/tests/test_charm.py | 68 ++++ oai/oai-spgwu-tiny-operator/.flake8 | 9 + oai/oai-spgwu-tiny-operator/.gitignore | 7 + oai/oai-spgwu-tiny-operator/.jujuignore | 3 + oai/oai-spgwu-tiny-operator/CONTRIBUTING.md | 34 ++ oai/oai-spgwu-tiny-operator/LICENSE | 202 ++++++++++ oai/oai-spgwu-tiny-operator/README.md | 24 ++ oai/oai-spgwu-tiny-operator/charmcraft.yaml | 10 + oai/oai-spgwu-tiny-operator/config.yaml | 12 + oai/oai-spgwu-tiny-operator/metadata.yaml | 33 ++ .../requirements-dev.txt | 3 + oai/oai-spgwu-tiny-operator/requirements.txt | 2 + oai/oai-spgwu-tiny-operator/run_tests | 17 + oai/oai-spgwu-tiny-operator/src/charm.py | 268 ++++++++++++++ oai/oai-spgwu-tiny-operator/src/utils.py | 312 ++++++++++++++++ oai/oai-spgwu-tiny-operator/tests/__init__.py | 0 .../tests/test_charm.py | 68 ++++ 120 files changed, 7248 insertions(+) create mode 100644 oai/.gitignore create mode 100644 oai/README.md create mode 100755 oai/build.sh create mode 100644 oai/bundle.yaml create mode 100644 oai/charmcraft.yaml create mode 100755 oai/deploy.sh create mode 100644 oai/oai-amf-operator/.flake8 create mode 100644 oai/oai-amf-operator/.gitignore create mode 100644 oai/oai-amf-operator/.jujuignore create mode 100644 oai/oai-amf-operator/CONTRIBUTING.md create mode 100644 oai/oai-amf-operator/LICENSE create mode 100644 oai/oai-amf-operator/README.md create mode 100644 oai/oai-amf-operator/charmcraft.yaml create mode 100644 oai/oai-amf-operator/config.yaml create mode 100644 oai/oai-amf-operator/metadata.yaml create mode 100644 oai/oai-amf-operator/requirements-dev.txt create mode 100644 oai/oai-amf-operator/requirements.txt create mode 100755 oai/oai-amf-operator/run_tests create mode 100755 oai/oai-amf-operator/src/charm.py create mode 100644 oai/oai-amf-operator/src/utils.py create mode 100644 oai/oai-amf-operator/tests/__init__.py create mode 100644 oai/oai-amf-operator/tests/test_charm.py create mode 100644 oai/oai-db-operator/.flake8 create mode 100644 oai/oai-db-operator/.gitignore create mode 100644 oai/oai-db-operator/.jujuignore create mode 100644 oai/oai-db-operator/CONTRIBUTING.md create mode 100644 oai/oai-db-operator/LICENSE create mode 100644 oai/oai-db-operator/README.md create mode 100644 oai/oai-db-operator/charmcraft.yaml create mode 100644 oai/oai-db-operator/config.yaml create mode 100644 oai/oai-db-operator/metadata.yaml create mode 100644 oai/oai-db-operator/requirements-dev.txt create mode 100644 oai/oai-db-operator/requirements.txt create mode 100755 
oai/oai-db-operator/run_tests create mode 100755 oai/oai-db-operator/src/charm.py create mode 100644 oai/oai-db-operator/src/utils.py create mode 100644 oai/oai-db-operator/templates/db.sql create mode 100644 oai/oai-db-operator/tests/__init__.py create mode 100644 oai/oai-db-operator/tests/test_charm.py create mode 100644 oai/oai-gnb-operator/.flake8 create mode 100644 oai/oai-gnb-operator/.gitignore create mode 100644 oai/oai-gnb-operator/.jujuignore create mode 100644 oai/oai-gnb-operator/CONTRIBUTING.md create mode 100644 oai/oai-gnb-operator/LICENSE create mode 100644 oai/oai-gnb-operator/README.md create mode 100644 oai/oai-gnb-operator/charmcraft.yaml create mode 100644 oai/oai-gnb-operator/config.yaml create mode 100644 oai/oai-gnb-operator/metadata.yaml create mode 100644 oai/oai-gnb-operator/requirements-dev.txt create mode 100644 oai/oai-gnb-operator/requirements.txt create mode 100755 oai/oai-gnb-operator/run_tests create mode 100755 oai/oai-gnb-operator/src/charm.py create mode 100644 oai/oai-gnb-operator/src/utils.py create mode 100644 oai/oai-gnb-operator/tests/__init__.py create mode 100644 oai/oai-gnb-operator/tests/test_charm.py create mode 100644 oai/oai-nr-ue-operator/.flake8 create mode 100644 oai/oai-nr-ue-operator/.gitignore create mode 100644 oai/oai-nr-ue-operator/.jujuignore create mode 100644 oai/oai-nr-ue-operator/CONTRIBUTING.md create mode 100644 oai/oai-nr-ue-operator/LICENSE create mode 100644 oai/oai-nr-ue-operator/README.md create mode 100644 oai/oai-nr-ue-operator/actions.yaml create mode 100644 oai/oai-nr-ue-operator/charmcraft.yaml create mode 100644 oai/oai-nr-ue-operator/config.yaml create mode 100644 oai/oai-nr-ue-operator/metadata.yaml create mode 100644 oai/oai-nr-ue-operator/requirements-dev.txt create mode 100644 oai/oai-nr-ue-operator/requirements.txt create mode 100755 oai/oai-nr-ue-operator/run_tests create mode 100755 oai/oai-nr-ue-operator/src/charm.py create mode 100644 oai/oai-nr-ue-operator/src/utils.py create mode 100644 oai/oai-nr-ue-operator/tests/__init__.py create mode 100644 oai/oai-nr-ue-operator/tests/test_charm.py create mode 100644 oai/oai-nrf-operator/.flake8 create mode 100644 oai/oai-nrf-operator/.gitignore create mode 100644 oai/oai-nrf-operator/.jujuignore create mode 100644 oai/oai-nrf-operator/CONTRIBUTING.md create mode 100644 oai/oai-nrf-operator/LICENSE create mode 100644 oai/oai-nrf-operator/README.md create mode 100644 oai/oai-nrf-operator/charmcraft.yaml create mode 100644 oai/oai-nrf-operator/config.yaml create mode 100644 oai/oai-nrf-operator/metadata.yaml create mode 100644 oai/oai-nrf-operator/requirements-dev.txt create mode 100644 oai/oai-nrf-operator/requirements.txt create mode 100755 oai/oai-nrf-operator/run_tests create mode 100755 oai/oai-nrf-operator/src/charm.py create mode 100644 oai/oai-nrf-operator/src/utils.py create mode 100644 oai/oai-nrf-operator/tests/__init__.py create mode 100644 oai/oai-nrf-operator/tests/test_charm.py create mode 100644 oai/oai-smf-operator/.flake8 create mode 100644 oai/oai-smf-operator/.gitignore create mode 100644 oai/oai-smf-operator/.jujuignore create mode 100644 oai/oai-smf-operator/CONTRIBUTING.md create mode 100644 oai/oai-smf-operator/LICENSE create mode 100644 oai/oai-smf-operator/README.md create mode 100644 oai/oai-smf-operator/charmcraft.yaml create mode 100644 oai/oai-smf-operator/config.yaml create mode 100644 oai/oai-smf-operator/metadata.yaml create mode 100644 oai/oai-smf-operator/requirements-dev.txt create mode 100644 oai/oai-smf-operator/requirements.txt 
create mode 100755 oai/oai-smf-operator/run_tests create mode 100755 oai/oai-smf-operator/src/charm.py create mode 100644 oai/oai-smf-operator/src/utils.py create mode 100644 oai/oai-smf-operator/tests/__init__.py create mode 100644 oai/oai-smf-operator/tests/test_charm.py create mode 100644 oai/oai-spgwu-tiny-operator/.flake8 create mode 100644 oai/oai-spgwu-tiny-operator/.gitignore create mode 100644 oai/oai-spgwu-tiny-operator/.jujuignore create mode 100644 oai/oai-spgwu-tiny-operator/CONTRIBUTING.md create mode 100644 oai/oai-spgwu-tiny-operator/LICENSE create mode 100644 oai/oai-spgwu-tiny-operator/README.md create mode 100644 oai/oai-spgwu-tiny-operator/charmcraft.yaml create mode 100644 oai/oai-spgwu-tiny-operator/config.yaml create mode 100644 oai/oai-spgwu-tiny-operator/metadata.yaml create mode 100644 oai/oai-spgwu-tiny-operator/requirements-dev.txt create mode 100644 oai/oai-spgwu-tiny-operator/requirements.txt create mode 100755 oai/oai-spgwu-tiny-operator/run_tests create mode 100755 oai/oai-spgwu-tiny-operator/src/charm.py create mode 100644 oai/oai-spgwu-tiny-operator/src/utils.py create mode 100644 oai/oai-spgwu-tiny-operator/tests/__init__.py create mode 100644 oai/oai-spgwu-tiny-operator/tests/test_charm.py diff --git a/oai/.gitignore b/oai/.gitignore new file mode 100644 index 00000000..0ee250d5 --- /dev/null +++ b/oai/.gitignore @@ -0,0 +1,5 @@ +.vscode +oai.zip +logs/ +*.charm +build/ \ No newline at end of file diff --git a/oai/README.md b/oai/README.md new file mode 100644 index 00000000..d3a935ed --- /dev/null +++ b/oai/README.md @@ -0,0 +1,102 @@ +# OPENAIR-CN-5G: An implementation of the 5G Core network by the OpenAirInterface community. + +This bundle deploys the OPENAIR-CN-5G charm operators. + +## Deployment + +```bash +juju add-model oai +juju deploy ch:oai --channel edge --trust +``` + +Verify the deployment has been successfully deployed: + +```bash +$ juju status +Model Controller Cloud/Region Version SLA Timestamp +oa osm-vca microk8s/localhost 2.9.9 unsupported 14:59:42+02:00 + +App Version Status Scale Charm Store Channel Rev OS Address Message +amf active 1 oai-amf charmhub edge 2 kubernetes 10.152.183.250 +db active 1 oai-db charmhub edge 2 kubernetes 10.152.183.21 +gnb active 1 oai-gnb charmhub edge 2 kubernetes 10.152.183.108 +nr-ue active 1 oai-nr-ue charmhub edge 2 kubernetes 10.152.183.16 +nrf active 1 oai-nrf charmhub edge 2 kubernetes 10.152.183.105 +smf active 1 oai-smf charmhub edge 2 kubernetes 10.152.183.73 +spgwu-tiny active 1 oai-spgwu-tiny charmhub edge 2 kubernetes 10.152.183.243 + +Unit Workload Agent Address Ports Message +amf/0* active idle 10.1.245.70 +db/0* active idle 10.1.245.109 +gnb/0* active idle 10.1.245.85 registered +nr-ue/0* active idle 10.1.245.107 registered +nrf/0* active idle 10.1.245.80 +smf/0* active idle 10.1.245.125 +spgwu-tiny/0* active idle 10.1.245.118 +``` + +## Test network connectivity from UE + +```bash +$ kubectl -n oai exec -it nr-ue-0 -c nr-ue -- ping -I oaitun_ue1 google.fr -c 1 +PING google.fr (142.250.201.67) from 12.1.1.129 oaitun_ue1: 56(84) bytes of data. 
+64 bytes from mad07s25-in-f3.1e100.net (142.250.201.67): icmp_seq=1 ttl=115 time=23.2 ms
+
+--- google.fr ping statistics ---
+1 packets transmitted, 1 received, 0% packet loss, time 0ms
+rtt min/avg/max/mdev = 23.250/23.250/23.250/0.000 ms
+```
+
+## Scale the gNB
+
+```bash
+juju scale-application gnb 3
+```
+
+## Start and stop UE
+
+Stop:
+
+```bash
+$ juju run-action nr-ue/0 stop --wait
+unit-nr-ue-0:
+  UnitId: nr-ue/0
+  id: "2"
+  results:
+    output: service has been stopped successfully
+  status: completed
+  timing:
+    completed: 2021-09-28 13:02:34 +0000 UTC
+    enqueued: 2021-09-28 13:02:29 +0000 UTC
+    started: 2021-09-28 13:02:32 +0000 UTC
+$ kubectl -n oai exec -it nr-ue-0 -c nr-ue -- ping -I oaitun_ue1 google.fr -c 1
+ping: SO_BINDTODEVICE: Invalid argument
+command terminated with exit code 2
+```
+
+Start:
+
+```bash
+$ juju run-action nr-ue/0 start --wait
+unit-nr-ue-0:
+  UnitId: nr-ue/0
+  id: "4"
+  results:
+    output: service has been started successfully
+  status: completed
+  timing:
+    completed: 2021-09-28 13:03:38 +0000 UTC
+    enqueued: 2021-09-28 13:03:32 +0000 UTC
+    started: 2021-09-28 13:03:36 +0000 UTC
+$ kubectl -n oai exec -it nr-ue-0 -c nr-ue -- ping -I oaitun_ue1 google.fr -c 1
+PING google.fr (142.250.185.3) from 12.1.1.130 oaitun_ue1: 56(84) bytes of data.
+64 bytes from mad41s11-in-f3.1e100.net (142.250.185.3): icmp_seq=1 ttl=115 time=25.2 ms
+
+--- google.fr ping statistics ---
+1 packets transmitted, 1 received, 0% packet loss, time 0ms
+rtt min/avg/max/mdev = 25.205/25.205/25.205/0.000 ms
+```
+
+## Reference links
+
+- https://gitlab.eurecom.fr/oai/cn5g/oai-cn5g-fed
diff --git a/oai/build.sh b/oai/build.sh
new file mode 100755
index 00000000..4f80e8cc
--- /dev/null
+++ b/oai/build.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+function build() {
+    charm=$1
+    cd oai-$charm-operator/
+    # charmcraft clean
+    charmcraft build
+    mv oai-${charm}_ubuntu-20.04-amd64.charm $charm.charm
+    cd ..
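+    # the built charm for this component is now at oai-$charm-operator/$charm.charm (relative to the bundle root)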
+} + +charms="nrf amf smf spgwu-tiny db gnb nr-ue" +for charm in $charms; do + build $charm & +done + +wait diff --git a/oai/bundle.yaml b/oai/bundle.yaml new file mode 100644 index 00000000..4bc8b1c1 --- /dev/null +++ b/oai/bundle.yaml @@ -0,0 +1,65 @@ +bundle: kubernetes +name: oai +applications: + db: + charm: ch:oai-db + scale: 1 + channel: edge + trust: true + nrf: + charm: ch:oai-nrf + scale: 1 + channel: edge + trust: true + options: + start-tcpdump: false + amf: + charm: ch:oai-amf + trust: true + options: + start-tcpdump: false + scale: 1 + channel: edge + smf: + charm: ch:oai-smf + scale: 1 + channel: edge + trust: true + options: + start-tcpdump: false + spgwu-tiny: + charm: ch:oai-spgwu-tiny + scale: 1 + channel: edge + trust: true + options: + start-tcpdump: false + gnb: + charm: ch:oai-gnb + scale: 1 + channel: edge + trust: true + nr-ue: + charm: ch:oai-nr-ue + scale: 1 + channel: edge + trust: true +relations: + - - db:db + - amf:db + - - nrf:nrf + - amf:nrf + - - nrf:nrf + - smf:nrf + - - smf:amf + - amf:amf + - - nrf:nrf + - spgwu-tiny:nrf + - - spgwu-tiny:smf + - smf:smf + - - gnb:amf + - amf:amf + - - spgwu-tiny:spgwu + - gnb:spgwu + - - nr-ue:gnb + - gnb:gnb diff --git a/oai/charmcraft.yaml b/oai/charmcraft.yaml new file mode 100644 index 00000000..d7a57a87 --- /dev/null +++ b/oai/charmcraft.yaml @@ -0,0 +1 @@ +type: bundle diff --git a/oai/deploy.sh b/oai/deploy.sh new file mode 100755 index 00000000..2fc4b779 --- /dev/null +++ b/oai/deploy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +juju deploy ./bundle.yaml --trust \ No newline at end of file diff --git a/oai/oai-amf-operator/.flake8 b/oai/oai-amf-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-amf-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-amf-operator/.gitignore b/oai/oai-amf-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-amf-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-amf-operator/.jujuignore b/oai/oai-amf-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-amf-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-amf-operator/CONTRIBUTING.md b/oai/oai-amf-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-amf-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-amf + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. 
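+
+For example, a minimal sketch of such a harness-based test (illustration only, not part of
+this charm's test suite: the class, container and service names come from `src/charm.py`
+and `metadata.yaml`, and the Pebble behaviour assumed is the default `ops.testing` mock):
+
+```python
+import unittest
+
+from ops.testing import Harness
+
+from charm import OaiAmfCharm
+
+
+class TestPebbleReadySketch(unittest.TestCase):
+    def test_amf_layer_added_on_pebble_ready(self):
+        harness = Harness(OaiAmfCharm)
+        self.addCleanup(harness.cleanup)
+        harness.begin()
+        # Emit the pebble-ready event for the amf workload container.
+        container = harness.model.unit.get_container("amf")
+        harness.charm.on.amf_pebble_ready.emit(container)
+        # The charm should have added the oai_amf service to the Pebble plan,
+        # even though it stays blocked until the nrf and db relations are present.
+        plan = harness.get_container_pebble_plan("amf").to_dict()
+        self.assertIn("oai_amf", plan.get("services", {}))
+```
+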
Just `run_tests`: + + ./run_tests diff --git a/oai/oai-amf-operator/LICENSE b/oai/oai-amf-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-amf-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-amf-operator/README.md b/oai/oai-amf-operator/README.md new file mode 100644 index 00000000..c7162b91 --- /dev/null +++ b/oai/oai-amf-operator/README.md @@ -0,0 +1 @@ +# oai-amf diff --git a/oai/oai-amf-operator/charmcraft.yaml b/oai/oai-amf-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-amf-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-amf-operator/config.yaml b/oai/oai-amf-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-amf-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-amf-operator/metadata.yaml b/oai/oai-amf-operator/metadata.yaml new file mode 100644 index 00000000..d0f062e3 --- /dev/null +++ b/oai/oai-amf-operator/metadata.yaml @@ -0,0 +1,31 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-amf +display-name: OAI amf +description: OAI amf +summary: OAI amf + +containers: + amf: + resource: oai-amf-image + tcpdump: + resource: tcpdump-image + +resources: + oai-amf-image: + type: oci-image + description: OCI image for oai-amf (rdefosseoai/oai-amf:v1.1.0) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +requires: + nrf: + interface: nrf + db: + interface: mysql +provides: + amf: + interface: amf diff --git a/oai/oai-amf-operator/requirements-dev.txt b/oai/oai-amf-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-amf-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-amf-operator/requirements.txt b/oai/oai-amf-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-amf-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-amf-operator/run_tests b/oai/oai-amf-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-amf-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . 
venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-amf-operator/src/charm.py b/oai/oai-amf-operator/src/charm.py new file mode 100755 index 00000000..503f8b16 --- /dev/null +++ b/oai/oai-amf-operator/src/charm.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. + +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +SCTP_PORT = 38412 +HTTP1_PORT = 80 +HTTP2_PORT = 9090 + + +class OaiAmfCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("oai-amf", SCTP_PORT, SCTP_PORT, "SCTP"), + ("http1", HTTP1_PORT, HTTP1_PORT, "TCP"), + ("http2", HTTP2_PORT, HTTP2_PORT, "TCP"), + ], + privileged=True, + container_name="amf", + service_name="oai_amf", + ) + # Observe charm events + event_observer_mapping = { + self.on.amf_pebble_ready: self._on_oai_amf_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.amf_relation_joined: self._on_amf_relation_joined, + self.on.amf_relation_changed: self._on_amf_relation_changed, + self.on.nrf_relation_changed: self._update_service, + self.on.nrf_relation_broken: self._update_service, + self.on.db_relation_changed: self._update_service, + self.on.db_relation_broken: self._update_service, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + # Set defaults in Stored State for the relation data + self._stored.set_default( + nrf_host=None, + nrf_port=None, + nrf_api_version=None, + db_host=None, + db_port=None, + db_user=None, + db_password=None, + db_database=None, + ) + + #################################### + # Charm events handlers + #################################### + + def _on_oai_amf_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_amf_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + # def _on_stop(self, event): + # if self.unit.is_leader(): + # self._clear_service_info() + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _on_amf_relation_joined(self, event): + try: + if self.unit.is_leader() and self.is_service_running(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_amf_relation_changed(self, event): + if event.unit in event.relation.data and all( + key in event.relation.data[event.unit] for key in ("gnb-name", "gnb-status") + ): + gnb_name = event.relation.data[event.unit]["gnb-name"] + gnb_status = event.relation.data[event.unit]["gnb-status"] + if gnb_status == "started": + self._wait_gnb_is_registered(gnb_name) + event.relation.data[self.app][gnb_name] = "registered" + if event.app in event.relation.data and 
all( + key in event.relation.data[event.app] for key in ("ue-imsi", "ue-status") + ): + ue_imsi = event.relation.data[event.app]["ue-imsi"] + ue_status = event.relation.data[event.app]["ue-status"] + if ue_status == "started": + self._wait_ue_is_registered(ue_imsi) + event.relation.data[self.app][ue_imsi] = "registered" + + def _update_service(self, event): + try: + logger.info("Updating service...") + if not self.service_exists(): + logger.warning("service does not exist") + return + # Load data from dependent relations + self._load_nrf_data() + self._load_db_data() + relations_ready = self.is_nrf_ready and self.is_db_ready + if not relations_ready: + self.unit.status = BlockedStatus("need nrf and db relations") + if self.is_service_running(): + self.stop_service() + elif not self.is_service_running(): + self._configure_service() + self.start_service() + self._wait_until_service_is_active() + if self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + if pod_ip := self.pod_ip: + for relation in self.framework.model.relations["amf"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = self.app.name + relation.data[self.app]["ip-address"] = str(pod_ip) + relation.data[self.app]["port"] = str(HTTP1_PORT) + relation.data[self.app]["api-version"] = "v1" + logger.info( + f"Info provided in relation {relation.name} (id {relation.id})" + ) + + def _clear_service_info(self): + for relation in self.framework.model.relations["amf"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = "" + relation.data[self.app]["ip-address"] = "" + relation.data[self.app]["port"] = "" + relation.data[self.app]["api-version"] = "" + logger.info(f"Info cleared in relation {relation.name} (id {relation.id})") + + def _wait_gnb_is_registered(self, gnb_name): + self.unit.status = WaitingStatus(f"waiting for gnb {gnb_name} to be registered") + self.search_logs(subsets_in_line={"Connected", gnb_name}, wait=True) + self.unit.status = ActiveStatus() + + def _wait_ue_is_registered(self, ue_imsi): + self.unit.status = WaitingStatus(f"waiting for ue {ue_imsi} to be registered") + self.search_logs(subsets_in_line={"5GMM-REGISTERED", ue_imsi}, wait=True) + self.unit.status = ActiveStatus() + + def _wait_until_service_is_active(self): + logger.debug("Waiting for service to be active...") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs( + { + "amf_n2 started", + "amf_n11 started", + "Initiating all registered modules", + "-----gNBs' information----", + }, + wait=True, + ) + if active: + # wait extra time + time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + @property + def is_nrf_ready(self): + is_ready = ( + self._stored.nrf_host + and self._stored.nrf_port + and self._stored.nrf_api_version + ) + logger.info(f'nrf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_nrf_data(self): + logger.debug("Loading nrf data from relation") + relation = self.framework.model.get_relation("nrf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.nrf_host = 
relation_data.get("host") + self._stored.nrf_port = relation_data.get("port") + self._stored.nrf_api_version = relation_data.get("api-version") + logger.info("nrf data loaded") + else: + self._stored.nrf_host = None + self._stored.nrf_port = None + self._stored.nrf_api_version = None + logger.warning("no relation found") + + @property + def is_db_ready(self): + is_ready = ( + self._stored.db_host + and self._stored.db_port + and self._stored.db_user + and self._stored.db_password + and self._stored.db_database + ) + logger.info(f'db is{" " if is_ready else " not "}ready') + return is_ready + + def _load_db_data(self): + logger.debug("Loading db data from relation") + relation = self.framework.model.get_relation("db") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.db_host = relation_data.get("host") + self._stored.db_port = relation_data.get("port") + self._stored.db_user = relation_data.get("user") + self._stored.db_password = relation_data.get("password") + self._stored.db_database = relation_data.get("database") + logger.info("db data loaded") + else: + self._stored.db_host = None + self._stored.db_port = None + self._stored.db_user = None + self._stored.db_password = None + self._stored.db_database = None + logger.warning("no relation found") + + def _configure_service(self): + if not self.service_exists(): + logger.debug("Cannot configure service: service does not exist yet") + return + logger.debug("Configuring amf service") + container = self.unit.get_container("amf") + container.add_layer( + "oai_amf", + { + "services": { + "oai_amf": { + "override": "merge", + "environment": { + "NRF_FQDN": self._stored.nrf_host, + "NRF_IPV4_ADDRESS": "0.0.0.0", + "NRF_PORT": self._stored.nrf_port, + "NRF_API_VERSION": self._stored.nrf_api_version, + "MYSQL_SERVER": f"{self._stored.db_host}", + "MYSQL_USER": self._stored.db_user, + "MYSQL_PASS": self._stored.db_password, + "MYSQL_DB": self._stored.db_database, + }, + } + }, + }, + combine=True, + ) + logger.info("amf service configured") + + def _add_oai_amf_layer(self, container): + entrypoint = "/bin/bash /openair-amf/bin/entrypoint.sh" + command = " ".join( + ["/openair-amf/bin/oai_amf", "-c", "/openair-amf/etc/amf.conf", "-o"] + ) + pebble_layer = { + "summary": "oai_amf layer", + "description": "pebble config layer for oai_amf", + "services": { + "oai_amf": { + "override": "replace", + "summary": "oai_amf", + "command": f"{entrypoint} {command}", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + "INSTANCE": "0", + "PID_DIRECTORY": "/var/run", + "MCC": "208", + "MNC": "95", + "REGION_ID": "128", + "AMF_SET_ID": "1", + "SERVED_GUAMI_MCC_0": "208", + "SERVED_GUAMI_MNC_0": "95", + "SERVED_GUAMI_REGION_ID_0": "128", + "SERVED_GUAMI_AMF_SET_ID_0": "1", + "SERVED_GUAMI_MCC_1": "460", + "SERVED_GUAMI_MNC_1": "11", + "SERVED_GUAMI_REGION_ID_1": "10", + "SERVED_GUAMI_AMF_SET_ID_1": "1", + "PLMN_SUPPORT_MCC": "208", + "PLMN_SUPPORT_MNC": "95", + "PLMN_SUPPORT_TAC": "0x0001", + "SST_0": "1", + "SD_0": "1", + "SST_1": "111", + "SD_1": "124", + "AMF_INTERFACE_NAME_FOR_NGAP": "eth0", + "AMF_INTERFACE_NAME_FOR_N11": "eth0", + "SMF_INSTANCE_ID_0": "1", + "SMF_IPV4_ADDR_0": "0.0.0.0", + "SMF_HTTP_VERSION_0": "v1", + "SMF_FQDN_0": "localhost", + "SMF_INSTANCE_ID_1": "2", + "SMF_IPV4_ADDR_1": "0.0.0.0", + "SMF_HTTP_VERSION_1": "v1", + "SMF_FQDN_1": "localhost", + "AUSF_IPV4_ADDRESS": "127.0.0.1", + "AUSF_PORT": 80, + "AUSF_API_VERSION": "v1", + "NF_REGISTRATION": 
"yes", + "SMF_SELECTION": "yes", + "USE_FQDN_DNS": "yes", + "OPERATOR_KEY": "63bfa50ee6523365ff14c1f45f88737d", + }, + } + }, + } + container.add_layer("oai_amf", pebble_layer, combine=True) + logger.info("oai_amf layer added") + + +if __name__ == "__main__": + main(OaiAmfCharm, use_juju_for_storage=True) diff --git a/oai/oai-amf-operator/src/utils.py b/oai/oai-amf-operator/src/utils.py new file mode 100644 index 00000000..2fd511ce --- /dev/null +++ b/oai/oai-amf-operator/src/utils.py @@ -0,0 +1,325 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. 
+ """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = 
self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 10 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs( + self, logs: Set[str] = {}, subsets_in_line: Set[str] = {}, wait: bool = False + ) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + if logs and subsets_in_line: + raise Exception("logs and subsets_in_line cannot both be defined") + elif not logs and not subsets_in_line: + raise Exception("logs or subsets_in_line must be defined") + + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + if logs: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info("all logs found") + break + else: + if all(subset in line for subset in subsets_in_line): + all_logs_found = True + logger.info("subset of strings found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-amf-operator/tests/__init__.py b/oai/oai-amf-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-amf-operator/tests/test_charm.py b/oai/oai-amf-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-amf-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiAmfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiAmfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-db-operator/.flake8 b/oai/oai-db-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-db-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-db-operator/.gitignore b/oai/oai-db-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-db-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-db-operator/.jujuignore b/oai/oai-db-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-db-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-db-operator/CONTRIBUTING.md b/oai/oai-db-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-db-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-amf + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. 
Just `run_tests`: + + ./run_tests diff --git a/oai/oai-db-operator/LICENSE b/oai/oai-db-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-db-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-db-operator/README.md b/oai/oai-db-operator/README.md new file mode 100644 index 00000000..d323041b --- /dev/null +++ b/oai/oai-db-operator/README.md @@ -0,0 +1 @@ +# oai-db \ No newline at end of file diff --git a/oai/oai-db-operator/charmcraft.yaml b/oai/oai-db-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-db-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-db-operator/config.yaml b/oai/oai-db-operator/config.yaml new file mode 100644 index 00000000..a7ca3a90 --- /dev/null +++ b/oai/oai-db-operator/config.yaml @@ -0,0 +1,6 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: {} \ No newline at end of file diff --git a/oai/oai-db-operator/metadata.yaml b/oai/oai-db-operator/metadata.yaml new file mode 100644 index 00000000..573b8690 --- /dev/null +++ b/oai/oai-db-operator/metadata.yaml @@ -0,0 +1,21 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-db +display-name: OAI DB +description: OAI mysql database +summary: OAI mysql database + +containers: + db: + resource: oai-db-image + +resources: + oai-db-image: + type: oci-image + description: OCI image for oai-nrf +provides: + db: + interface: mysql diff --git a/oai/oai-db-operator/requirements-dev.txt b/oai/oai-db-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-db-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-db-operator/requirements.txt b/oai/oai-db-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-db-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-db-operator/run_tests b/oai/oai-db-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-db-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-db-operator/src/charm.py b/oai/oai-db-operator/src/charm.py new file mode 100755 index 00000000..2fc8425d --- /dev/null +++ b/oai/oai-db-operator/src/charm.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. 
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +from pathlib import Path +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +MYSQL_PORT = 3306 + + +class OaiDbCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + ports=[("myqsl", MYSQL_PORT, MYSQL_PORT, "TCP")], + container_name="db", + service_name="oai_db", + ) + # Observe charm events + event_observer_mapping = { + # self.on.stop: self._on_stop, + self.on.db_pebble_ready: self._on_oai_db_pebble_ready, + self.on.db_relation_joined: self._on_db_relation_joined, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + + #################################### + # Charm Events handlers + #################################### + + def _on_oai_db_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_db_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + # def _on_stop(self, event): + # if self.unit.is_leader(): + # self._clear_service_info() + + def _on_db_relation_joined(self, event): + try: + if self.is_service_running() and self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _update_service(self, event): + try: + if self.service_exists() and not self.is_service_running(): + self._initialize_db() + self.start_service() + self._wait_until_service_is_active() + if self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + for relation in self.framework.model.relations["db"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = self.app.name + relation.data[self.app]["port"] = str(MYSQL_PORT) + relation.data[self.app]["user"] = "root" + relation.data[self.app]["password"] = "root" + relation.data[self.app]["database"] = "oai_db" + logger.info(f"Info provided in relation {relation.name} (id {relation.id})") + + def _clear_service_info(self): + for relation in self.framework.model.relations["db"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = "" + relation.data[self.app]["port"] = "" + relation.data[self.app]["user"] = "" + relation.data[self.app]["password"] = "" + relation.data[self.app]["database"] = "" + logger.info(f"Info cleared in relation {relation.name} (id {relation.id})") + + def _wait_until_service_is_active(self): + logger.debug("Waiting for service to be active") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs({"[Note] mysqld: ready for connections."}, wait=True) + if active: + # wait extra time + time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + def _initialize_db(self): + 
try: + logger.debug("Initializing DB") + container = self.unit.get_container("db") + db_sql_data = Path("templates/db.sql").read_text() + container.push("/docker-entrypoint-initdb.d/db.sql", db_sql_data) + logger.info("DB has been successfully initialized") + except Exception as e: + logger.error(f"failed initializing the DB: {e}") + + def _add_oai_db_layer(self, container): + container.add_layer( + "oai_db", + { + "summary": "oai_db layer", + "description": "pebble config layer for oai_db", + "services": { + "oai_db": { + "override": "replace", + "summary": "oai_db", + "command": "docker-entrypoint.sh mysqld", + "environment": { + "MYSQL_ROOT_PASSWORD": "root", + "MYSQL_DATABASE": "oai_db", + "GOSU_VERSION": "1.13", + "MARIADB_MAJOR": "10.3", + "MARIADB_VERSION": "1:10.3.31+maria~focal", + }, + } + }, + }, + combine=True, + ) + logger.info("oai_db layer added") + + +if __name__ == "__main__": + main(OaiDbCharm, use_juju_for_storage=True) diff --git a/oai/oai-db-operator/src/utils.py b/oai/oai-db-operator/src/utils.py new file mode 100644 index 00000000..2fd511ce --- /dev/null +++ b/oai/oai-db-operator/src/utils.py @@ -0,0 +1,325 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. 
+ Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. + """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = 
self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 10 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs( + self, logs: Set[str] = {}, subsets_in_line: Set[str] = {}, wait: bool = False + ) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + if logs and subsets_in_line: + raise Exception("logs and subsets_in_line cannot both be defined") + elif not logs and not subsets_in_line: + raise Exception("logs or subsets_in_line must be defined") + + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + if logs: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info("all logs found") + break + else: + if all(subset in line for subset in subsets_in_line): + all_logs_found = True + logger.info("subset of strings found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-db-operator/templates/db.sql b/oai/oai-db-operator/templates/db.sql new file mode 100644 index 00000000..c8a0e435 --- /dev/null +++ b/oai/oai-db-operator/templates/db.sql @@ -0,0 +1,213 @@ +-- MySQL dump 10.13 Distrib 5.5.46, for debian-linux-gnu (x86_64) +-- +-- Host: localhost Database: oai_db +-- ------------------------------------------------------ +-- Server version 5.5.46-0ubuntu0.14.04.2 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Table structure for table `apn` +-- + +DROP TABLE IF EXISTS `apn`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `apn` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `apn-name` varchar(60) NOT NULL, + `pdn-type` enum('IPv4','IPv6','IPv4v6','IPv4_or_IPv6') NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `apn-name` (`apn-name`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `apn` +-- + +LOCK TABLES `apn` WRITE; +/*!40000 ALTER TABLE `apn` DISABLE KEYS */; +/*!40000 ALTER TABLE `apn` ENABLE KEYS */; +UNLOCK TABLES; + +-- +-- Table structure for table `mmeidentity` +-- + +DROP TABLE IF EXISTS `mmeidentity`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `mmeidentity` ( + `idmmeidentity` int(11) NOT NULL AUTO_INCREMENT, + 
`mmehost` varchar(255) DEFAULT NULL, + `mmerealm` varchar(200) DEFAULT NULL, + `UE-Reachability` tinyint(1) NOT NULL COMMENT 'Indicates whether the MME supports UE Reachability Notifcation', + PRIMARY KEY (`idmmeidentity`) +) ENGINE=MyISAM AUTO_INCREMENT=46 DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `mmeidentity` +-- + +LOCK TABLES `mmeidentity` WRITE; +/*!40000 ALTER TABLE `mmeidentity` DISABLE KEYS */; +INSERT INTO `mmeidentity` VALUES (2,'mme2.openair4G.eur','openair4G.eur',0),(1,'nano.openair4G.eur','openair4G.eur',0),(5,'abeille.openair4G.eur','openair4G.eur',0),(4,'yang.openair4G.eur','openair4G.eur',0),(3,'mme3.openair4G.eur','openair4G.eur',0),(6,'calisson.openair4G.eur','openair4G.eur',0); +/*!40000 ALTER TABLE `mmeidentity` ENABLE KEYS */; +UNLOCK TABLES; + +-- +-- Table structure for table `pdn` +-- + +DROP TABLE IF EXISTS `pdn`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `pdn` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `apn` varchar(60) NOT NULL, + `pdn_type` enum('IPv4','IPv6','IPv4v6','IPv4_or_IPv6') NOT NULL DEFAULT 'IPv4', + `pdn_ipv4` varchar(15) DEFAULT '0.0.0.0', + `pdn_ipv6` varchar(45) CHARACTER SET latin1 COLLATE latin1_general_ci DEFAULT '0:0:0:0:0:0:0:0', + `aggregate_ambr_ul` int(10) unsigned DEFAULT '50000000', + `aggregate_ambr_dl` int(10) unsigned DEFAULT '100000000', + `pgw_id` int(11) NOT NULL, + `users_imsi` varchar(15) NOT NULL, + `qci` tinyint(3) unsigned NOT NULL DEFAULT '9', + `priority_level` tinyint(3) unsigned NOT NULL DEFAULT '15', + `pre_emp_cap` enum('ENABLED','DISABLED') DEFAULT 'DISABLED', + `pre_emp_vul` enum('ENABLED','DISABLED') DEFAULT 'DISABLED', + `LIPA-Permissions` enum('LIPA-prohibited','LIPA-only','LIPA-conditional') NOT NULL DEFAULT 'LIPA-only', + PRIMARY KEY (`id`,`pgw_id`,`users_imsi`), + KEY `fk_pdn_pgw1_idx` (`pgw_id`), + KEY `fk_pdn_users1_idx` (`users_imsi`) +) ENGINE=MyISAM AUTO_INCREMENT=60 DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `pdn` +-- + +LOCK TABLES `pdn` WRITE; +/*!40000 ALTER TABLE `pdn` DISABLE KEYS */; +INSERT INTO `pdn` VALUES 
(1,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000001',9,15,'DISABLED','ENABLED','LIPA-only'),(41,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'20834123456789',9,15,'DISABLED','ENABLED','LIPA-only'),(40,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'20810000001234',9,15,'DISABLED','ENABLED','LIPA-only'),(42,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'31002890832150',9,15,'DISABLED','ENABLED','LIPA-only'),(16,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000002',9,15,'DISABLED','ENABLED','LIPA-only'),(43,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'001010123456789',9,15,'DISABLED','ENABLED','LIPA-only'),(2,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000002',9,15,'DISABLED','ENABLED','LIPA-only'),(3,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000003',9,15,'DISABLED','ENABLED','LIPA-only'),(4,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000004',9,15,'DISABLED','ENABLED','LIPA-only'),(5,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000005',9,15,'DISABLED','ENABLED','LIPA-only'),(6,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000006',9,15,'DISABLED','ENABLED','LIPA-only'),(7,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930000000007',9,15,'DISABLED','ENABLED','LIPA-only'),(8,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000001',9,15,'DISABLED','ENABLED','LIPA-only'),(9,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000002',9,15,'DISABLED','ENABLED','LIPA-only'),(10,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000003',9,15,'DISABLED','ENABLED','LIPA-only'),(11,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000004',9,15,'DISABLED','ENABLED','LIPA-only'),(12,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000005',9,15,'DISABLED','ENABLED','LIPA-only'),(13,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000006',9,15,'DISABLED','ENABLED','LIPA-only'),(14,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208940000000007',9,15,'DISABLED','ENABLED','LIPA-only'),(15,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000001',9,15,'DISABLED','ENABLED','LIPA-only'),(17,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000003',9,15,'DISABLED','ENABLED','LIPA-only'),(18,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000004',9,15,'DISABLED','ENABLED','LIPA-only'),(19,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000005',9,15,'DISABLED','ENABLED','LIPA-only'),(20,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000006',9,15,'DISABLED','ENABLED','LIPA-only'),(21,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000007',9,15,'DISABLED','ENABLED','LIPA-only'),(22,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001100',9,15,'DISABLED','ENABLED','LIPA-only'),(23,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001101',9,15,'DISABLED','ENABLED','LIPA-only'),(24,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001102',9,15,'DISABLED','ENABL
ED','LIPA-only'),(25,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001103',9,15,'DISABLED','ENABLED','LIPA-only'),(26,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001104',9,15,'DISABLED','ENABLED','LIPA-only'),(27,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001105',9,15,'DISABLED','ENABLED','LIPA-only'),(28,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001106',9,15,'DISABLED','ENABLED','LIPA-only'),(29,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001107',9,15,'DISABLED','ENABLED','LIPA-only'),(30,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001108',9,15,'DISABLED','ENABLED','LIPA-only'),(31,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001109',9,15,'DISABLED','ENABLED','LIPA-only'),(32,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001110',9,15,'DISABLED','ENABLED','LIPA-only'),(33,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001111',9,15,'DISABLED','ENABLED','LIPA-only'),(34,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001112',9,15,'DISABLED','ENABLED','LIPA-only'),(35,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001113',9,15,'DISABLED','ENABLED','LIPA-only'),(44,'operator','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001113',9,15,'DISABLED','ENABLED','LIPA-only'),(45,'operator','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001112',9,15,'DISABLED','ENABLED','LIPA-only'),(46,'operator','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208930100001111',9,15,'DISABLED','ENABLED','LIPA-only'),(47,'operator','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000002',9,15,'DISABLED','ENABLED','LIPA-only'),(48,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000008',9,15,'DISABLED','ENABLED','LIPA-only'),(49,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000009',9,15,'DISABLED','ENABLED','LIPA-only'),(50,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000010',9,15,'DISABLED','ENABLED','LIPA-only'),(51,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000011',9,15,'DISABLED','ENABLED','LIPA-only'),(52,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000012',9,15,'DISABLED','ENABLED','LIPA-only'),(53,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000013',9,15,'DISABLED','ENABLED','LIPA-only'),(54,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000014',9,15,'DISABLED','ENABLED','LIPA-only'),(55,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208950000000015',9,15,'DISABLED','ENABLED','LIPA-only'),(56,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001118',9,15,'DISABLED','ENABLED','LIPA-only'),(57,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001121',9,15,'DISABLED','ENABLED','LIPA-only'),(58,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001120',9,15,'DISABLED','ENABLED','LIPA-only'),(59,'oai.ipv4','IPv4','0.0.0.0','0:0:0:0:0:0:0:0',50000000,100000000,3,'208920100001119',9,15,'DISABLED','ENABLED','LIPA-only'); +/*!40000 ALTER TABLE `pdn` ENABLE KEYS */; +UNLOCK TABLES; + +-- +-- Table 
structure for table `pgw` +-- + +DROP TABLE IF EXISTS `pgw`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `pgw` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `ipv4` varchar(15) NOT NULL, + `ipv6` varchar(39) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `ipv4` (`ipv4`), + UNIQUE KEY `ipv6` (`ipv6`) +) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `pgw` +-- + +LOCK TABLES `pgw` WRITE; +/*!40000 ALTER TABLE `pgw` DISABLE KEYS */; +INSERT INTO `pgw` VALUES (1,'127.0.0.1','0:0:0:0:0:0:0:1'),(2,'192.168.56.101',''),(3,'10.0.0.2','0'); +/*!40000 ALTER TABLE `pgw` ENABLE KEYS */; +UNLOCK TABLES; + +-- +-- Table structure for table `terminal-info` +-- + +DROP TABLE IF EXISTS `terminal-info`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `terminal-info` ( + `imei` varchar(15) NOT NULL, + `sv` varchar(2) NOT NULL, + UNIQUE KEY `imei` (`imei`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `terminal-info` +-- + +LOCK TABLES `terminal-info` WRITE; +/*!40000 ALTER TABLE `terminal-info` DISABLE KEYS */; +/*!40000 ALTER TABLE `terminal-info` ENABLE KEYS */; +UNLOCK TABLES; + +-- +-- Table structure for table `users` +-- + +DROP TABLE IF EXISTS `users`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `users` ( + `imsi` varchar(15) NOT NULL COMMENT 'IMSI is the main reference key.', + `msisdn` varchar(46) DEFAULT NULL COMMENT 'The basic MSISDN of the UE (Presence of MSISDN is optional).', + `imei` varchar(15) DEFAULT NULL COMMENT 'International Mobile Equipment Identity', + `imei_sv` varchar(2) DEFAULT NULL COMMENT 'International Mobile Equipment Identity Software Version Number', + `ms_ps_status` enum('PURGED','NOT_PURGED') DEFAULT 'PURGED' COMMENT 'Indicates that ESM and EMM status are purged from MME', + `rau_tau_timer` int(10) unsigned DEFAULT '120', + `ue_ambr_ul` bigint(20) unsigned DEFAULT '50000000' COMMENT 'The Maximum Aggregated uplink MBRs to be shared across all Non-GBR bearers according to the subscription of the user.', + `ue_ambr_dl` bigint(20) unsigned DEFAULT '100000000' COMMENT 'The Maximum Aggregated downlink MBRs to be shared across all Non-GBR bearers according to the subscription of the user.', + `access_restriction` int(10) unsigned DEFAULT '60' COMMENT 'Indicates the access restriction subscription information. 3GPP TS.29272 #7.3.31', + `mme_cap` int(10) unsigned zerofill DEFAULT NULL COMMENT 'Indicates the capabilities of the MME with respect to core functionality e.g. regional access restrictions.', + `mmeidentity_idmmeidentity` int(11) NOT NULL DEFAULT '0', + `key` varbinary(16) NOT NULL DEFAULT '0' COMMENT 'UE security key', + `RFSP-Index` smallint(5) unsigned NOT NULL DEFAULT '1' COMMENT 'An index to specific RRM configuration in the E-UTRAN. 
Possible values from 1 to 256', + `urrp_mme` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'UE Reachability Request Parameter indicating that UE activity notification from MME has been requested by the HSS.', + `sqn` bigint(20) unsigned zerofill NOT NULL, + `rand` varbinary(16) NOT NULL, + `OPc` varbinary(16) DEFAULT NULL COMMENT 'Can be computed by HSS', + PRIMARY KEY (`imsi`,`mmeidentity_idmmeidentity`), + KEY `fk_users_mmeidentity_idx1` (`mmeidentity_idmmeidentity`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `users` +-- + +LOCK TABLES `users` WRITE; +/*!40000 ALTER TABLE `users` DISABLE KEYS */; +INSERT INTO `users` VALUES ('208950000000030','380561234567','55000000000001',NULL,'PURGED',50,40000000,100000000,47,0000000000,1,0x0C0A34601D4F07677303652C0462535B,0,0,0x40,'ebd07771ace8677a',0x63bfa50ee6523365ff14c1f45f88737d); +INSERT INTO `users` VALUES ('208950000000031','380561234567','55000000000001',NULL,'PURGED',50,40000000,100000000,47,0000000000,1,0x0C0A34601D4F07677303652C0462535B,0,0,0x40,'ebd07771ace8677a',0x63bfa50ee6523365ff14c1f45f88737d); +INSERT INTO `users` VALUES ('208950000000032','380561234567','55000000000001',NULL,'PURGED',50,40000000,100000000,47,0000000000,1,0x0C0A34601D4F07677303652C0462535B,0,0,0x40,'ebd07771ace8677a',0x63bfa50ee6523365ff14c1f45f88737d); +INSERT INTO `users` VALUES ('208950000000033','380561234567','55000000000001',NULL,'PURGED',50,40000000,100000000,47,0000000000,1,0x0C0A34601D4F07677303652C0462535B,0,0,0x40,'ebd07771ace8677a',0x63bfa50ee6523365ff14c1f45f88737d); +INSERT INTO `users` VALUES ('208950000000034','380561234567','55000000000001',NULL,'PURGED',50,40000000,100000000,47,0000000000,1,0x0C0A34601D4F07677303652C0462535B,0,0,0x40,'ebd07771ace8677a',0x63bfa50ee6523365ff14c1f45f88737d); +-- +-- Add a new user entry here using use above insert statement as template +-- +/*!40000 ALTER TABLE `users` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2016-06-28 11:41:40 \ No newline at end of file diff --git a/oai/oai-db-operator/tests/__init__.py b/oai/oai-db-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-db-operator/tests/test_charm.py b/oai/oai-db-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-db-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. 
+# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiAmfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiAmfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-gnb-operator/.flake8 b/oai/oai-gnb-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-gnb-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-gnb-operator/.gitignore b/oai/oai-gnb-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-gnb-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-gnb-operator/.jujuignore b/oai/oai-gnb-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-gnb-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-gnb-operator/CONTRIBUTING.md b/oai/oai-gnb-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-gnb-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-amf + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is 
what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/oai/oai-gnb-operator/LICENSE b/oai/oai-gnb-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-gnb-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-gnb-operator/README.md b/oai/oai-gnb-operator/README.md new file mode 100644 index 00000000..a710fea1 --- /dev/null +++ b/oai/oai-gnb-operator/README.md @@ -0,0 +1,24 @@ +# oai-gnb + +## Description + +TODO: Describe your charm in a few paragraphs of Markdown + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Relations + +TODO: Provide any relations which are provided or required by your charm + +## OCI Images + +TODO: Include a link to the default image your charm uses + +## Contributing + +Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines +on enhancements to this charm following best practice guidelines, and +`CONTRIBUTING.md` for developer guidance. diff --git a/oai/oai-gnb-operator/charmcraft.yaml b/oai/oai-gnb-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-gnb-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-gnb-operator/config.yaml b/oai/oai-gnb-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-gnb-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-gnb-operator/metadata.yaml b/oai/oai-gnb-operator/metadata.yaml new file mode 100644 index 00000000..6b078345 --- /dev/null +++ b/oai/oai-gnb-operator/metadata.yaml @@ -0,0 +1,33 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. 
+ +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-gnb +display-name: OAI gnb +description: OAI gnb +summary: OAI gnb + +containers: + gnb: + resource: oai-gnb-image + tcpdump: + resource: tcpdump-image + +resources: + oai-gnb-image: + type: oci-image + description: OCI image for oai-gnb (rdefosseoai/oai-gnb:develop) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +requires: + amf: + interface: amf + limit: 1 + spgwu: + interface: spgwu + limit: 1 +provides: + gnb: + interface: gnb diff --git a/oai/oai-gnb-operator/requirements-dev.txt b/oai/oai-gnb-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-gnb-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-gnb-operator/requirements.txt b/oai/oai-gnb-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-gnb-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-gnb-operator/run_tests b/oai/oai-gnb-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-gnb-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-gnb-operator/src/charm.py b/oai/oai-gnb-operator/src/charm.py new file mode 100755 index 00000000..68a78571 --- /dev/null +++ b/oai/oai-gnb-operator/src/charm.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. 
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +S1C_PORT = 36412 +S1U_PORT = 2152 +X2C_PORT = 36422 + + +class OaiGnbCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("s1c", S1C_PORT, S1C_PORT, "UDP"), + ("s1u", S1U_PORT, S1U_PORT, "UDP"), + ("x2c", X2C_PORT, X2C_PORT, "UDP"), + ], + privileged=True, + container_name="gnb", + service_name="oai_gnb", + ) + # Observe charm events + event_observer_mapping = { + self.on.gnb_pebble_ready: self._on_oai_gnb_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.gnb_relation_joined: self._on_gnb_relation_joined, + self.on.gnb_relation_changed: self._on_gnb_relation_changed, + self.on.amf_relation_changed: self._update_service, + self.on.amf_relation_broken: self._update_service, + self.on.spgwu_relation_changed: self._update_service, + self.on.spgwu_relation_broken: self._update_service, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + # Set defaults in Stored State for the relation data + self._stored.set_default( + amf_host=None, + amf_port=None, + amf_api_version=None, + gnb_registered=False, + ue_registered=False, + spgwu_ready=False, + ) + + @property + def imsi(self): + return "208950000000031" + + @property + def gnb_name(self): + return f'gnb-rfsim-{self.unit.name.replace("/", "-")}' + + #################################### + # Charm Events handlers + #################################### + + def _on_oai_gnb_pebble_ready(self, event): + pod_ip = self.pod_ip + if not pod_ip: + event.defer() + return + try: + container = event.workload + self._patch_gnb_id(container) + self._add_oai_gnb_layer(container, pod_ip) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_stop(self, event): + pass + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _on_gnb_relation_joined(self, event): + try: + if self.unit.is_leader() and self._stored.gnb_registered: + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_gnb_relation_changed(self, event): + if self.unit.is_leader() and all( + key in event.relation.data[event.app] for key in ("ue-imsi", "ue-status") + ): + ue_imsi = event.relation.data[event.app]["ue-imsi"] + ue_status = event.relation.data[event.app]["ue-status"] + relation = self.framework.model.get_relation("amf") + relation.data[self.app]["ue-imsi"] = ue_imsi + relation.data[self.app]["ue-status"] = ue_status + + def _update_service(self, event): + try: + logger.info("Updating service...") + if not self.service_exists(): + logger.warning("service does not exist") + return + # Load data from dependent relations + self._load_amf_data() + self._load_spgwu_data() + relations_ready = self.is_amf_ready and self.is_spgwu_ready + if not relations_ready: + self.unit.status = BlockedStatus("need amf and spgwu relations") + if self.is_service_running(): + self.stop_service() + 
else: + if not self.is_service_running(): + self._configure_service() + self.start_service() + self._wait_until_service_is_active() + relation = self.framework.model.get_relation("amf") + if self._stored.gnb_registered: + if relation and self.unit in relation.data: + relation.data[self.unit]["gnb-status"] = "registered" + else: + if relation and self.unit in relation.data: + relation.data[self.unit]["gnb-name"] = self.gnb_name + relation.data[self.unit]["gnb-status"] = "started" + if self._stored.gnb_registered: + if self.unit.is_leader(): + self._provide_service_info() + self.unit.status = ActiveStatus("registered") + if self.unit.is_leader(): + if self._stored.ue_registered: + relation = self.framework.model.get_relation("gnb") + relation.data[self.app][self.imsi] = "registered" + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + if pod_ip := self.pod_ip: + for relation in self.framework.model.relations["gnb"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = str(pod_ip) + logger.info( + f"Info provided in relation {relation.name} (id {relation.id})" + ) + + def _wait_until_service_is_active(self): + logger.debug("Waiting for service to be active...") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs({"ALL RUs ready - ALL gNBs ready"}, wait=True) + if active: + # wait extra time + time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + @property + def is_amf_ready(self): + is_ready = ( + self._stored.amf_host + and self._stored.amf_port + and self._stored.amf_api_version + ) + logger.info(f'amf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_amf_data(self): + logger.debug("Loading amf data from relation") + relation = self.framework.model.get_relation("amf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.amf_host = relation_data.get("ip-address") + self._stored.amf_port = relation_data.get("port") + self._stored.amf_api_version = relation_data.get("api-version") + self._stored.gnb_registered = ( + relation_data.get(self.gnb_name) == "registered" + ) + self._stored.ue_registered = relation_data.get(self.imsi) == "registered" + logger.info("amf data loaded") + else: + self._stored.amf_host = None + self._stored.amf_port = None + self._stored.amf_api_version = None + self._stored.gnb_registered = False + logger.warning("no relation found") + + @property + def is_spgwu_ready(self): + is_ready = self._stored.spgwu_ready + logger.info(f'spgwu is{" " if is_ready else " not "}ready') + return is_ready + + def _load_spgwu_data(self): + logger.debug("Loading spgwu data from relation") + relation = self.framework.model.get_relation("spgwu") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.spgwu_ready = relation_data.get("ready") == "True" + logger.info("spgwu data loaded") + else: + self._stored.spgwu_ready = False + logger.warning("no relation found") + + def _configure_service(self): + if not self.service_exists(): + logger.debug("Cannot configure service: service does not exist yet") + return + logger.debug("Configuring gnb service") + container = 
self.unit.get_container("gnb") + container.add_layer( + "oai_gnb", + { + "services": { + "oai_gnb": { + "override": "merge", + "environment": { + "AMF_IP_ADDRESS": self._stored.amf_host, + }, + } + }, + }, + combine=True, + ) + logger.info("gnb service configured") + + def _patch_gnb_id(self, container): + logger.debug("Patching GNB id...") + gnb_id = self.unit.name[::-1].split("/")[0][::-1] + gnb_sa_tdd_conf = ( + container.pull("/opt/oai-gnb/etc/gnb.sa.tdd.conf") + .read() + .replace("0xe00", f"0xe0{gnb_id}") + ) + container.push("/opt/oai-gnb/etc/gnb.sa.tdd.conf", gnb_sa_tdd_conf) + logger.info(f"GNB patched with id {gnb_id}") + + def _add_oai_gnb_layer(self, container, pod_ip): + entrypoint = "/opt/oai-gnb/bin/entrypoint.sh" + command = " ".join( + ["/opt/oai-gnb/bin/nr-softmodem.Rel15", "-O", "/opt/oai-gnb/etc/gnb.conf"] + ) + pebble_layer = { + "summary": "oai_gnb layer", + "description": "pebble config layer for oai_gnb", + "services": { + "oai_gnb": { + "override": "replace", + "summary": "oai_gnb", + "command": f"{entrypoint} {command}", + "environment": { + "TZ": "Europe/Paris", + "RFSIMULATOR": "server", + "USE_SA_TDD_MONO": "yes", + "GNB_NAME": self.gnb_name, + "MCC": "208", + "MNC": "95", + "MNC_LENGTH": "2", + "TAC": "1", + "NSSAI_SST": "1", + "NSSAI_SD0": "1", + "NSSAI_SD1": "112233", + "GNB_NGA_IF_NAME": "eth0", + "GNB_NGA_IP_ADDRESS": str(pod_ip), + "GNB_NGU_IF_NAME": "eth0", + "GNB_NGU_IP_ADDRESS": str(pod_ip), + "USE_ADDITIONAL_OPTIONS": "--sa -E --rfsim", + }, + } + }, + } + container.add_layer("oai_gnb", pebble_layer, combine=True) + logger.info("oai_gnb layer added") + + +if __name__ == "__main__": + main(OaiGnbCharm) diff --git a/oai/oai-gnb-operator/src/utils.py b/oai/oai-gnb-operator/src/utils.py new file mode 100644 index 00000000..2fd511ce --- /dev/null +++ b/oai/oai-gnb-operator/src/utils.py @@ -0,0 +1,325 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. 
+ """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. + """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and 
self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 5 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs( + self, logs: Set[str] = {}, subsets_in_line: Set[str] = {}, wait: bool = False + ) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + if logs and subsets_in_line: + raise Exception("logs and subsets_in_line cannot both be defined") + elif not logs and not subsets_in_line: + raise Exception("logs or subsets_in_line must be defined") + + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + if logs: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info("all logs found") + break + else: + if all(subset in line for subset in subsets_in_line): + all_logs_found = True + logger.info("subset of strings found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-gnb-operator/tests/__init__.py b/oai/oai-gnb-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-gnb-operator/tests/test_charm.py b/oai/oai-gnb-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-gnb-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiGnbCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiGnbCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!)
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-nr-ue-operator/.flake8 b/oai/oai-nr-ue-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-nr-ue-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-nr-ue-operator/.gitignore b/oai/oai-nr-ue-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-nr-ue-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-nr-ue-operator/.jujuignore b/oai/oai-nr-ue-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-nr-ue-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-nr-ue-operator/CONTRIBUTING.md b/oai/oai-nr-ue-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-nr-ue-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-nr-ue + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment.
Just `run_tests`: + + ./run_tests diff --git a/oai/oai-nr-ue-operator/LICENSE b/oai/oai-nr-ue-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-nr-ue-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-nr-ue-operator/README.md b/oai/oai-nr-ue-operator/README.md new file mode 100644 index 00000000..a394aad8 --- /dev/null +++ b/oai/oai-nr-ue-operator/README.md @@ -0,0 +1,24 @@ +# oai-nr-ue + +## Description + +TODO: Describe your charm in a few paragraphs of Markdown + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Relations + +TODO: Provide any relations which are provided or required by your charm + +## OCI Images + +TODO: Include a link to the default image your charm uses + +## Contributing + +Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines +on enhancements to this charm following best practice guidelines, and +`CONTRIBUTING.md` for developer guidance. diff --git a/oai/oai-nr-ue-operator/actions.yaml b/oai/oai-nr-ue-operator/actions.yaml new file mode 100644 index 00000000..0844c615 --- /dev/null +++ b/oai/oai-nr-ue-operator/actions.yaml @@ -0,0 +1,4 @@ +stop: + description: Stop the NR-UE service +start: + description: Start the NR-UE service \ No newline at end of file diff --git a/oai/oai-nr-ue-operator/charmcraft.yaml b/oai/oai-nr-ue-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-nr-ue-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-nr-ue-operator/config.yaml b/oai/oai-nr-ue-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-nr-ue-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-nr-ue-operator/metadata.yaml b/oai/oai-nr-ue-operator/metadata.yaml new file mode 100644 index 00000000..e56a2d04 --- /dev/null +++ b/oai/oai-nr-ue-operator/metadata.yaml @@ -0,0 +1,27 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. 
+ +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-nr-ue +display-name: OAI nr-ue +description: OAI nr-ue +summary: OAI nr-ue + +containers: + nr-ue: + resource: oai-nr-ue-image + tcpdump: + resource: tcpdump-image + +resources: + oai-nr-ue-image: + type: oci-image + description: OCI image for oai-nr-ue (rdefosseoai/oai-nr-ue:develop) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +requires: + gnb: + interface: gnb + limit: 1 diff --git a/oai/oai-nr-ue-operator/requirements-dev.txt b/oai/oai-nr-ue-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-nr-ue-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-nr-ue-operator/requirements.txt b/oai/oai-nr-ue-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-nr-ue-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-nr-ue-operator/run_tests b/oai/oai-nr-ue-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-nr-ue-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-nr-ue-operator/src/charm.py b/oai/oai-nr-ue-operator/src/charm.py new file mode 100755 index 00000000..f1ead30c --- /dev/null +++ b/oai/oai-nr-ue-operator/src/charm.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. 
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +S1C_PORT = 36412 +S1U_PORT = 2152 +X2C_PORT = 36422 + + +class OaiNrUeCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("s1c", S1C_PORT, S1C_PORT, "UDP"), + ("s1u", S1U_PORT, S1U_PORT, "UDP"), + ("x2c", X2C_PORT, X2C_PORT, "UDP"), + ], + privileged=True, + container_name="nr-ue", + service_name="oai_nr_ue", + ) + # Observe charm events + event_observer_mapping = { + self.on.nr_ue_pebble_ready: self._on_oai_nr_ue_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.gnb_relation_changed: self._update_service, + self.on.gnb_relation_broken: self._update_service, + self.on.start_action: self._on_start_action, + self.on.stop_action: self._on_stop_action, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + # Set defaults in Stored State for the relation data + self._stored.set_default( + gnb_host=None, + gnb_port=None, + gnb_api_version=None, + ue_registered=False, + ) + + @property + def imsi(self): + return "208950000000031" + + #################################### + # Charm Events handlers + #################################### + + def _on_oai_nr_ue_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_nr_ue_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_stop(self, event): + pass + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _update_service(self, event): + try: + if not self.unit.is_leader(): + logger.warning("HA is not supported") + self.unit.status = BlockedStatus("HA is not supported") + return + logger.info("Updating service...") + if not self.service_exists(): + logger.warning("service does not exist") + return + # Load data from dependent relations + self._load_gnb_data() + relations_ready = self.is_gnb_ready + if not relations_ready: + self.unit.status = BlockedStatus("need gnb relation") + if self.is_service_running(): + self.stop_service() + return + else: + if not self.is_service_running(): + self._configure_service() + time.sleep(10) + self._backup_conf_files() + self.start_service() + relation = self.framework.model.get_relation("gnb") + if self._stored.ue_registered: + if relation and self.app in relation.data: + relation.data[self.app]["ue-status"] = "registered" + else: + if relation and self.app in relation.data: + relation.data[self.app]["ue-imsi"] = self.imsi + relation.data[self.app]["ue-status"] = "started" + if self._stored.ue_registered: + self.unit.status = ActiveStatus("registered") + else: + self.unit.status = WaitingStatus("waiting for registration") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_start_action(self, event): + try: + if not self.service_exists(): + event.set_results({"output": "service does not exist yet"}) + return + if not self.is_service_running(): + self.start_service() + 
event.set_results({"output": "service has been started successfully"}) + else: + event.set_results({"output": "service is already running"}) + except Exception as e: + event.fail(f"Action failed. Reason: {e}") + + def _on_stop_action(self, event): + try: + if not self.service_exists(): + event.set_results({"output": "service does not exist yet"}) + return + if self.is_service_running(): + self.stop_service() + self._restore_conf_files() + event.set_results({"output": "service has been stopped successfully"}) + else: + event.set_results({"output": "service is already stopped"}) + except Exception as e: + event.fail(f"Action failed. Reason: {e}") + + #################################### + # Utils - Services and configuration + #################################### + + @property + def is_gnb_ready(self): + is_ready = self._stored.gnb_host + logger.info(f'gnb is{" " if is_ready else " not "}ready') + return is_ready + + def _load_gnb_data(self): + logger.debug("Loading gnb data from relation") + relation = self.framework.model.get_relation("gnb") + if relation and relation.app in relation.data: + self._stored.gnb_host = relation.data[relation.app].get("host") + self._stored.ue_registered = ( + relation.data[relation.app].get(self.imsi) == "registered" + ) + logger.info("gnb data loaded") + else: + self._stored.gnb_host = None + self._stored.ue_registered = False + logger.warning("no relation found") + + def _configure_service(self): + if not self.service_exists(): + logger.debug("Cannot configure service: service does not exist yet") + return + logger.debug("Configuring nr-ue service") + container = self.unit.get_container("nr-ue") + container.add_layer( + "oai_nr_ue", + { + "services": { + "oai_nr_ue": { + "override": "merge", + "environment": { + "RFSIMULATOR": self._stored.gnb_host, + }, + } + }, + }, + combine=True, + ) + logger.info("nr-ue service configured") + + def _add_oai_nr_ue_layer(self, container): + entrypoint = "/opt/oai-nr-ue/bin/entrypoint.sh" + command = " ".join( + [ + "/opt/oai-nr-ue/bin/nr-uesoftmodem.Rel15", + "-O", + "/opt/oai-nr-ue/etc/nr-ue-sim.conf", + ] + ) + pebble_layer = { + "summary": "oai_nr_ue layer", + "description": "pebble config layer for oai_nr_ue", + "services": { + "oai_nr_ue": { + "override": "replace", + "summary": "oai_nr_ue", + "command": f"{entrypoint} {command}", + "environment": { + "TZ": "Europe/Paris", + "FULL_IMSI": self.imsi, + "FULL_KEY": "0C0A34601D4F07677303652C0462535B", + "OPC": "63bfa50ee6523365ff14c1f45f88737d", + "DNN": "oai", + "NSSAI_SST": "1", + "NSSAI_SD": "1", + "USE_ADDITIONAL_OPTIONS": "-E --sa --rfsim -r 106 --numerology 1 -C 3619200000 --nokrnmod", + }, + } + }, + } + container.add_layer("oai_nr_ue", pebble_layer, combine=True) + logger.info("oai_nr_ue layer added") + + def _backup_conf_files(self): + container = self.unit.get_container("nr-ue") + root_folder = "/opt/oai-nr-ue" + files = {f"{root_folder}/etc/nr-ue-sim.conf"} + for file in files: + file_content = container.pull(file).read() + container.push(f"{file}_bkp", file_content) + + def _restore_conf_files(self): + container = self.unit.get_container("nr-ue") + root_folder = "/opt/oai-nr-ue" + files = {f"{root_folder}/etc/nr-ue-sim.conf"} + for file in files: + file_content = container.pull(f"{file}_bkp").read() + container.push(file, file_content) + +if __name__ == "__main__": + main(OaiNrUeCharm) diff --git a/oai/oai-nr-ue-operator/src/utils.py b/oai/oai-nr-ue-operator/src/utils.py new file mode 100644 index 00000000..2fd511ce --- /dev/null +++ 
b/oai/oai-nr-ue-operator/src/utils.py @@ -0,0 +1,325 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. 
+ """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = 
self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 10 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs( + self, logs: Set[str] = {}, subsets_in_line: Set[str] = {}, wait: bool = False + ) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + if logs and subsets_in_line: + raise Exception("logs and subsets_in_line cannot both be defined") + elif not logs and not subsets_in_line: + raise Exception("logs or subsets_in_line must be defined") + + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + if logs: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info("all logs found") + break + else: + if all(subset in line for subset in subsets_in_line): + all_logs_found = True + logger.info("subset of strings found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-nr-ue-operator/tests/__init__.py b/oai/oai-nr-ue-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-nr-ue-operator/tests/test_charm.py b/oai/oai-nr-ue-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-nr-ue-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiAmfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiAmfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-nrf-operator/.flake8 b/oai/oai-nrf-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-nrf-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-nrf-operator/.gitignore b/oai/oai-nrf-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-nrf-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-nrf-operator/.jujuignore b/oai/oai-nrf-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-nrf-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-nrf-operator/CONTRIBUTING.md b/oai/oai-nrf-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-nrf-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-nrf + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. 
Just `run_tests`: + + ./run_tests diff --git a/oai/oai-nrf-operator/LICENSE b/oai/oai-nrf-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-nrf-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-nrf-operator/README.md b/oai/oai-nrf-operator/README.md new file mode 100644 index 00000000..b4bbf863 --- /dev/null +++ b/oai/oai-nrf-operator/README.md @@ -0,0 +1 @@ +# oai-nrf diff --git a/oai/oai-nrf-operator/charmcraft.yaml b/oai/oai-nrf-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-nrf-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-nrf-operator/config.yaml b/oai/oai-nrf-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-nrf-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-nrf-operator/metadata.yaml b/oai/oai-nrf-operator/metadata.yaml new file mode 100644 index 00000000..a93445cd --- /dev/null +++ b/oai/oai-nrf-operator/metadata.yaml @@ -0,0 +1,26 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-nrf +display-name: OAI nrf +description: OAI nrf +summary: OAI nrf + +containers: + nrf: + resource: oai-nrf-image + tcpdump: + resource: tcpdump-image + +resources: + oai-nrf-image: + type: oci-image + description: OCI image for oai-nrf (rdefosseoai/oai-nrf:v1.1.0) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +provides: + nrf: + interface: nrf diff --git a/oai/oai-nrf-operator/requirements-dev.txt b/oai/oai-nrf-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-nrf-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-nrf-operator/requirements.txt b/oai/oai-nrf-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-nrf-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-nrf-operator/run_tests b/oai/oai-nrf-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-nrf-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . 
venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-nrf-operator/src/charm.py b/oai/oai-nrf-operator/src/charm.py new file mode 100755 index 00000000..ce1bf4b6 --- /dev/null +++ b/oai/oai-nrf-operator/src/charm.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. + +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +HTTP1_PORT = 80 +HTTP2_PORT = 9090 + + +class OaiNrfCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("http1", HTTP1_PORT, HTTP1_PORT, "TCP"), + ("http2", HTTP2_PORT, HTTP2_PORT, "TCP"), + ], + privileged=True, + container_name="nrf", + service_name="oai_nrf", + ) + # Observe charm events + event_observer_mapping = { + self.on.nrf_pebble_ready: self._on_oai_nrf_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.nrf_relation_joined: self._on_nrf_relation_joined, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + + #################################### + # Charm Events handlers + #################################### + + def _on_oai_nrf_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_nrf_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_stop(self, event): + pass + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _on_nrf_relation_joined(self, event): + try: + if self.is_service_running() and self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _update_service(self, event): + try: + if self.service_exists() and not self.is_service_running(): + self.start_service() + self._wait_until_service_is_active() + if self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + for relation in self.framework.model.relations["nrf"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["host"] = self.app.name + relation.data[self.app]["port"] = str(HTTP1_PORT) + relation.data[self.app]["api-version"] = "v1" + logger.info(f"Info provided in relation {relation.name} (id {relation.id})") + + def _wait_until_service_is_active(self): + logger.debug("Waiting for service to be active") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs({"[info ] HTTP1 server started"}, wait=True) + if active: + # wait extra time + 
time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + def _add_oai_nrf_layer(self, container): + entrypoint = "/bin/bash /openair-nrf/bin/entrypoint.sh" + command = " ".join( + ["/openair-nrf/bin/oai_nrf", "-c", "/openair-nrf/etc/nrf.conf", "-o"] + ) + pebble_layer = { + "summary": "oai_nrf layer", + "description": "pebble config layer for oai_nrf", + "services": { + "oai_nrf": { + "override": "replace", + "summary": "oai_nrf", + "command": f"{entrypoint} {command}", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + "INSTANCE": "0", + "PID_DIRECTORY": "/var/run", + "NRF_INTERFACE_NAME_FOR_SBI": "eth0", + "NRF_INTERFACE_PORT_FOR_SBI": "80", + "NRF_INTERFACE_HTTP2_PORT_FOR_SBI": "9090", + "NRF_API_VERSION": "v1", + }, + } + }, + } + container.add_layer("oai_nrf", pebble_layer, combine=True) + logger.info("oai_nrf layer added") + + +if __name__ == "__main__": + main(OaiNrfCharm, use_juju_for_storage=True) diff --git a/oai/oai-nrf-operator/src/utils.py b/oai/oai-nrf-operator/src/utils.py new file mode 100644 index 00000000..2fd511ce --- /dev/null +++ b/oai/oai-nrf-operator/src/utils.py @@ -0,0 +1,325 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. 
This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. + """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + 
combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 5 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs( + self, logs: Set[str] = set(), subsets_in_line: Set[str] = set(), wait: bool = False + ) -> bool: + """ + Search a list of logs in the container and service + + :param: logs: List of logs to be found + :param: subsets_in_line: Set of substrings that must all appear in a single log line + :param: wait: Bool to wait until those logs are found + """ + if logs and subsets_in_line: + raise Exception("logs and subsets_in_line cannot both be defined") + elif not logs and not subsets_in_line: + raise Exception("logs or subsets_in_line must be defined") + + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + if logs: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info("all logs found") + break + else: + if all(subset in line for subset in subsets_in_line): + all_logs_found = True + logger.info("subset of strings found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-nrf-operator/tests/__init__.py b/oai/oai-nrf-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-nrf-operator/tests/test_charm.py b/oai/oai-nrf-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-nrf-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiNrfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiNrfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-smf-operator/.flake8 b/oai/oai-smf-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-smf-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-smf-operator/.gitignore b/oai/oai-smf-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-smf-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-smf-operator/.jujuignore b/oai/oai-smf-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-smf-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-smf-operator/CONTRIBUTING.md b/oai/oai-smf-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-smf-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-smf + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. 
Just `run_tests`: + + ./run_tests diff --git a/oai/oai-smf-operator/LICENSE b/oai/oai-smf-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-smf-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-smf-operator/README.md b/oai/oai-smf-operator/README.md new file mode 100644 index 00000000..e2231095 --- /dev/null +++ b/oai/oai-smf-operator/README.md @@ -0,0 +1 @@ +# oai-smf diff --git a/oai/oai-smf-operator/charmcraft.yaml b/oai/oai-smf-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-smf-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-smf-operator/config.yaml b/oai/oai-smf-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-smf-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-smf-operator/metadata.yaml b/oai/oai-smf-operator/metadata.yaml new file mode 100644 index 00000000..a7d37f2f --- /dev/null +++ b/oai/oai-smf-operator/metadata.yaml @@ -0,0 +1,33 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-smf +display-name: OAI smf +description: OAI smf +summary: OAI smf + +containers: + smf: + resource: oai-smf-image + tcpdump: + resource: tcpdump-image + +resources: + oai-smf-image: + type: oci-image + description: OCI image for oai-smf (rdefosseoai/oai-smf:v1.1.0) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +requires: + nrf: + interface: nrf + limit: 1 + amf: + interface: amf + limit: 1 +provides: + smf: + interface: smf diff --git a/oai/oai-smf-operator/requirements-dev.txt b/oai/oai-smf-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-smf-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-smf-operator/requirements.txt b/oai/oai-smf-operator/requirements.txt new file mode 100644 index 00000000..3b241650 --- /dev/null +++ b/oai/oai-smf-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes diff --git a/oai/oai-smf-operator/run_tests b/oai/oai-smf-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-smf-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . 
venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-smf-operator/src/charm.py b/oai/oai-smf-operator/src/charm.py new file mode 100755 index 00000000..e4bba162 --- /dev/null +++ b/oai/oai-smf-operator/src/charm.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. + +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +SMF_PORT = 8805 +HTTP1_PORT = 80 +HTTP2_PORT = 9090 + + +class OaiSmfCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("oai-smf", SMF_PORT, SMF_PORT, "UDP"), + ("http1", HTTP1_PORT, HTTP1_PORT, "TCP"), + ("http2", HTTP2_PORT, HTTP2_PORT, "TCP"), + ], + privileged=True, + container_name="smf", + service_name="oai_smf", + ) + # Observe charm events + event_observer_mapping = { + self.on.smf_pebble_ready: self._on_oai_smf_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.smf_relation_joined: self._on_smf_relation_joined, + self.on.amf_relation_changed: self._update_service, + self.on.amf_relation_broken: self._update_service, + self.on.nrf_relation_changed: self._update_service, + self.on.nrf_relation_broken: self._update_service, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + # Set defaults in Stored State for the relation data + self._stored.set_default( + amf_host=None, + amf_port=None, + amf_api_version=None, + nrf_host=None, + nrf_port=None, + nrf_api_version=None, + ) + + #################################### + # Charm events handlers + #################################### + + def _on_oai_smf_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_smf_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_stop(self, event): + pass + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _on_smf_relation_joined(self, event): + try: + if self.unit.is_leader() and self.is_service_running(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _update_service(self, event): + try: + logger.info("Updating service...") + if not self.service_exists(): + logger.warning("service does not exist") + return + # Load data from dependent relations + self._load_amf_data() + self._load_nrf_data() + relations_ready = self.is_nrf_ready and self.is_amf_ready + if not relations_ready: + self.unit.status = BlockedStatus("need nrf and amf relations") + if self.is_service_running(): + self.stop_service() + elif not self.is_service_running(): + self._configure_service() + self.start_service() + self._wait_until_service_is_active() + if self.unit.is_leader(): + 
self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + for relation in self.framework.model.relations["smf"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["ready"] = str(True) + logger.info(f"Info provided in relation {relation.name} (id {relation.id})") + + def _wait_until_service_is_active(self): + logger.debug("Waiting for service to be active...") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs( + { + "[smf_sbi] [start] Started", + "[smf_app] [start] Started", + "[sbi_srv] [info ] HTTP1 server started", + "[sbi_srv] [info ] HTTP2 server started", + }, + wait=True, + ) + if active: + # wait extra time + time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + @property + def is_amf_ready(self): + is_ready = ( + self._stored.amf_host + and self._stored.amf_port + and self._stored.amf_api_version + ) + logger.info(f'amf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_amf_data(self): + logger.debug("Loading amf data from relation") + relation = self.framework.model.get_relation("amf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.amf_host = relation_data.get("host") + self._stored.amf_port = relation_data.get("port") + self._stored.amf_api_version = relation_data.get("api-version") + logger.info("amf data loaded") + else: + self._stored.amf_host = None + self._stored.amf_port = None + self._stored.amf_api_version = None + logger.warning("no relation found") + + @property + def is_nrf_ready(self): + is_ready = ( + self._stored.nrf_host + and self._stored.nrf_port + and self._stored.nrf_api_version + ) + logger.info(f'nrf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_nrf_data(self): + logger.debug("Loading nrf data from relation") + relation = self.framework.model.get_relation("nrf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.nrf_host = relation_data.get("host") + self._stored.nrf_port = relation_data.get("port") + self._stored.nrf_api_version = relation_data.get("api-version") + logger.info("nrf data loaded") + else: + self._stored.nrf_host = None + self._stored.nrf_port = None + self._stored.nrf_api_version = None + logger.warning("no relation found") + + def _configure_service(self): + if not self.service_exists(): + logger.debug("Cannot configure service: service does not exist yet") + return + logger.debug("Configuring smf service") + container = self.unit.get_container("smf") + container.add_layer( + "oai_smf", + { + "services": { + "oai_smf": { + "override": "merge", + "environment": { + "NRF_FQDN": self._stored.nrf_host, + "NRF_IPV4_ADDRESS": "127.0.0.1", + "NRF_PORT": self._stored.nrf_port, + "NRF_API_VERSION": self._stored.nrf_api_version, + "AMF_IPV4_ADDRESS": "127.0.0.1", + "AMF_PORT": self._stored.amf_port, + "AMF_API_VERSION": self._stored.amf_api_version, + "AMF_FQDN": self._stored.amf_host, + }, + } + }, + }, + combine=True, + ) + logger.info("smf service configured") + + def _add_oai_smf_layer(self, container): + entrypoint = "/bin/bash /openair-smf/bin/entrypoint.sh" + command = " 
".join( + ["/openair-smf/bin/oai_smf", "-c", "/openair-smf/etc/smf.conf", "-o"] + ) + pebble_layer = { + "summary": "oai_smf layer", + "description": "pebble config layer for oai_smf", + "services": { + "oai_smf": { + "override": "replace", + "summary": "oai_smf", + "command": f"{entrypoint} {command}", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + "INSTANCE": "0", + "PID_DIRECTORY": "/var/run", + "SMF_INTERFACE_NAME_FOR_N4": "eth0", + "SMF_INTERFACE_NAME_FOR_SBI": "eth0", + "SMF_INTERFACE_PORT_FOR_SBI": "80", + "SMF_INTERFACE_HTTP2_PORT_FOR_SBI": "9090", + "SMF_API_VERSION": "v1", + "DEFAULT_DNS_IPV4_ADDRESS": "8.8.8.8", + "DEFAULT_DNS_SEC_IPV4_ADDRESS": "8.8.4.4", + "REGISTER_NRF": "yes", + "DISCOVER_UPF": "yes", + "USE_FQDN_DNS": "yes", + "UDM_IPV4_ADDRESS": "127.0.0.1", + "UDM_PORT": "80", + "UDM_API_VERSION": "v1", + "UDM_FQDN": "localhost", + "UPF_IPV4_ADDRESS": "127.0.0.1", + "UPF_FQDN_0": "localhost", + }, + } + }, + } + container.add_layer("oai_smf", pebble_layer, combine=True) + logger.info("oai_smf layer added") + + +if __name__ == "__main__": + main(OaiSmfCharm) diff --git a/oai/oai-smf-operator/src/utils.py b/oai/oai-smf-operator/src/utils.py new file mode 100644 index 00000000..93fb5bc3 --- /dev/null +++ b/oai/oai-smf-operator/src/utils.py @@ -0,0 +1,312 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. 
This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. + """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + 
combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 10 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs(self, logs: Set, wait: bool = False) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info(f"all logs found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-smf-operator/tests/__init__.py b/oai/oai-smf-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-smf-operator/tests/test_charm.py b/oai/oai-smf-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-smf-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiAmfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiAmfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/oai/oai-spgwu-tiny-operator/.flake8 b/oai/oai-spgwu-tiny-operator/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/oai/oai-spgwu-tiny-operator/.gitignore b/oai/oai-spgwu-tiny-operator/.gitignore new file mode 100644 index 00000000..2c3f0e5e --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/.gitignore @@ -0,0 +1,7 @@ +venv/ +build/ +*.charm + +.coverage +__pycache__/ +*.py[cod] diff --git a/oai/oai-spgwu-tiny-operator/.jujuignore b/oai/oai-spgwu-tiny-operator/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/oai/oai-spgwu-tiny-operator/CONTRIBUTING.md b/oai/oai-spgwu-tiny-operator/CONTRIBUTING.md new file mode 100644 index 00000000..3bba37ce --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# oai-spgwu-tiny + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? 
+ +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/oai/oai-spgwu-tiny-operator/LICENSE b/oai/oai-spgwu-tiny-operator/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/oai/oai-spgwu-tiny-operator/README.md b/oai/oai-spgwu-tiny-operator/README.md new file mode 100644 index 00000000..6713c5e1 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/README.md @@ -0,0 +1,24 @@ +# oai-spgwu-tiny + +## Description + +TODO: Describe your charm in a few paragraphs of Markdown + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Relations + +TODO: Provide any relations which are provided or required by your charm + +## OCI Images + +TODO: Include a link to the default image your charm uses + +## Contributing + +Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines +on enhancements to this charm following best practice guidelines, and +`CONTRIBUTING.md` for developer guidance. diff --git a/oai/oai-spgwu-tiny-operator/charmcraft.yaml b/oai/oai-spgwu-tiny-operator/charmcraft.yaml new file mode 100644 index 00000000..048d4544 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/charmcraft.yaml @@ -0,0 +1,10 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" diff --git a/oai/oai-spgwu-tiny-operator/config.yaml b/oai/oai-spgwu-tiny-operator/config.yaml new file mode 100644 index 00000000..72416729 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/config.yaml @@ -0,0 +1,12 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about config at: https://juju.is/docs/sdk/config + +options: + start-tcpdump: + default: False + description: | + start tcpdump collection to analyse but beware + it will take a lot of space in the container/persistent volume. + type: boolean diff --git a/oai/oai-spgwu-tiny-operator/metadata.yaml b/oai/oai-spgwu-tiny-operator/metadata.yaml new file mode 100644 index 00000000..032009c7 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/metadata.yaml @@ -0,0 +1,33 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. 
+ +# For a complete list of supported options, see: +# https://discourse.charmhub.io/t/charm-metadata-v2/3674/15 +name: oai-spgwu-tiny +display-name: OAI spgwu-tiny +description: OAI spgwu-tiny +summary: OAI spgwu-tiny + +containers: + spgwu-tiny: + resource: oai-spgwu-tiny-image + tcpdump: + resource: tcpdump-image + +resources: + oai-spgwu-tiny-image: + type: oci-image + description: OCI image for oai-spgwu-tiny (rdefosseoai/oai-spgwu-tiny:v1.1.2) + tcpdump-image: + type: oci-image + description: OCI image for tcpdump (corfr/tcpdump:latest) +requires: + nrf: + interface: nrf + limit: 1 + smf: + interface: smf + limit: 1 +provides: + spgwu: + interface: spgwu diff --git a/oai/oai-spgwu-tiny-operator/requirements-dev.txt b/oai/oai-spgwu-tiny-operator/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/oai/oai-spgwu-tiny-operator/requirements.txt b/oai/oai-spgwu-tiny-operator/requirements.txt new file mode 100644 index 00000000..964c4f26 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/requirements.txt @@ -0,0 +1,2 @@ +ops >= 1.2.0 +kubernetes \ No newline at end of file diff --git a/oai/oai-spgwu-tiny-operator/run_tests b/oai/oai-spgwu-tiny-operator/run_tests new file mode 100755 index 00000000..d59be2c6 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/oai/oai-spgwu-tiny-operator/src/charm.py b/oai/oai-spgwu-tiny-operator/src/charm.py new file mode 100755 index 00000000..a2748551 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/src/charm.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. 
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import time + +from ops.main import main +from ops.model import ActiveStatus, BlockedStatus, WaitingStatus +from ops.pebble import ConnectionError + + +from utils import OaiCharm + +logger = logging.getLogger(__name__) + +SPGWU_PORT = 8805 +S1U_PORT = 2152 +IPERF = 5001 + + +class OaiSpgwuTinyCharm(OaiCharm): + """Charm the service.""" + + def __init__(self, *args): + super().__init__( + *args, + tcpdump=True, + ports=[ + ("oai-spgwu-tiny", SPGWU_PORT, SPGWU_PORT, "UDP"), + ("s1u", S1U_PORT, S1U_PORT, "UDP"), + ("iperf", IPERF, IPERF, "UDP"), + ], + privileged=True, + container_name="spgwu-tiny", + service_name="oai_spgwu_tiny", + ) + # Observe charm events + event_observer_mapping = { + self.on.spgwu_tiny_pebble_ready: self._on_oai_spgwu_tiny_pebble_ready, + # self.on.stop: self._on_stop, + self.on.config_changed: self._on_config_changed, + self.on.spgwu_relation_joined: self._on_spgwu_relation_joined, + self.on.nrf_relation_changed: self._update_service, + self.on.nrf_relation_broken: self._update_service, + self.on.smf_relation_changed: self._update_service, + self.on.smf_relation_broken: self._update_service, + } + for event, observer in event_observer_mapping.items(): + self.framework.observe(event, observer) + # Set defaults in Stored State for the relation data + self._stored.set_default( + nrf_host=None, + nrf_port=None, + nrf_api_version=None, + smf_ready=False, + ) + + #################################### + # Charm Events handlers + #################################### + + def _on_oai_spgwu_tiny_pebble_ready(self, event): + try: + container = event.workload + self._add_oai_spgwu_tiny_layer(container) + self._update_service(event) + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _on_stop(self, event): + pass + + def _on_config_changed(self, event): + self.update_tcpdump_service(event) + + def _on_spgwu_relation_joined(self, event): + try: + if self.unit.is_leader() and self.is_service_running(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _update_service(self, event): + try: + logger.info("Updating service...") + if not self.service_exists(): + logger.warning("service does not exist") + return + # Load data from dependent relations + self._load_nrf_data() + self._load_smf_data() + relations_ready = self.is_nrf_ready and self.is_smf_ready + if not relations_ready: + self.unit.status = BlockedStatus("need nrf and smf relations") + if self.is_service_running(): + self.stop_service() + elif not self.is_service_running(): + self._configure_service() + self.start_service() + self._wait_until_service_is_active() + if self.unit.is_leader(): + self._provide_service_info() + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + #################################### + # Utils - Services and configuration + #################################### + + def _provide_service_info(self): + for relation in self.framework.model.relations["spgwu"]: + logger.debug(f"Found relation {relation.name} with id {relation.id}") + relation.data[self.app]["ready"] = str(True) + logger.info(f"Info provided in relation {relation.name} (id {relation.id})") + + def 
_wait_until_service_is_active(self): + logger.debug("Waiting for service to be active...") + self.unit.status = WaitingStatus("Waiting for service to be active...") + active = self.search_logs( + {"[spgwu_app] [start] Started", "Got successful response from NRF"}, + wait=True, + ) + if active: + # wait extra time + time.sleep(10) + self.unit.status = ActiveStatus() + else: + self.unit.status = BlockedStatus("service couldn't start") + + @property + def is_nrf_ready(self): + is_ready = ( + self._stored.nrf_host + and self._stored.nrf_port + and self._stored.nrf_api_version + ) + logger.info(f'nrf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_nrf_data(self): + logger.debug("Loading nrf data from relation") + relation = self.framework.model.get_relation("nrf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.nrf_host = relation_data.get("host") + self._stored.nrf_port = relation_data.get("port") + self._stored.nrf_api_version = relation_data.get("api-version") + logger.info("nrf data loaded") + else: + self._stored.nrf_host = None + self._stored.nrf_port = None + self._stored.nrf_api_version = None + logger.warning("no relation found") + + @property + def is_smf_ready(self): + is_ready = self._stored.smf_ready + logger.info(f'smf is{" " if is_ready else " not "}ready') + return is_ready + + def _load_smf_data(self): + logger.debug("Loading smf data from relation") + relation = self.framework.model.get_relation("smf") + if relation and relation.app in relation.data: + relation_data = relation.data[relation.app] + self._stored.smf_ready = relation_data.get("ready") == "True" + logger.info("smf data loaded") + else: + self._stored.smf_ready = False + logger.warning("no relation found") + + def _configure_service(self): + if not self.service_exists(): + logger.debug("Cannot configure service: service does not exist yet") + return + logger.debug("Configuring spgwu service") + container = self.unit.get_container("spgwu-tiny") + if self.service_name in container.get_plan().services: + container.add_layer( + "oai_spgwu_tiny", + { + "services": { + "oai_spgwu_tiny": { + "override": "merge", + "environment": { + "REGISTER_NRF": "yes", + "USE_FQDN_NRF": "yes", + "NRF_FQDN": self._stored.nrf_host, + "NRF_IPV4_ADDRESS": "127.0.0.1", + "NRF_PORT": self._stored.nrf_port, + "NRF_API_VERSION": self._stored.nrf_api_version, + }, + } + }, + }, + combine=True, + ) + logger.info("spgwu service configured") + + def _add_oai_spgwu_tiny_layer(self, container): + entrypoint = "/bin/bash /openair-spgwu-tiny/bin/entrypoint.sh" + command = " ".join( + [ + "/openair-spgwu-tiny/bin/oai_spgwu", + "-c", + "/openair-spgwu-tiny/etc/spgw_u.conf", + "-o", + ] + ) + pebble_layer = { + "summary": "oai_spgwu_tiny layer", + "description": "pebble config layer for oai_spgwu_tiny", + "services": { + "oai_spgwu_tiny": { + "override": "replace", + "summary": "oai_spgwu_tiny", + "command": f"{entrypoint} {command}", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + "GW_ID": "1", + "MCC": "208", + "MNC03": "95", + "REALM": "3gpp.org", + "PID_DIRECTORY": "/var/run", + "SGW_INTERFACE_NAME_FOR_S1U_S12_S4_UP": "eth0", + "THREAD_S1U_PRIO": "98", + "S1U_THREADS": "1", + "SGW_INTERFACE_NAME_FOR_SX": "eth0", + "THREAD_SX_PRIO": "98", + "SX_THREADS": "1", + "PGW_INTERFACE_NAME_FOR_SGI": "eth0", + "THREAD_SGI_PRIO": "98", + "SGI_THREADS": "1", + "NETWORK_UE_NAT_OPTION": "yes", + "GTP_EXTENSION_HEADER_PRESENT": "yes", + 
"NETWORK_UE_IP": "12.1.1.0/24", + "SPGWC0_IP_ADDRESS": "127.0.0.1", + "BYPASS_UL_PFCP_RULES": "no", + "ENABLE_5G_FEATURES": "yes", + "NSSAI_SST_0": "1", + "NSSAI_SD_0": "1", + "DNN_0": "oai", + "UPF_FQDN_5G": self.app.name, + }, + } + }, + } + container.add_layer("oai_spgwu_tiny", pebble_layer, combine=True) + logger.info("oai_spgwu_tiny layer added") + + +if __name__ == "__main__": + main(OaiSpgwuTinyCharm, use_juju_for_storage=True) diff --git a/oai/oai-spgwu-tiny-operator/src/utils.py b/oai/oai-spgwu-tiny-operator/src/utils.py new file mode 100644 index 00000000..93fb5bc3 --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/src/utils.py @@ -0,0 +1,312 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import os +import time +from typing import List, Set, Tuple, Optional + +import kubernetes +from ops.charm import CharmBase +from ops.model import MaintenanceStatus +from ops.pebble import ConnectionError +from ops.framework import StoredState +from ipaddress import IPv4Address +import subprocess + + +class PatchFailed(RuntimeError): + """Patching the kubernetes service failed.""" + + +class K8sServicePatch: + """A utility for patching the Kubernetes service set up by Juju. + Attributes: + namespace_file (str): path to the k8s namespace file in the charm container + """ + + namespace_file = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + + @staticmethod + def namespace() -> str: + """Read the Kubernetes namespace we're deployed in from the mounted service token. + Returns: + str: The current Kubernetes namespace + """ + with open(K8sServicePatch.namespace_file, "r") as f: + return f.read().strip() + + @staticmethod + def _k8s_service( + app: str, service_ports: List[Tuple[str, int, int, str]] + ) -> kubernetes.client.V1Service: + """Property accessor to return a valid Kubernetes Service representation for Alertmanager. + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Returns: + kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and + ports. + """ + ports = [ + kubernetes.client.V1ServicePort( + name=port[0], port=port[1], target_port=port[2], protocol=port[3] + ) + for port in service_ports + ] + + ns = K8sServicePatch.namespace() + return kubernetes.client.V1Service( + api_version="v1", + metadata=kubernetes.client.V1ObjectMeta( + namespace=ns, + name=app, + labels={"app.kubernetes.io/name": app}, + ), + spec=kubernetes.client.V1ServiceSpec( + ports=ports, + selector={"app.kubernetes.io/name": app}, + ), + ) + + @staticmethod + def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]): + """Patch the Kubernetes service created by Juju to map the correct port. + Currently, Juju uses port 65535 for all endpoints. This can be observed via: + kubectl describe services -n | grep Port -C 2 + At runtime, pebble watches which ports are bound and we need to patch the gap for pebble + not telling Juju to fix the K8S Service definition. + Typical usage example from within charm code (e.g. on_install): + service_ports = [("my-app-api", 9093, 9093), ("my-app-ha", 9094, 9094)] + K8sServicePatch.set_ports(self.app.name, service_ports) + Args: + app: app name + service_ports: a list of tuples (name, port, target_port) for every service port. + Raises: + PatchFailed: if patching fails. 
+ """ + # First ensure we're authenticated with the Kubernetes API + + ns = K8sServicePatch.namespace() + # Set up a Kubernetes client + api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient()) + try: + # Delete the existing service so we can redefine with correct ports + # I don't think you can issue a patch that *replaces* the existing ports, + # only append + api.delete_namespaced_service(name=app, namespace=ns) + # Recreate the service with the correct ports for the application + api.create_namespaced_service( + namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports) + ) + except kubernetes.client.exceptions.ApiException as e: + raise PatchFailed("Failed to patch k8s service: {}".format(e)) + + +logger = logging.getLogger(__name__) + + +class OaiCharm(CharmBase): + """Oai Base Charm.""" + + _stored = StoredState() + + def __init__( + self, + *args, + tcpdump: bool = False, + ports=None, + privileged: bool = False, + container_name=None, + service_name, + ): + super().__init__(*args) + + self.ports = ports + self.privileged = privileged + self.container_name = container_name + self.service_name = service_name + + event_mapping = { + self.on.install: self._on_install, + } + if tcpdump: + event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready + for event, observer in event_mapping.items(): + self.framework.observe(event, observer) + + self._stored.set_default( + _k8s_stateful_patched=False, + _k8s_authed=False, + ) + + def _on_install(self, _=None): + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if not self._stored._k8s_authed: + kubernetes.config.load_incluster_config() + self._stored._k8s_authed = True + if self.privileged: + self._patch_stateful_set() + K8sServicePatch.set_ports(self.app.name, self.ports) + + def _on_tcpdump_pebble_ready(self, event): + self.update_tcpdump_service(event) + + def update_tcpdump_service(self, event): + try: + self._configure_tcpdump_service() + if ( + self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and not self.is_service_running("tcpdump", "tcpdump") + ): + self.start_service("tcpdump", "tcpdump") + elif ( + not self.config["start-tcpdump"] + and self.service_exists("tcpdump", "tcpdump") + and self.is_service_running("tcpdump", "tcpdump") + ): + self.stop_service("tcpdump", "tcpdump") + except ConnectionError: + logger.info("pebble socket not available, deferring config-changed") + event.defer() + + def _configure_tcpdump_service(self): + container = self.unit.get_container("tcpdump") + container.add_layer( + "tcpdump", + { + "summary": "tcpdump layer", + "description": "pebble config layer for tcpdump", + "services": { + "tcpdump": { + "override": "replace", + "summary": "tcpdump", + "command": f"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap", + "environment": { + "DEBIAN_FRONTEND": "noninteractive", + "TZ": "Europe/Paris", + }, + } + }, + }, + combine=True, + ) + + def start_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + logger.info(f"{container.get_plan()}") + container.start(service_name) + + def stop_service(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = 
self.unit.get_container(container_name) + container.stop(service_name) + + def is_service_running(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + is_running = ( + service_name in container.get_plan().services + and container.get_service(service_name).is_running() + ) + logger.info(f"container {self.container_name} is running: {is_running}") + return is_running + + def service_exists(self, container_name=None, service_name=None): + if not container_name: + container_name = self.container_name + if not service_name: + service_name = self.service_name + container = self.unit.get_container(container_name) + service_exists = service_name in container.get_plan().services + logger.info(f"service {service_name} exists: {service_exists}") + return service_exists + + def _patch_stateful_set(self) -> None: + """Patch the StatefulSet to include specific ServiceAccount and Secret mounts""" + if self._stored._k8s_stateful_patched: + return + + # Get an API client + api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient()) + for attempt in range(5): + try: + self.unit.status = MaintenanceStatus( + f"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5" + ) + s = api.read_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace + ) + # Add the required security context to the container spec + s.spec.template.spec.containers[1].security_context.privileged = True + + # Patch the StatefulSet with our modified object + api.patch_namespaced_stateful_set( + name=self.app.name, namespace=self.namespace, body=s + ) + logger.info( + "Patched StatefulSet to include additional volumes and mounts" + ) + self._stored._k8s_stateful_patched = True + return + except Exception as e: + self.unit.status = MaintenanceStatus( + "failed patching StatefulSet... 
Retrying in 10 seconds" + ) + time.sleep(5) + + @property + def namespace(self) -> str: + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() + + @property + def pod_ip(self) -> Optional[IPv4Address]: + return IPv4Address( + subprocess.check_output(["unit-get", "private-address"]).decode().strip() + ) + + def search_logs(self, logs: Set, wait: bool = False) -> bool: + """ + Search list of logs in the container and service + + :param: logs: List of logs to be found + :param: wait: Bool to wait until those logs are found + """ + found_logs = set() + os.environ[ + "PEBBLE_SOCKET" + ] = f"/charm/containers/{self.container_name}/pebble.socket" + p = subprocess.Popen( + f'/charm/bin/pebble logs {self.service_name} {"-f" if wait else ""} -n all', + stdout=subprocess.PIPE, + shell=True, + encoding="utf-8", + ) + all_logs_found = False + for line in p.stdout: + for log in logs: + if log in line: + found_logs.add(log) + logger.info(f"{log} log found") + break + + if all(log in found_logs for log in logs): + all_logs_found = True + logger.info(f"all logs found") + break + p.kill() + return all_logs_found diff --git a/oai/oai-spgwu-tiny-operator/tests/__init__.py b/oai/oai-spgwu-tiny-operator/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/oai/oai-spgwu-tiny-operator/tests/test_charm.py b/oai/oai-spgwu-tiny-operator/tests/test_charm.py new file mode 100644 index 00000000..aa1be8cf --- /dev/null +++ b/oai/oai-spgwu-tiny-operator/tests/test_charm.py @@ -0,0 +1,68 @@ +# Copyright 2021 David Garcia +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from charm import OaiAmfCharm +from ops.model import ActiveStatus +from ops.testing import Harness + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(OaiAmfCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_config_changed(self): + self.assertEqual(list(self.harness.charm._stored.things), []) + self.harness.update_config({"thing": "foo"}) + self.assertEqual(list(self.harness.charm._stored.things), ["foo"]) + + def test_action(self): + # the harness doesn't (yet!) 
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service( + "httpbin" + ) + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) -- GitLab