From 501873e9b7f9bb3a522b8c566d71ae0731131591 Mon Sep 17 00:00:00 2001
From: Dominik Fleischmann
Date: Thu, 2 Jul 2020 13:05:05 +0200
Subject: [PATCH] Add K8s Proxy Charm Packages

These are the packages used to test the K8s proxy charm feature.

Signed-off-by: Dominik Fleischmann
---
 .../hackfest_k8sproxycharm_nsd.yaml | 39 +
 hackfest_k8sproxycharm_ns/icons/osm.png | Bin 0 -> 55888 bytes
 hackfest_k8sproxycharm_vnf/README | 0
 .../charms/charm-simple-k8s/.gitignore | 1 +
 .../charms/charm-simple-k8s/.gitmodules | 9 +
 .../charms/charm-simple-k8s/README.md | 52 +
 .../charms/charm-simple-k8s/actions.yaml | 38 +
 .../charms/charm-simple-k8s/config.yaml | 29 +
 .../charms/charm-simple-k8s/hooks/start | 1 +
 .../lib/charms/osm/sshproxy.py | 419 ++++++
 .../lib/charms/requirementstxt.py | 58 +
 .../charm-simple-k8s/lib/ops/__init__.py | 0
 .../charms/charm-simple-k8s/lib/ops/charm.py | 306 +++++
 .../charm-simple-k8s/lib/ops/framework.py | 941 +++++++++++++
 .../charm-simple-k8s/lib/ops/jujuversion.py | 77 ++
 .../charms/charm-simple-k8s/lib/ops/main.py | 191 +++
 .../charms/charm-simple-k8s/lib/ops/model.py | 679 ++++++++++
 .../charms/charm-simple-k8s/metadata.yaml | 8 +
 .../charm-simple-k8s/mod/charms.osm/LICENSE | 201 +++
 .../charm-simple-k8s/mod/charms.osm/README.md | 67 +
 .../mod/charms.osm/charms/osm/libansible.py | 108 ++
 .../mod/charms.osm/charms/osm/ns.py | 301 +++++
 .../mod/charms.osm/charms/osm/sshproxy.py | 250 ++++
 .../charm-simple-k8s/mod/charms/LICENSE | 201 +++
 .../charm-simple-k8s/mod/charms/README.md | 29 +
 .../mod/charms/charms/requirementstxt.py | 58 +
 .../charm-simple-k8s/mod/operator/.flake8 | 3 +
 .../charm-simple-k8s/mod/operator/.gitignore | 3 +
 .../charm-simple-k8s/mod/operator/.travis.yml | 15 +
 .../charm-simple-k8s/mod/operator/LICENSE.txt | 202 +++
 .../charm-simple-k8s/mod/operator/Makefile | 41 +
 .../charm-simple-k8s/mod/operator/README.md | 120 ++
 .../mod/operator/ops/__init__.py | 0
 .../mod/operator/ops/charm.py | 306 +++++
 .../mod/operator/ops/framework.py | 941 +++++++++++++
 .../mod/operator/ops/jujuversion.py | 77 ++
 .../charm-simple-k8s/mod/operator/ops/main.py | 191 +++
 .../mod/operator/ops/model.py | 679 ++++++++++
 .../charm-simple-k8s/mod/operator/setup.py | 38 +
 .../mod/operator/test/__init__.py | 0
 .../mod/operator/test/bin/relation-ids | 11 +
 .../mod/operator/test/bin/relation-list | 16 +
 .../test/charms/test_main/config.yaml | 1 +
 .../test/charms/test_main/lib/__init__.py | 0
 .../test/charms/test_main/lib/ops/__init__.py | 0
 .../test/charms/test_main/lib/ops/charm.py | 306 +++++
 .../charms/test_main/lib/ops/framework.py | 941 +++++++++++++
 .../charms/test_main/lib/ops/jujuversion.py | 77 ++
 .../test/charms/test_main/lib/ops/main.py | 191 +++
 .../test/charms/test_main/lib/ops/model.py | 679 ++++++++++
 .../test/charms/test_main/metadata.yaml | 26 +
 .../test/charms/test_main/src/charm.py | 156 +++
 .../mod/operator/test/test_charm.py | 311 +++++
 .../mod/operator/test/test_framework.py | 1200 +++++++++++++++++
 .../mod/operator/test/test_helpers.py | 76 ++
 .../mod/operator/test/test_jujuversion.py | 130 ++
 .../mod/operator/test/test_main.py | 362 +++++
 .../mod/operator/test/test_model.py | 868 ++++++++++++
 .../charms/charm-simple-k8s/requirements.txt | 1 +
 .../charms/charm-simple-k8s/src/charm.py | 216 +++
 .../cloud_init/cloud-config.txt | 12 +
 .../hackfest_k8sproxycharm_vnfd.yaml | 69 +
 hackfest_k8sproxycharm_vnf/icons/osm.png | Bin 0 -> 55888 bytes
 63 files changed, 12328 insertions(+)
 create mode 100644 hackfest_k8sproxycharm_ns/hackfest_k8sproxycharm_nsd.yaml
 create
mode 100644 hackfest_k8sproxycharm_ns/icons/osm.png create mode 100644 hackfest_k8sproxycharm_vnf/README create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitignore create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitmodules create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/README.md create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/actions.yaml create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/config.yaml create mode 120000 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/hooks/start create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/osm/sshproxy.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/requirementstxt.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/__init__.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/charm.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/framework.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/jujuversion.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/main.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/model.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/metadata.yaml create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/LICENSE create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/README.md create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/libansible.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/ns.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/sshproxy.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/LICENSE create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/README.md create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/charms/requirementstxt.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.flake8 create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.gitignore create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.travis.yml create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/LICENSE.txt create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/Makefile create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/README.md create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/__init__.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/charm.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/framework.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/jujuversion.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/main.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/model.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/setup.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/__init__.py create 
mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-ids create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-list create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/config.yaml create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/__init__.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/__init__.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/charm.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/framework.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/jujuversion.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/main.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/model.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/metadata.yaml create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/src/charm.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_charm.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_framework.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_helpers.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_jujuversion.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_main.py create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_model.py create mode 100644 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/requirements.txt create mode 100755 hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/src/charm.py create mode 100755 hackfest_k8sproxycharm_vnf/cloud_init/cloud-config.txt create mode 100644 hackfest_k8sproxycharm_vnf/hackfest_k8sproxycharm_vnfd.yaml create mode 100644 hackfest_k8sproxycharm_vnf/icons/osm.png diff --git a/hackfest_k8sproxycharm_ns/hackfest_k8sproxycharm_nsd.yaml b/hackfest_k8sproxycharm_ns/hackfest_k8sproxycharm_nsd.yaml new file mode 100644 index 00000000..e5e51a6c --- /dev/null +++ b/hackfest_k8sproxycharm_ns/hackfest_k8sproxycharm_nsd.yaml @@ -0,0 +1,39 @@ +nsd:nsd-catalog: + nsd: + - id: hackfest_k8sproxycharm-ns + name: hackfest_k8sproxycharm-ns + short-name: hackfest_k8sproxycharm-ns + description: NS with 2 VNFs with cloudinit connected by datanet and mgmtnet VLs + version: '1.0' + logo: osm.png + constituent-vnfd: + - vnfd-id-ref: hackfest_k8sproxycharm-vnf + member-vnf-index: '1' + - vnfd-id-ref: hackfest_k8sproxycharm-vnf + member-vnf-index: '2' + vld: + - id: mgmtnet + name: mgmtnet + short-name: mgmtnet + type: ELAN + mgmt-network: 'true' + vim-network-name: PUBLIC + vnfd-connection-point-ref: + - vnfd-id-ref: hackfest_k8sproxycharm-vnf + member-vnf-index-ref: '1' + vnfd-connection-point-ref: vnf-mgmt + - vnfd-id-ref: hackfest_k8sproxycharm-vnf + member-vnf-index-ref: '2' + vnfd-connection-point-ref: vnf-mgmt + - id: datanet + name: datanet + short-name: datanet + type: ELAN + 
        vnfd-connection-point-ref:
+        - vnfd-id-ref: hackfest_k8sproxycharm-vnf
+          member-vnf-index-ref: '1'
+          vnfd-connection-point-ref: vnf-data
+        - vnfd-id-ref: hackfest_k8sproxycharm-vnf
+          member-vnf-index-ref: '2'
+          vnfd-connection-point-ref: vnf-data
+
diff --git a/hackfest_k8sproxycharm_ns/icons/osm.png b/hackfest_k8sproxycharm_ns/icons/osm.png
new file mode 100644
index 0000000000000000000000000000000000000000..62012d2a2b491bdcd536d62c3c3c863c0d8c1b33
Binary files /dev/null and b/hackfest_k8sproxycharm_ns/icons/osm.png differ
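This NSD joins two instances of the same VNFD (`hackfest_k8sproxycharm-vnf`) through the `mgmtnet` and `datanet` virtual links. As a purely illustrative sanity check of that structure before onboarding the package, the descriptor can be loaded directly; PyYAML is assumed to be installed and the snippet assumes it is run from the root of this packages repository:

```python
# Illustrative only: print what the NSD above declares.
# Assumes PyYAML is installed and the script runs from the repository root.
import yaml

with open("hackfest_k8sproxycharm_ns/hackfest_k8sproxycharm_nsd.yaml") as f:
    nsd = yaml.safe_load(f)["nsd:nsd-catalog"]["nsd"][0]

print(nsd["id"])  # hackfest_k8sproxycharm-ns
for vnf in nsd["constituent-vnfd"]:
    print("member-vnf-index", vnf["member-vnf-index"], "uses", vnf["vnfd-id-ref"])
for vld in nsd["vld"]:
    cps = [ref["vnfd-connection-point-ref"] for ref in vld["vnfd-connection-point-ref"]]
    print("VL", vld["id"], "connects", cps)
```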
diff --git a/hackfest_k8sproxycharm_vnf/README b/hackfest_k8sproxycharm_vnf/README
new file mode 100644
index 00000000..e69de29b
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitignore b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitignore
new file mode 100644
index 00000000..722d5e71
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitignore
@@ -0,0 +1 @@
+.vscode
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitmodules b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitmodules
new file mode 100644
index 00000000..fce0171f
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/.gitmodules
@@ -0,0 +1,9 @@
+[submodule "mod/operator"]
+	path = mod/operator
+	url = https://github.com/canonical/operator
+[submodule "mod/charms.osm"]
+	path = mod/charms.osm
+	url = https://github.com/charmed-osm/charms.osm
+[submodule "mod/charms"]
+	path = mod/charms
+	url = https://github.com/AdamIsrael/charms.requirementstxt
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/README.md b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/README.md
new file mode 100644
index 00000000..21d1a44c
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/README.md
@@ -0,0 +1,52 @@
+# charm-simple-k8s
+
+This is a WORK IN PROGRESS example of a simple proxy charm used by Open Source Mano (OSM), written in the [Python Operator Framework](https://github.com/canonical/operator).
+
+
+## Usage
+
+To get the charm:
+```bash
+git clone https://github.com/charmed-osm/charm-simple-k8s
+cd charm-simple-k8s
+# Install the submodules
+git submodule update --init
+```
+
+To configure the charm, you'll need an SSH-accessible machine, along with its hostname and a username and password to log in with. Password authentication is useful for testing, but key-based authentication is preferred when deploying through OSM.
+
+To deploy to Juju:
+```
+juju deploy . --config ssh-hostname=10.135.22.x --config ssh-username=ubuntu --config ssh-password=ubuntu --resource ubuntu_image=ubuntu/ubuntu:latest
+```
+
+```
+# Make sure the charm is in an Active state
+juju status
+```
+
+To test the SSH credentials, run the `verify-ssh-credentials` action and inspect its output:
+```
+$ juju run-action simple-k8s/0 verify-ssh-credentials
+Action queued with id: "9"
+
+$ juju show-action-output 9
+UnitId: simple-k8s/0
+results:
+  Stdout: |
+    Verified!
+  verified: "True"
+status: completed
+timing:
+  completed: 2020-02-14 19:30:38 +0000 UTC
+  enqueued: 2020-02-14 19:30:33 +0000 UTC
+  started: 2020-02-14 19:30:36 +0000 UTC
+```
+
+To exercise the charm, run the `touch` action:
+
+```
+juju run-action simple-k8s/0 touch filename=/home/ubuntu/firsttouch
+```
+
+Then ssh to the remote machine and verify that the file has been created.
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/actions.yaml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/actions.yaml
new file mode 100644
index 00000000..956e1f19
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/actions.yaml
@@ -0,0 +1,38 @@
+touch:
+  description: "Touch a file on the VNF."
+  params:
+    filename:
+      description: "The name of the file to touch."
+      type: string
+      default: ""
+  required:
+    - filename
+
+# Standard OSM functions
+start:
+  description: "Start the service on the VNF."
+stop:
+  description: "Stop the service on the VNF."
+restart:
+  description: "Restart the service on the VNF."
+reboot:
+  description: "Reboot the VNF virtual machine."
+upgrade:
+  description: "Upgrade the software on the VNF."
+
+# Required by charms.osm.sshproxy
+run:
+  description: "Run an arbitrary command"
+  params:
+    command:
+      description: "The command to execute."
+      type: string
+      default: ""
+  required:
+    - command
+generate-ssh-key:
+  description: "Generate a new SSH keypair for this unit. This will replace any existing previously generated keypair."
+verify-ssh-credentials:
+  description: "Verify that this unit can authenticate with the server specified by ssh-hostname and ssh-username."
+get-ssh-public-key:
+  description: "Get the public SSH key for this unit."
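The `touch` action above is the one exercised at the end of the README. As a rough, hypothetical sketch of how such an action can be dispatched to the `SSHProxy` helper added below in `lib/charms/osm/sshproxy.py`, the following handler uses the vendored operator framework; the class name and handler body are illustrative and are not the `src/charm.py` shipped later in this patch, and the `observe(event, self)` registration style (dispatching to `on_<event>()`) is assumed for this vintage of the framework.

```python
#!/usr/bin/env python3
# Hypothetical sketch: wiring the `touch` action from actions.yaml to
# SSHProxy.run(). Not the actual src/charm.py included in this patch.
import sys

sys.path.append("lib")

from ops.charm import CharmBase
from ops.main import main
from charms.osm.sshproxy import SSHProxy


class ExampleSimpleCharm(CharmBase):
    def __init__(self, framework, key):
        super().__init__(framework, key)
        # Assumption: this framework version dispatches to on_<event>() when
        # the observer object itself is registered.
        self.framework.observe(self.on.touch_action, self)

    def on_touch_action(self, event):
        """Touch a file on the VNF over SSH."""
        filename = event.params["filename"]
        # Assumption: model.config exposes the options declared in config.yaml.
        proxy = SSHProxy(
            hostname=self.model.config["ssh-hostname"],
            username=self.model.config["ssh-username"],
            password=self.model.config["ssh-password"],
        )
        try:
            stdout, _ = proxy.run("touch {}".format(filename))
            event.set_results({"output": stdout})
        except Exception as e:
            event.fail("touch failed: {}".format(e))


if __name__ == "__main__":
    main(ExampleSimpleCharm)
```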
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/config.yaml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/config.yaml
new file mode 100644
index 00000000..5b908ae0
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/config.yaml
@@ -0,0 +1,29 @@
+options:
+  ssh-hostname:
+    type: string
+    default: ""
+    description: "The hostname or IP address of the machine to connect to."
+  ssh-username:
+    type: string
+    default: ""
+    description: "The username to log in as."
+  ssh-password:
+    type: string
+    default: ""
+    description: "The password used to authenticate."
+  # ssh-private-key:
+  #   type: string
+  #   default: ""
+  #   description: "DEPRECATED. The private ssh key to be used to authenticate."
+  ssh-public-key:
+    type: string
+    default: ""
+    description: "The public key of this unit."
+  ssh-key-type:
+    type: string
+    default: "rsa"
+    description: "The type of encryption to use for the SSH key."
+  ssh-key-bits:
+    type: int
+    default: 4096
+    description: "The number of bits to use for the SSH key."
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/hooks/start b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/hooks/start
new file mode 120000
index 00000000..25b1f68f
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/hooks/start
@@ -0,0 +1 @@
+../src/charm.py
\ No newline at end of file
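The `ssh-*` options above are what the `SSHProxy` class in the next file consumes. As a rough sketch of how to check a set of credentials by hand outside of Juju (with `lib/` on the Python path and paramiko installed), something along these lines can be used; the hostname, username and password are placeholders:

```python
# Illustrative credential check using the SSHProxy helper added below.
# Placeholders: replace hostname/username/password with real values. Key
# generation writes under /root/.ssh, so it normally runs inside the unit.
import sys

sys.path.append("lib")  # so that charms.osm.sshproxy resolves, as in the charm

from charms.osm.sshproxy import SSHProxy

if not SSHProxy.has_ssh_key():
    SSHProxy.generate_ssh_key()
print("Unit public key:", SSHProxy.get_ssh_public_key())

proxy = SSHProxy(hostname="10.135.22.x", username="ubuntu", password="ubuntu")
verified, error = proxy.verify_credentials()
if verified:
    stdout, _ = proxy.run("hostname")
    print("Connected to:", stdout)
else:
    print("SSH check failed:", error)
```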
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/osm/sshproxy.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/osm/sshproxy.py
new file mode 100644
index 00000000..60057c92
--- /dev/null
+++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/osm/sshproxy.py
@@ -0,0 +1,419 @@
+"""Module to help with executing commands over SSH."""
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+
+# from charmhelpers.core import unitdata
+# from charmhelpers.core.hookenv import log
+
+import io
+import ipaddress
+import paramiko
+import os
+import socket
+import shlex
+import traceback
+
+from subprocess import (
+    check_call,
+    Popen,
+    CalledProcessError,
+    PIPE,
+)
+
+
+class SSHProxy:
+    private_key_path = "/root/.ssh/id_sshproxy"
+    public_key_path = "/root/.ssh/id_sshproxy.pub"
+    key_type = "rsa"
+    key_bits = 4096
+
+    def __init__(self, hostname: str, username: str, password: str = ""):
+        self.hostname = hostname
+        self.username = username
+        self.password = password
+
+    @staticmethod
+    def generate_ssh_key():
+        """Generate a 4096-bit rsa keypair."""
+        if not os.path.exists(SSHProxy.private_key_path):
+            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path,
+            )
+
+            try:
+                check_call(cmd, shell=True)
+            except CalledProcessError:
+                return False
+
+        return True
+
+    @staticmethod
+    def get_ssh_public_key():
+        publickey = ""
+        if os.path.exists(SSHProxy.private_key_path):
+            with open(SSHProxy.public_key_path, "r") as f:
+                publickey = f.read()
+        return publickey
+
+    @staticmethod
+    def has_ssh_key():
+        if os.path.exists(SSHProxy.private_key_path):
+            return True
+        else:
+            return False
+
+    def run(self, cmd: str) -> (str, str):
+        """Run a command remotely via SSH.
+
+        Note: The previous behavior was to run the command locally if SSH wasn't
+        configured, but that can lead to cases where execution succeeds when you'd
+        expect it not to.
+        """
+        if isinstance(cmd, str):
+            cmd = shlex.split(cmd)
+
+        host = self._get_hostname()
+        user = self.username
+
+        # Make sure we have everything we need to connect
+        if host and user:
+            return self._ssh(cmd)
+
+        raise Exception("Invalid SSH credentials.")
+
+    def sftp(self, local, remote):
+        """Copy a local file to the remote host over SFTP."""
+        client = self._get_ssh_client()
+
+        # Create an sftp connection from the underlying transport
+        sftp = paramiko.SFTPClient.from_transport(client.get_transport())
+        sftp.put(local, remote)
+        client.close()
+
+    def verify_credentials(self):
+        """Verify the SSH credentials."""
+        try:
+            (stdout, stderr) = self.run("hostname")
+        except CalledProcessError as e:
+            stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output))
+        except paramiko.ssh_exception.AuthenticationException as e:
+            stderr = "{}.".format(e)
+        except paramiko.ssh_exception.BadAuthenticationType as e:
+            stderr = "{}".format(e.explanation)
+        except paramiko.ssh_exception.BadHostKeyException as e:
+            stderr = "Host key mismatch: expected {} but got {}.".format(
+                e.expected_key, e.got_key,
+            )
+        except (TimeoutError, socket.timeout):
+            stderr = "Timeout attempting to reach {}".format(self._get_hostname())
+        except Exception as error:
+            tb = traceback.format_exc()
+            stderr = "Unhandled exception: {}".format(tb)
+
+        if len(stderr) == 0:
+            return True, stderr
+        return False, stderr
+
+    ###################
+    # Private methods #
+    ###################
+    def _get_hostname(self):
+        """Get the hostname for the ssh target.
+
+        HACK: This function was added to work around an issue where the
+        ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
+        is the floating ip, and the second the non-floating ip, for an Openstack
+        instance.
+        """
+        return self.hostname.split(";")[0]
+
+    def _get_ssh_client(self):
+        """Return a connected Paramiko ssh object."""
+        client = paramiko.SSHClient()
+        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+        pkey = None
+
+        # Check for the auto-generated private key
+        if os.path.exists(self.private_key_path):
+            with open(self.private_key_path) as f:
+                pkey = paramiko.RSAKey.from_private_key(f)
+
+        ###########################################################################
+        # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL 5) where    #
+        # the server may not send the SSH_MSG_USERAUTH_BANNER message except when #
+        # responding to an auth_none request. For example, paramiko will attempt  #
+        # to use password authentication when a password is set, but the server   #
+        # could deny that, instead requesting keyboard-interactive. The hack to   #
+        # workaround this is to attempt a reconnect, which will receive the right #
+        # banner, and authentication can proceed. See the following for more info #
+        # https://github.com/paramiko/paramiko/issues/432                         #
+        # https://github.com/paramiko/paramiko/pull/438                           #
+        ###########################################################################
+
+        try:
+            client.connect(
+                self.hostname,
+                port=22,
+                username=self.username,
+                password=self.password,
+                pkey=pkey
+            )
+        except paramiko.ssh_exception.SSHException as e:
+            if "Error reading SSH protocol banner" == str(e):
+                # Once more, with feeling
+                client.connect(
+                    self.hostname,
+                    port=22,
+                    username=self.username,
+                    password=self.password,
+                    pkey=pkey
+                )
+            else:
+                # Reraise the original exception
+                raise e
+
+        return client
+
+    def _ssh(self, cmd):
+        """Run an arbitrary command over SSH.
+
+        Returns a tuple of (stdout, stderr)
+        """
+        client = self._get_ssh_client()
+
+        cmds = " ".join(cmd)
+        stdin, stdout, stderr = client.exec_command(cmds, get_pty=True)
+        retcode = stdout.channel.recv_exit_status()
+        client.close()  # @TODO re-use connections
+        if retcode > 0:
+            output = stderr.read().strip()
+            raise CalledProcessError(returncode=retcode, cmd=cmd, output=output)
+        return (
+            stdout.read().decode("utf-8").strip(),
+            stderr.read().decode("utf-8").strip(),
+        )
+
+
+## OLD ##
+
+# def get_config():
+#     """Get the current charm configuration.
+
+#     Get the "live" kv store every time we need to access the charm config, in
+#     case it has recently been changed by a config-changed event.
+#     """
+#     db = unitdata.kv()
+#     return db.get('config')
+
+
+# def get_host_ip():
+#     """Get the IP address for the ssh host.
+
+#     HACK: This function was added to work around an issue where the
+#     ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
+#     is the floating ip, and the second the non-floating ip, for an Openstack
+#     instance.
+#     """
+#     cfg = get_config()
+#     return cfg['ssh-hostname'].split(';')[0]
+
+
+# def is_valid_hostname(hostname):
+#     """Validate the ssh-hostname."""
+#     print("Hostname: {}".format(hostname))
+#     if hostname == "0.0.0.0":
+#         return False
+
+#     try:
+#         ipaddress.ip_address(hostname)
+#     except ValueError:
+#         return False
+
+#     return True
+
+
+# def verify_ssh_credentials():
+#     """Verify the ssh credentials have been installed to the VNF.
+
+#     Attempts to run a stock command - `hostname` on the remote host.
+# """ +# verified = False +# status = '' +# cfg = get_config() + +# try: +# host = get_host_ip() +# if is_valid_hostname(host): +# if len(cfg['ssh-hostname']) and len(cfg['ssh-username']): +# cmd = 'hostname' +# status, err = _run(cmd) + +# if len(err) == 0: +# verified = True +# else: +# status = "Invalid IP address." +# except CalledProcessError as e: +# status = 'Command failed: {} ({})'.format( +# ' '.join(e.cmd), +# str(e.output) +# ) +# except paramiko.ssh_exception.AuthenticationException as e: +# status = '{}.'.format(e) +# except paramiko.ssh_exception.BadAuthenticationType as e: +# status = '{}'.format(e.explanation) +# except paramiko.ssh_exception.BadHostKeyException as e: +# status = 'Host key mismatch: expected {} but got {}.'.format( +# e.expected_key, +# e.got_key, +# ) +# except (TimeoutError, socket.timeout): +# status = "Timeout attempting to reach {}".format(cfg['ssh-hostname']) +# except Exception as error: +# tb = traceback.format_exc() +# status = 'Unhandled exception: {}'.format(tb) + +# return (verified, status) + + +# def charm_dir(): +# """Return the root directory of the current charm.""" +# d = os.environ.get('JUJU_CHARM_DIR') +# if d is not None: +# return d +# return os.environ.get('CHARM_DIR') + + +# def run_local(cmd, env=None): +# """Run a command locally.""" +# if isinstance(cmd, str): +# cmd = shlex.split(cmd) if ' ' in cmd else [cmd] + +# if type(cmd) is not list: +# cmd = [cmd] + +# p = Popen(cmd, +# env=env, +# shell=True, +# stdout=PIPE, +# stderr=PIPE) +# stdout, stderr = p.communicate() +# retcode = p.poll() +# if retcode > 0: +# raise CalledProcessError(returncode=retcode, +# cmd=cmd, +# output=stderr.decode("utf-8").strip()) +# return (stdout.decode('utf-8').strip(), stderr.decode('utf-8').strip()) + + +# def _run(cmd, env=None): +# """Run a command remotely via SSH. + +# Note: The previous behavior was to run the command locally if SSH wasn't +# configured, but that can lead to cases where execution succeeds when you'd +# expect it not to. +# """ +# if isinstance(cmd, str): +# cmd = shlex.split(cmd) + +# if type(cmd) is not list: +# cmd = [cmd] + +# cfg = get_config() + +# if cfg: +# if all(k in cfg for k in ['ssh-hostname', 'ssh-username', +# 'ssh-password', 'ssh-private-key']): +# host = get_host_ip() +# user = cfg['ssh-username'] +# passwd = cfg['ssh-password'] +# key = cfg['ssh-private-key'] # DEPRECATED + +# if host and user: +# return ssh(cmd, host, user, passwd, key) + +# raise Exception("Invalid SSH credentials.") + + +# def get_ssh_client(host, user, password=None, key=None): +# """Return a connected Paramiko ssh object.""" +# client = paramiko.SSHClient() +# client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + +# pkey = None + +# # Check for the DEPRECATED private-key +# if key: +# f = io.StringIO(key) +# pkey = paramiko.RSAKey.from_private_key(f) +# else: +# # Otherwise, check for the auto-generated private key +# if os.path.exists('/root/.ssh/id_juju_sshproxy'): +# with open('/root/.ssh/id_juju_sshproxy', 'r') as f: +# pkey = paramiko.RSAKey.from_private_key(f) + +# ########################################################################### +# # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL 5) where # +# # the server may not send the SSH_MSG_USERAUTH_BANNER message except when # +# # responding to an auth_none request. For example, paramiko will attempt # +# # to use password authentication when a password is set, but the server # +# # could deny that, instead requesting keyboard-interactive. 
The hack to # +# # workaround this is to attempt a reconnect, which will receive the right # +# # banner, and authentication can proceed. See the following for more info # +# # https://github.com/paramiko/paramiko/issues/432 # +# # https://github.com/paramiko/paramiko/pull/438 # +# ########################################################################### + +# try: +# client.connect(host, port=22, username=user, +# password=password, pkey=pkey) +# except paramiko.ssh_exception.SSHException as e: +# if 'Error reading SSH protocol banner' == str(e): +# # Once more, with feeling +# client.connect(host, port=22, username=user, +# password=password, pkey=pkey) +# else: +# # Reraise the original exception +# raise e + +# return client + + +# def sftp(local_file, remote_file, host, user, password=None, key=None): +# """Copy a local file to a remote host.""" +# client = get_ssh_client(host, user, password, key) + +# # Create an sftp connection from the underlying transport +# sftp = paramiko.SFTPClient.from_transport(client.get_transport()) +# sftp.put(local_file, remote_file) +# client.close() + + +# def ssh(cmd, host, user, password=None, key=None): +# """Run an arbitrary command over SSH.""" +# client = get_ssh_client(host, user, password, key) + +# cmds = ' '.join(cmd) +# stdin, stdout, stderr = client.exec_command(cmds, get_pty=True) +# retcode = stdout.channel.recv_exit_status() +# client.close() # @TODO re-use connections +# if retcode > 0: +# output = stderr.read().strip() +# raise CalledProcessError(returncode=retcode, cmd=cmd, +# output=output) +# return ( +# stdout.read().decode('utf-8').strip(), +# stderr.read().decode('utf-8').strip() +# ) diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/requirementstxt.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/requirementstxt.py new file mode 100644 index 00000000..298d5845 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/charms/requirementstxt.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Requirements.txt support + +import sys + +sys.path.append("lib") + +from ops.framework import StoredState + +import os +import subprocess +import sys +from remote_pdb import RemotePdb + +REQUIREMENTS_TXT = "{}/requirements.txt".format(os.environ["JUJU_CHARM_DIR"]) + + +def install_requirements(): + if os.path.exists(REQUIREMENTS_TXT): + + # First, make sure python3 and python3-pip are installed + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + # Install the Python3 package + subprocess.check_call( + ["apt-get", "install", "-y", "python3", "python3-pip", "python3-paramiko"], + # Eat stdout so it's not returned in an action's stdout + # TODO: redirect to a file handle and log to juju log + # stdout=subprocess.DEVNULL, + ) + + # Lastly, install the python requirements + cmd = [sys.executable, "-m", "pip", "install", "-r", REQUIREMENTS_TXT] + # stdout = subprocess.check_output(cmd) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + + stdout, stderr = p.communicate() + + print(stdout) + print(stderr) + # subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", REQUIREMENTS_TXT], + # # Eat stdout so it's not returned in an action's stdout + # # TODO: redirect to a file handle and log to juju log + # # stdout=subprocess.DEVNULL, + # ) + + +# Use StoredState to make sure we're run exactly once automatically +# 
RemotePdb('127.0.0.1', 4444).set_trace() + +state = StoredState() + +installed = getattr(state, "requirements_txt_installed", None) +if not installed: + install_requirements() + state.requirements_txt_installed = True + diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/__init__.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/charm.py new file mode 100755 index 00000000..71472f96 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/charm.py @@ -0,0 +1,306 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import yaml + +from ops.framework import Object, EventSource, EventBase, EventsBase + + +class HookEvent(EventBase): + pass + + +class ActionEvent(EventBase): + + def defer(self): + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot): + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because the model is not available in __init__. 
+ self.params = self.framework.model._backend.action_get() + + def set_results(self, results): + self.framework.model._backend.action_set(results) + + def log(self, message): + self.framework.model._backend.action_log(message) + + def fail(self, message=''): + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + pass + + +class StartEvent(HookEvent): + pass + + +class StopEvent(HookEvent): + pass + + +class ConfigChangedEvent(HookEvent): + pass + + +class UpdateStatusEvent(HookEvent): + pass + + +class UpgradeCharmEvent(HookEvent): + pass + + +class PreSeriesUpgradeEvent(HookEvent): + pass + + +class PostSeriesUpgradeEvent(HookEvent): + pass + + +class LeaderElectedEvent(HookEvent): + pass + + +class LeaderSettingsChangedEvent(HookEvent): + pass + + +class RelationEvent(HookEvent): + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit and unit.app != app: + raise RuntimeError(f'cannot create RelationEvent with application {app} and unit {unit}') + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self): + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot): + self.relation = self.framework.model.get_relation(snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationJoinedEvent(RelationEvent): + pass + + +class RelationChangedEvent(RelationEvent): + pass + + +class RelationDepartedEvent(RelationEvent): + pass + + +class RelationBrokenEvent(RelationEvent): + pass + + +class StorageEvent(HookEvent): + pass + + +class StorageAttachedEvent(StorageEvent): + pass + + +class StorageDetachingEvent(StorageEvent): + pass + + +class CharmEvents(EventsBase): + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + + +class CharmBase(Object): + + on = CharmEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(f'{relation_name}_relation_joined', RelationJoinedEvent) + self.on.define_event(f'{relation_name}_relation_changed', RelationChangedEvent) + self.on.define_event(f'{relation_name}_relation_departed', RelationDepartedEvent) + self.on.define_event(f'{relation_name}_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(f'{storage_name}_storage_attached', StorageAttachedEvent) + self.on.define_event(f'{storage_name}_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name 
= action_name.replace('-', '_') + self.on.define_event(f'{action_name}_action', ActionEvent) + + +class CharmMeta: + """Object containing the metadata for the charm. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + """ + + def __init__(self, raw={}, actions_raw={}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta('requires', name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta('provides', name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta('peers', name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', []) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml(cls, metadata, actions=None): + meta = yaml.safe_load(metadata) + raw_actions = {} + if actions is not None: + raw_actions = yaml.safe_load(actions) + return cls(meta, raw_actions) + + +class RelationMeta: + """Object containing metadata about a relation definition.""" + + def __init__(self, role, relation_name, raw): + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + 
self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/framework.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/framework.py new file mode 100755 index 00000000..d95eb61f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/framework.py @@ -0,0 +1,941 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import pickle +import marshal +import types +import sqlite3 +import collections +import collections.abc +import keyword +import weakref +from datetime import timedelta + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. 
+ """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = f"{parent}/{kind}[{key}]" + else: + self._path = f"{parent}/{kind}" + else: + if key: + self._path = f"{kind}[{key}]" + else: + self._path = f"{kind}" + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {path}") + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError(f"Event requires a subclass of EventBase as an argument, got {event_type}") + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def __set_name__(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + f'EventSource({self.event_type.__name__}) reused as ' + f'{self.emitter_type.__name__}.{self.event_kind} and ' + f'{emitter_type.__name__}.{event_kind}') + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event rather than charm_instance.on.event, + # but in that case it couldn't be emitted anyway, so there's no point to registering it. 
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return (f'<BoundEvent {self.event_type.__name__} bound to {type(self.emitter).__name__}.{self.event_kind} at {hex(id(self))}>') + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class Object: + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + @property + def meta(self): + return self.framework.meta + + @property + def charm_dir(self): + return self.framework.charm_dir + + +class EventsBase(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls -- a type to define an event on. + event_kind -- an attribute name that will be used to access the event. Must be a valid python identifier, not be a keyword or an existing attribute. + event_type -- a type of the event to define.
+ """ + if not event_kind.isidentifier(): + raise RuntimeError(f'unable to define an event with event_kind that is not a valid python identifier: {event_kind}') + elif keyword.iskeyword(event_kind): + raise RuntimeError(f'unable to define an event with event_kind that is a python keyword: {event_kind}') + try: + getattr(cls, event_kind) + raise RuntimeError(f'unable to define an event with event_kind that overlaps with an existing type {cls} attribute: {event_kind}') + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor.__set_name__(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(EventsBase): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoSnapshotError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f'no snapshot data found for {self.handle_path} object' + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f"cannot restore {self.handle_path} since no class was registered for it" + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), isolation_level=None, timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute("CREATE TABLE notice (sequence INTEGER PRIMARY KEY AUTOINCREMENT, event_path TEXT, observer_path TEXT, method_name TEXT)") + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. 
For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path, snapshot_data): + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, snapshot_data)) + + def load_snapshot(self, handle_path): + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return row[0] + return None + + def drop_snapshot(self, handle_path): + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def save_notice(self, event_path, observer_path, method_name): + self._db.execute("INSERT INTO notice VALUES (NULL, ?, ?, ?)", (event_path, observer_path, method_name)) + + def drop_notice(self, event_path, observer_path, method_name): + self._db.execute("DELETE FROM notice WHERE event_path=? AND observer_path=? AND method_name=?", (event_path, observer_path, method_name)) + + def notices(self, event_path): + if event_path: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice WHERE event_path=? ORDER BY sequence", (event_path,)) + else: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice ORDER BY sequence") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. + model = None + meta = None + charm_dir = None + + def __init__(self, data_path, charm_dir, meta, model): + + super().__init__(self, None) + + self._data_path = data_path + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + self._storage = SQLiteStorage(data_path) + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError(f"two objects claiming to be {obj.handle.path} have been created") + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. 
+ self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. + """ + if type(value) not in self._type_known: + raise RuntimeError(f"cannot save {type(value).__name__} values before registering that type") + data = value.snapshot() + # Use marshal as a validator, enforcing the use of simple types. + marshal.dumps(data) + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(data) + self._storage.save_snapshot(value.handle.path, raw_data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + raw_data = self._storage.load_snapshot(handle.path) + if not raw_data: + raise NoSnapshotError(handle.path) + data = pickle.loads(raw_data) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event, observer): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self.on_something_happened) + + If the method to be called follows the name convention "on_<event name>", it + may be omitted from the observe call. That means the above is equivalent to: + + framework.observe(someobj.something_happened, self) + + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError(f'Framework.observe requires a BoundEvent as second parameter, got {bound_event}') + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError(f'event emitter {type(emitter).__name__} must have a "handle" attribute') + + method_name = None + if isinstance(observer, types.MethodType): + method_name = observer.__name__ + observer = observer.__self__ + else: + method_name = "on_" + event_kind + if not hasattr(observer, method_name): + raise RuntimeError(f'Observer method not provided explicitly and {type(observer).__name__} type has no "{method_name}" method') + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(getattr(observer, method_name)) + # Self isn't included in the params list, so the first arg will be the event.
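+        # Illustrative sketch (not part of the vendored module; the method and event
+        # names are hypothetical): an acceptable observer method takes only the event,
+        # and any further parameters must carry defaults, e.g.
+        #
+        #     def on_config_changed(self, event, extra=None):
+        #         ...
+        #
+        # which is exactly what the signature check below enforces.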
+ extra_params = list(sig.parameters.values())[1:] + if not sig.parameters: + raise TypeError(f'{type(observer).__name__}.{method_name} must accept event parameter') + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. + raise TypeError(f'{type(observer).__name__}.{method_name} has extra required parameter') + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from scratch in the next path. 
+ self.framework._forget(event) + + if not deferred: + self._storage.drop_snapshot(last_event_path) + + +class StoredStateChanged(EventBase): + pass + + +class StoredStateEvents(EventsBase): + changed = EventSource(StoredStateChanged) + + +class StoredStateData(Object): + + on = StoredStateEvents() + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. + self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. + if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError(f"attribute '{key}' is not stored") + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError(f"attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, str, bytes, list, dict, set)): + raise AttributeError(f"attribute '{key}' cannot be set to {type(value).__name__}: must be int/dict/list/etc") + + self._data[key] = _unwrap_stored(self._data, value) + self.on.changed.emit() + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is None: + self.parent_type = parent_type + elif self.parent_type is not parent_type: + raise RuntimeError("StoredState shared by {} and {}".format(self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + return self + + bound = parent.__dict__.get(self.attr_name) + if bound is None: + for attr_name, attr_value in parent_type.__dict__.items(): + if attr_value is self: + if self.attr_name and attr_name != self.attr_name: + parent_tname = parent_type.__name__ + raise RuntimeError(f"StoredState shared by {parent_tname}.{self.attr_name} and {parent_tname}.{attr_name}") + self.attr_name = attr_name + bound = BoundStoredState(parent, attr_name) + parent.__dict__[attr_name] = bound + break + else: + raise RuntimeError("cannot find StoredVariable attribute in type {}".format(parent_type.__name__)) + + return bound + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def 
_unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. 
+ + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/jujuversion.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/jujuversion.py new file mode 100755 index 00000000..5256f24f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/jujuversion.py @@ -0,0 +1,77 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
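+
+# Illustrative usage sketch (not part of the vendored module; the version strings
+# are made up): the JujuVersion class below parses Juju version strings and
+# supports comparison against other instances or plain strings, e.g.
+#
+#     JujuVersion('2.7.1') == '2.7.1'              # True
+#     JujuVersion('2.7.1') < JujuVersion('2.8.0')  # True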
+ +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'^(?P<major>\d{1,9})\.(?P<minor>\d{1,9})((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))?(\.(?P<build>\d{1,9}))?$' + + def __init__(self, version): + m = re.match(self.PATTERN, version) + if not m: + raise RuntimeError(f'"{version}" is not a valid Juju version string') + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = f'{self.major}.{self.minor}-{self.tag}{self.patch}' + else: + s = f'{self.major}.{self.minor}.{self.patch}' + if self.build > 0: + s += f'.{self.build}' + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + return self.major == other.major and self.minor == other.minor\ + and self.tag == other.tag and self.build == other.build and self.patch == other.patch + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/main.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/main.py new file mode 100755 index 00000000..c8d5da2a --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/main.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model + +CHARM_STATE_FILE = '.unit-state.db' + + +def debugf(format, *args, **kwargs): + pass + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure.
+ charm_dir = Path(f'{__file__}/../../..').resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _load_metadata(charm_dir): + metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text()) + + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = yaml.safe_load(actions_meta.read_text()) + else: + actions_metadata = {} + return metadata, actions_metadata + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError(f"action event name {bound_event.event_kind} needs _action suffix") + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError(f'cannot create a symlink: unsupported event type {bound_event.event_type}') + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. For Windows + # it is always an absolute path (any symlinks are resolved) while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), event_dir) + + # Ignore the non-symlink files or directories assuming the charm author knows what they are doing. + debugf(f'Creating a new relative symlink at {event_path} pointing to {target_path}') + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file which is determined by inspecting + symlinks provided by the charm author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + debugf(f"event {event_name} not defined for {charm}") + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
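+    # Hedged example of the naming convention resolved here and prepared in main()
+    # below (the names are hypothetical): a hook executable called "config-changed"
+    # maps to charm.on.config_changed, and an action called "touch" maps to
+    # charm.on.touch_action.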
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + debugf(f'Emitting Juju event {event_name}') + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError(f'invalid remote unit name: {remote_unit_name}') + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +def main(charm_class): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + + charm_dir = _get_charm_dir() + + # Process the Juju event relevant to the current hook execution + # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used + # in order to support simulation of events from debugging sessions. + # TODO: For Windows, when symlinks are used, this is not a valid method of getting an event name (see LP: #1854505). + juju_exec_path = Path(sys.argv[0]) + juju_event_name = juju_exec_path.name.replace('-', '_') + if juju_exec_path.parent.name == 'actions': + juju_event_name = f'{juju_event_name}_action' + + metadata, actions_metadata = _load_metadata(charm_dir) + meta = ops.charm.CharmMeta(metadata, actions_metadata) + unit_name = os.environ['JUJU_UNIT_NAME'] + model = ops.model.Model(unit_name, meta, ops.model.ModelBackend()) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. + charm_state_path = charm_dir / CHARM_STATE_FILE + framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model) + try: + charm = charm_class(framework, None) + + # When a charm is force-upgraded and a unit is in an error state Juju does not run upgrade-charm and + # instead runs the failed hook followed by config-changed. Given the nature of force-upgrading + # the hook setup code is not triggered on config-changed. + # 'start' event is included as Juju does not fire the install event for K8s charms (see LP: #1854635). + if juju_event_name in ('install', 'start', 'upgrade_charm') or juju_event_name.endswith('_storage_attached'): + _setup_event_links(charm_dir, charm) + + framework.reemit() + + _emit_charm_event(charm, juju_event_name) + + framework.commit() + finally: + framework.close() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/model.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/model.py new file mode 100644 index 00000000..a12dcca2 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/lib/ops/model.py @@ -0,0 +1,679 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import weakref +import os +import shutil +import tempfile +import time +import datetime + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + + +class Model: + + def __init__(self, unit_name, meta, backend): + self._cache = ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + + def get_unit(self, unit_name): + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name): + return self._cache.get(Application, app_name) + + def get_relation(self, relation_name, relation_id=None): + """Get a specific Relation instance. + + If relation_id is given, this will return that Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + """ + return self.relations._get_unique(relation_name, relation_id) + + +class ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + @property + def status(self): + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for application {self} status: {value}') + + if not self._is_our_app: + raise RuntimeError(f'cannot to set status for a remote application {self}') + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + +class Unit: + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + 
self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + @property + def status(self): + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for unit {self} status: {value}') + + if not self._is_our_unit: + raise RuntimeError(f'cannot set status for a remote unit {self}') + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + def is_leader(self): + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError(f"cannot determine leadership status for remote applications: {self}") + + +class LazyMapping(Mapping, ABC): + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of Relation instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role == 'peers': + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError(f'relation name {relation_id} must be int or None not {type(relation_id).__name__}') + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
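+            # Illustrative usage from charm code (the 'db' endpoint is hypothetical,
+            # not part of this module): model.get_relation('db') returns the single
+            # established Relation, None when the endpoint is not related at all, or
+            # raises the error below when it is related more than once.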
+ raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class Relation: + def __init__(self, relation_name, relation_id, is_peer, our_unit, backend, cache): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}:{self.id}>' + + +class RelationData(Mapping): + def __init__(self, relation, our_unit, backend): + self.relation = weakref.proxy(relation) + self._data = {our_unit: RelationDataContent(self.relation, our_unit, backend)} + self._data.update({our_unit.app: RelationDataContent(self.relation, our_unit.app, backend)}) + self._data.update({unit: RelationDataContent(self.relation, unit, backend) for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app: + self._data.update({self.relation.app: RelationDataContent(self.relation, self.relation.app, backend)}) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on whether this unit is a leader or not, + # but this is not guaranteed to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError(f'cannot set relation data for {self._entity.name}') + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. 
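+        # Hedged equivalence sketch (the 'hostname' key and relation object are
+        # hypothetical): from charm code, del relation.data[self.unit]['hostname']
+        # lands here and behaves the same as relation.data[self.unit]['hostname'] = ''.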
+ self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units.""" + + _statuses = {} + + def __init__(self, message): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + cls._statuses[cls.name] = cls + return super().__new__(cls) + + @classmethod + def from_name(cls, name, message): + return cls._statuses[name](message) + + +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message=None): + super().__init__(message or '') + + +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation for providing those services. + This is a "spinning" state, not an error state. It reflects activity on the unit itself, not on peers or related units. + """ + name = 'maintenance' + + +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the charm has not called status-set yet. + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which it is related is not running. + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names, backend): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name): + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
+ """ + if name not in self._paths: + raise RuntimeError(f'invalid resource name: {name}') + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + def __init__(self, backend): + self._backend = backend + + def set_spec(self, spec, k8s_resources=None): + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names, backend): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name): + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name, count=1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(f'cannot add storage with {storage_name} as it is not present in the charm metadata') + self._backend.storage_add(storage_name, count) + + +class Storage: + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + self._location = Path(self._backend.storage_get(f'{self.name}/{self.id}', "location")) + return self._location + + +class ModelError(Exception): + pass + + +class TooManyRelatedAppsError(ModelError): + def __init__(self, relation_name, num_related, max_supported): + super().__init__(f'Too many remote applications on {relation_name} ({num_related} > {max_supported})') + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + pass + + +class RelationNotFoundError(ModelError): + pass + + +class InvalidStatusError(ModelError): + pass + + +class ModelBackend: + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self): + self.unit_name = os.environ['JUJU_UNIT_NAME'] + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = 0 + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', 
'-r', str(relation_id), return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + try: + return self._run('relation-get', '-r', str(relation_id), '-', member_name, f'--app={is_app}', return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + try: + return self._run('relation-set', '-r', str(relation_id), f'{key}={value}', f'--app={is_app}') + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + if time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. + self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.json' + spec_path.write_text(json.dumps(spec)) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.json' + k8s_res_path.write_text(json.dumps(k8s_resources)) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(tmpdir) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + app -- A boolean indicating whether the status should be retrieved for a unit or an application. + """ + return self._run('status-get', '--include-data', f'--application={is_app}') + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + app -- A boolean indicating whether the status should be set for a unit or an application. 
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', f'--application={is_app}', status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError(f'storage count must be integer, got: {count} ({type(count)})') + self._run('storage-add', f'{name}={count}') + + def action_get(self): + return self._run(f'action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run(f'action-set', *[f"{k}={v}" for k, v in results.items()]) + + def action_log(self, message): + self._run(f'action-log', f"{message}") + + def action_fail(self, message=''): + self._run(f'action-fail', f"{message}") + + def network_get(self, endpoint_name, relation_id=None): + """Return network info provided by network-get for a given endpoint. + + endpoint_name -- A name of an endpoint (relation name or extra-binding name). + relation_id -- An optional relation id to get network info for. + """ + cmd = ['network-get', endpoint_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/metadata.yaml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/metadata.yaml new file mode 100644 index 00000000..fb67ecbf --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/metadata.yaml @@ -0,0 +1,8 @@ +name: simple-k8s +summary: A simple example Kubernetes charm +description: | + Simple is an example charm used in OSM Hackfests +series: + - kubernetes +deployment: + mode: operator diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/LICENSE b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
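The `Pod.set_spec` wrapper bundled above in `lib/ops/model.py` is how a Kubernetes charm such as this one hands its pod spec to Juju: only the leader unit may call it, and `ModelBackend.pod_spec_set` shells out to the `pod-spec-set` hook tool underneath. The following is a minimal sketch of that call path, assuming the accessors exposed by this vintage of the operator framework (`self.framework.model.pod`, `Unit.is_leader()`); the charm class, event choice, container name, and image are placeholders rather than anything shipped in this package:

```python
#!/usr/bin/env python3
# Illustrative sketch only -- not shipped in this package.
import sys
sys.path.append("lib")

from ops.charm import CharmBase
from ops.main import main


class ExamplePodCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self.on_config_changed)

    def on_config_changed(self, event):
        # Pod.set_spec raises ModelError on non-leader units, so check first.
        if not self.framework.model.unit.is_leader():
            return
        spec = {
            "containers": [
                {
                    "name": "simple",  # placeholder container name
                    "imageDetails": {"imagePath": "ubuntu:latest"},  # placeholder image
                }
            ],
        }
        # Hands the spec to Juju via the pod-spec-set hook tool.
        self.framework.model.pod.set_spec(spec)


if __name__ == "__main__":
    main(ExamplePodCharm)
```

The charm's real handlers live in `src/charm.py` later in this patch; the sketch only illustrates the call into `Pod.set_spec`.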
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/README.md b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/README.md new file mode 100644 index 00000000..a5d80722 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/README.md @@ -0,0 +1,67 @@ +# charms.osm +A Python library to aid the development of charms for Open Source Mano (OSM) + +## SSHProxy + +Example: + +```python +from charms.osm.sshproxy import SSHProxy + +# Check if SSH Proxy has key +if not SSHProxy.has_ssh_key(): + # Generate SSH Key + SSHProxy.generate_ssh_key() + +# Get generated public and private keys +SSHProxy.get_ssh_public_key() +SSHProxy.get_ssh_private_key() + +# Get Proxy +proxy = SSHProxy( + hostname=config["ssh-hostname"], + username=config["ssh-username"], + password=config["ssh-password"], +) + +# Verify credentials +verified = proxy.verify_credentials() + +if verified: + # Run commands in remote machine + proxy.run("touch /home/ubuntu/touch") +``` + +## Libansible + +```python +from charms.osm import libansible + +# Install ansible packages in the charm +libansible.install_ansible_support() + +result = libansible.execute_playbook( + "configure-remote.yaml", # Name of the playbook <-- Put the playbook in playbooks/ folder + config["ssh-hostname"], + config["ssh-username"], + config["ssh-password"], + dict_vars, # Dictionary with variables to populate in the playbook +) +``` + +## Usage + +Import submodules: + +```bash +git submodule add https://github.com/charmed-osm/charms.osm mod/charms.osm +git submodule add https://github.com/juju/charm-helpers.git mod/charm-helpers # Only for libansible +``` + +Add symlinks: + +```bash +mkdir -p lib/charms +ln -s ../mod/charms.osm/charms/osm lib/charms/osm +ln -s ../mod/charm-helpers/charmhelpers lib/charmhelpers # Only for libansible +``` diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/libansible.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/libansible.py new file mode 100644 index 00000000..32fd26ae --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright 2020 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import fnmatch +import os +import yaml +import subprocess +import sys + +sys.path.append("lib") +import charmhelpers.fetch + + +ansible_hosts_path = "/etc/ansible/hosts" + + +def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config.. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install("ansible") + with open(ansible_hosts_path, "w+") as hosts_file: + hosts_file.write("localhost ansible_connection=local") + + +def create_hosts(hostname, username, password, hosts): + inventory_path = "/etc/ansible/hosts" + + with open(inventory_path, "w") as f: + f.write("[{}]\n".format(hosts)) + h1 = "host ansible_host={0} ansible_user={1} ansible_password={2}\n".format( + hostname, username, password + ) + f.write(h1) + + +def create_ansible_cfg(): + ansible_config_path = "/etc/ansible/ansible.cfg" + + with open(ansible_config_path, "w") as f: + f.write("[defaults]\n") + f.write("host_key_checking = False\n") + + +# Function to find the playbook path +def find(pattern, path): + result = "" + for root, dirs, files in os.walk(path): + for name in files: + if fnmatch.fnmatch(name, pattern): + result = os.path.join(root, name) + return result + + +def execute_playbook(playbook_file, hostname, user, password, vars_dict=None): + playbook_path = find(playbook_file, "/var/lib/juju/agents/") + + with open(playbook_path, "r") as f: + playbook_data = yaml.load(f) + + hosts = "all" + if "hosts" in playbook_data[0].keys() and playbook_data[0]["hosts"]: + hosts = playbook_data[0]["hosts"] + + create_ansible_cfg() + create_hosts(hostname, user, password, hosts) + + call = "ansible-playbook {} ".format(playbook_path) + + if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0: + call += "--extra-vars " + + string_var = "" + for k,v in vars_dict.items(): + string_var += "{}={} ".format(k, v) + + string_var = string_var.strip() + call += '"{}"'.format(string_var) + + call = call.strip() + result = subprocess.check_output(call, shell=True) + + return result diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/ns.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/ns.py new file mode 100644 index 00000000..25be4056 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/ns.py @@ -0,0 +1,301 @@ +# A prototype of a library to aid in the development and operation of +# OSM Network Service charms + +import asyncio +import logging +import os +import os.path +import re +import subprocess +import sys +import time +import yaml + +try: + import juju +except ImportError: + # Not all cloud images are created equal + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the libjuju build dependencies + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) +logging.getLogger('juju.client.connection').setLevel(logging.WARN) +logging.getLogger('juju.model').setLevel(logging.WARN) +logging.getLogger('juju.machine').setLevel(logging.WARN) + + +class NetworkService: + """A lightweight interface to the Juju controller. 
+ + This NetworkService client is specifically designed to allow a higher-level + "NS" charm to interoperate with "VNF" charms, allowing for the execution of + Primitives across other charms within the same model. + """ + endpoint = None + user = 'admin' + secret = None + port = 17070 + loop = None + client = None + model = None + cacert = None + + def __init__(self, user, secret, endpoint=None): + + self.user = user + self.secret = secret + if endpoint is None: + addresses = os.environ['JUJU_API_ADDRESSES'] + for address in addresses.split(' '): + self.endpoint = address + else: + self.endpoint = endpoint + + # Stash the name of the model + self.model = os.environ['JUJU_MODEL_NAME'] + + # Load the ca-cert from agent.conf + AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR']) + with open("{}/agent.conf".format(AGENT_PATH), "r") as f: + try: + y = yaml.safe_load(f) + self.cacert = y['cacert'] + except yaml.YAMLError as exc: + print("Unable to find Juju ca-cert.") + raise exc + + # Create our event loop + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + async def connect(self): + """Connect to the Juju controller.""" + controller = Controller() + + print( + "Connecting to controller... ws://{}:{} as {}/{}".format( + self.endpoint, + self.port, + self.user, + self.secret[-4:].rjust(len(self.secret), "*"), + ) + ) + await controller.connect( + endpoint=self.endpoint, + username=self.user, + password=self.secret, + cacert=self.cacert, + ) + + return controller + + def __del__(self): + self.logout() + + async def disconnect(self): + """Disconnect from the Juju controller.""" + if self.client: + print("Disconnecting Juju controller") + await self.client.disconnect() + + def login(self): + """Login to the Juju controller.""" + if not self.client: + # Connect to the Juju API server + self.client = self.loop.run_until_complete(self.connect()) + return self.client + + def logout(self): + """Logout of the Juju controller.""" + + if self.loop: + print("Disconnecting from API") + self.loop.run_until_complete(self.disconnect()) + + def FormatApplicationName(self, *args): + """ + Generate a Juju-compatible Application name + + :param args tuple: Positional arguments to be used to construct the + application name. + + Limitations:: + - Only accepts characters a-z and non-consequitive dashes (-) + - Application name should not exceed 50 characters + + Examples:: + + FormatApplicationName("ping_pong_ns", "ping_vnf", "a") + """ + appname = "" + for c in "-".join(list(args)): + if c.isdigit(): + c = chr(97 + int(c)) + elif not c.isalpha(): + c = "-" + appname += c + + return re.sub('-+', '-', appname.lower()) + + def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index): + """Get the runtime application name of a VNF/VDU. + + This will generate an application name matching the name of the deployed charm, + given the right parameters. + + :param nsr_name str: The name of the running Network Service, as specified at instantiation. 
+ :param vnf_name str: The name of the VNF or VDU + :param vnf_member_index: The vnf-member-index as specified in the descriptor + """ + + application_name = self.FormatApplicationName(nsr_name, vnf_member_index, vnf_name) + + # This matches the logic used by the LCM + application_name = application_name[0:48] + vca_index = int(vnf_member_index) - 1 + application_name += '-' + chr(97 + vca_index // 26) + chr(97 + vca_index % 26) + + return application_name + + def ExecutePrimitiveGetOutput(self, application, primitive, params={}, timeout=600): + """Execute a single primitive and return it's output. + + This is a blocking method that will execute a single primitive and wait + for its completion before return it's output. + + :param application str: The application name provided by `GetApplicationName`. + :param primitive str: The name of the primitive to execute. + :param params list: A list of parameters. + :param timeout int: A timeout, in seconds, to wait for the primitive to finish. Defaults to 600 seconds. + """ + uuid = self.ExecutePrimitive(application, primitive, params) + + status = None + output = None + + starttime = time.time() + while(time.time() < starttime + timeout): + status = self.GetPrimitiveStatus(uuid) + if status in ['completed', 'failed']: + break + time.sleep(10) + + # When the primitive is done, get the output + if status in ['completed', 'failed']: + output = self.GetPrimitiveOutput(uuid) + + return output + + def ExecutePrimitive(self, application, primitive, params={}): + """Execute a primitive. + + This is a non-blocking method to execute a primitive. It will return + the UUID of the queued primitive execution, which you can use + for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`. + + :param application string: The name of the application + :param primitive string: The name of the Primitive. + :param params list: A list of parameters. + + :returns uuid string: The UUID of the executed Primitive + """ + uuid = None + + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + # Get the application + if application in model.applications: + app = model.applications[application] + + # Execute the primitive + unit = app.units[0] + if unit: + action = self.loop.run_until_complete( + unit.run_action(primitive, **params) + ) + uuid = action.id + print("Executing action: {}".format(uuid)) + self.loop.run_until_complete( + model.disconnect() + ) + else: + # Invalid mapping: application not found. Raise exception + raise Exception("Application not found: {}".format(application)) + + return uuid + + def GetPrimitiveStatus(self, uuid): + """Get the status of a Primitive execution. + + This will return one of the following strings: + - pending + - running + - completed + - failed + + :param uuid string: The UUID of the executed Primitive. + :returns: The status of the executed Primitive + """ + status = None + + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + status = self.loop.run_until_complete( + model.get_action_status(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return status[uuid] + + def GetPrimitiveOutput(self, uuid): + """Get the output of a completed Primitive execution. + + + :param uuid string: The UUID of the executed Primitive. + :returns: The output of the execution, or None if it's still running. 
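+
+        Illustrative call sequence (a sketch, not part of the original library;
+        the application, primitive name, and parameters are placeholders)::
+
+            ns = NetworkService(user, secret)
+            app = ns.GetApplicationName(nsr_name, "ping_vnf", "1")
+            uuid = ns.ExecutePrimitive(app, "touch", {"filename": "/tmp/flag"})
+            while ns.GetPrimitiveStatus(uuid) not in ("completed", "failed"):
+                time.sleep(10)
+            output = ns.GetPrimitiveOutput(uuid)
+
+        ExecutePrimitiveGetOutput wraps this poll loop (with a timeout) in a
+        single blocking call.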
+ """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/sshproxy.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/sshproxy.py new file mode 100644 index 00000000..724b98cf --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms.osm/charms/osm/sshproxy.py @@ -0,0 +1,250 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +# from charmhelpers.core import unitdata +# from charmhelpers.core.hookenv import log + +import io +import ipaddress + +import os +import socket +import shlex +import traceback +import sys + +from subprocess import ( + check_call, + Popen, + CalledProcessError, + PIPE, +) + +def install_dependencies(): + # Make sure Python3 + PIP are available + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # This is needed when running as a k8s charm, as the ubuntu:latest + # image doesn't include either package. 
+ + # Update the apt cache + check_call(["apt-get", "update"]) + + # Install the Python3 package + check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + # Install the build dependencies for our requirements (paramiko) + check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + check_call( + [sys.executable, "-m", "pip", "install", "paramiko"], + ) + +try: + import paramiko +except Exception as ex: + install_dependencies() + import paramiko + +class SSHProxy: + private_key_path = "/root/.ssh/id_sshproxy" + public_key_path = "/root/.ssh/id_sshproxy.pub" + key_type = "rsa" + key_bits = 4096 + + def __init__(self, hostname: str, username: str, password: str = ""): + self.hostname = hostname + self.username = username + self.password = password + + @staticmethod + def generate_ssh_key(): + """Generate a 4096-bit rsa keypair.""" + if not os.path.exists(SSHProxy.private_key_path): + cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format( + SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path, + ) + + try: + check_call(cmd, shell=True) + except CalledProcessError: + return False + + return True + + @staticmethod + def write_ssh_keys(public, private): + """Write a 4096-bit rsa keypair.""" + with open(SSHProxy.public_key_path, "w") as f: + f.write(public) + f.close() + with open(SSHProxy.private_key_path, "w") as f: + f.write(private) + f.close() + + @staticmethod + def get_ssh_public_key(): + publickey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.public_key_path, "r") as f: + publickey = f.read() + return publickey + + @staticmethod + def get_ssh_private_key(): + privatekey = "" + if os.path.exists(SSHProxy.private_key_path): + with open(SSHProxy.private_key_path, "r") as f: + privatekey = f.read() + return privatekey + + @staticmethod + def has_ssh_key(): + return True if os.path.exists(SSHProxy.private_key_path) else False + + def run(self, cmd: str) -> (str, str): + """Run a command remotely via SSH. + + Note: The previous behavior was to run the command locally if SSH wasn't + configured, but that can lead to cases where execution succeeds when you'd + expect it not to. + """ + if isinstance(cmd, str): + cmd = shlex.split(cmd) + + host = self._get_hostname() + user = self.username + passwd = self.password + key = self.private_key_path + + # Make sure we have everything we need to connect + if host and user: + return self._ssh(cmd) + + raise Exception("Invalid SSH credentials.") + + def sftp(self, local, remote): + client = self._get_ssh_client() + + # Create an sftp connection from the underlying transport + sftp = paramiko.SFTPClient.from_transport(client.get_transport()) + sftp.put(local, remote) + client.close() + pass + + def verify_credentials(self): + """Verify the SSH credentials. 
+
+        :return (bool, str): Verified, Stderr
+        """
+        try:
+            (stdout, stderr) = self.run("hostname")
+        except CalledProcessError as e:
+            stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output))
+        except paramiko.ssh_exception.AuthenticationException as e:
+            stderr = "{}.".format(e)
+        except paramiko.ssh_exception.BadAuthenticationType as e:
+            stderr = "{}".format(e.explanation)
+        except paramiko.ssh_exception.BadHostKeyException as e:
+            stderr = "Host key mismatch: expected {} but got {}.".format(
+                e.expected_key, e.got_key,
+            )
+        except (TimeoutError, socket.timeout):
+            stderr = "Timeout attempting to reach {}".format(self._get_hostname())
+        except Exception:
+            stderr = "Unhandled exception: {}".format(traceback.format_exc())
+
+        if len(stderr) == 0:
+            return True, stderr
+        return False, stderr
+
+    ###################
+    # Private methods #
+    ###################
+    def _get_hostname(self):
+        """Get the hostname for the ssh target.
+
+        HACK: This function was added to work around an issue where the
+        ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
+        is the floating ip, and the second the non-floating ip, for an Openstack
+        instance.
+        """
+        return self.hostname.split(";")[0]
+
+    def _get_ssh_client(self):
+        """Return a connected Paramiko ssh object."""
+        client = paramiko.SSHClient()
+        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+        pkey = None
+
+        # Check for the auto-generated private key
+        if os.path.exists(self.private_key_path):
+            with open(self.private_key_path) as f:
+                pkey = paramiko.RSAKey.from_private_key(f)
+
+        ###########################################################################
+        # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL 5) where
+        # the server may not send the SSH_MSG_USERAUTH_BANNER message except when
+        # responding to an auth_none request. For example, paramiko will attempt
+        # to use password authentication when a password is set, but the server
+        # could deny that, instead requesting keyboard-interactive. The hack to
+        # workaround this is to attempt a reconnect, which will receive the right
+        # banner, and authentication can proceed. See the following for more info:
+        # https://github.com/paramiko/paramiko/issues/432
+        # https://github.com/paramiko/paramiko/pull/438
+        ###########################################################################
+
+        try:
+            client.connect(
+                self.hostname,
+                port=22,
+                username=self.username,
+                password=self.password,
+                pkey=pkey,
+            )
+        except paramiko.ssh_exception.SSHException as e:
+            if "Error reading SSH protocol banner" == str(e):
+                # Once more, with feeling: retry with the same credentials so
+                # the second attempt receives the correct banner.
+                client.connect(
+                    self.hostname,
+                    port=22,
+                    username=self.username,
+                    password=self.password,
+                    pkey=pkey,
+                )
+            else:
+                # Reraise the original exception
+                raise e
+
+        return client
+
+    def _ssh(self, cmd):
+        """Run an arbitrary command over SSH.
+ + Returns a tuple of (stdout, stderr) + """ + client = self._get_ssh_client() + + cmds = " ".join(cmd) + stdin, stdout, stderr = client.exec_command(cmds, get_pty=True) + retcode = stdout.channel.recv_exit_status() + client.close() # @TODO re-use connections + if retcode > 0: + output = stderr.read().strip() + raise CalledProcessError(returncode=retcode, cmd=cmd, output=output) + return ( + stdout.read().decode("utf-8").strip(), + stderr.read().decode("utf-8").strip(), + ) diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/LICENSE b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/README.md b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/README.md new file mode 100644 index 00000000..a5667070 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/README.md @@ -0,0 +1,29 @@ +# charms.requirementstxt + +A Python library, to aid the development of charms, that will automatically install Python dependencies as declared by a `requirements.txt` file in the root of the charm. + +## Usage + +Install the charms.requirementstxt library in your charm: + +```bash +git submodule add https://github.com/AdamIsrael/charms.osm mod/charms.osm +mkdir -p lib/charms +ln -s ../mod/charms.osm/charms/osm lib/charms/osm +``` + +Import the `charms.requirementstxt` library early, before any dependencies it may install. + +In `src/charm.py`: + +```python +#!/usr/bin/env python3 + +import sys + +sys.path.append("lib") + +import charms.requirementstxt +... 
+ +``` \ No newline at end of file diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/charms/requirementstxt.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/charms/requirementstxt.py new file mode 100644 index 00000000..298d5845 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/charms/charms/requirementstxt.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Requirements.txt support + +import sys + +sys.path.append("lib") + +from ops.framework import StoredState + +import os +import subprocess +import sys +from remote_pdb import RemotePdb + +REQUIREMENTS_TXT = "{}/requirements.txt".format(os.environ["JUJU_CHARM_DIR"]) + + +def install_requirements(): + if os.path.exists(REQUIREMENTS_TXT): + + # First, make sure python3 and python3-pip are installed + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + # Install the Python3 package + subprocess.check_call( + ["apt-get", "install", "-y", "python3", "python3-pip", "python3-paramiko"], + # Eat stdout so it's not returned in an action's stdout + # TODO: redirect to a file handle and log to juju log + # stdout=subprocess.DEVNULL, + ) + + # Lastly, install the python requirements + cmd = [sys.executable, "-m", "pip", "install", "-r", REQUIREMENTS_TXT] + # stdout = subprocess.check_output(cmd) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + + stdout, stderr = p.communicate() + + print(stdout) + print(stderr) + # subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", REQUIREMENTS_TXT], + # # Eat stdout so it's not returned in an action's stdout + # # TODO: redirect to a file handle and log to juju log + # # stdout=subprocess.DEVNULL, + # ) + + +# Use StoredState to make sure we're run exactly once automatically +# RemotePdb('127.0.0.1', 4444).set_trace() + +state = StoredState() + +installed = getattr(state, "requirements_txt_installed", None) +if not installed: + install_requirements() + state.requirements_txt_installed = True + diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.flake8 b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.flake8 new file mode 100644 index 00000000..d0224f38 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 160 +exclude = sandbox diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.gitignore b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.gitignore new file mode 100644 index 00000000..cf0f3716 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.gitignore @@ -0,0 +1,3 @@ +__pycache__ +/sandbox +.idea diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.travis.yml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.travis.yml new file mode 100644 index 00000000..adfdcb93 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/.travis.yml @@ -0,0 +1,15 @@ +dist: bionic + +language: python + +python: + - "3.6" + - "3.7" + +install: + - sudo apt update + - sudo apt install flake8 make + - pip3 install pyyaml autopep8 + +script: + - make test diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/LICENSE.txt b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/LICENSE.txt new file mode 
100644 index 00000000..d6456956 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/Makefile b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/Makefile new file mode 100644 index 00000000..3c822540 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/Makefile @@ -0,0 +1,41 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +test: lint + @python3 -m unittest + +lint: quotelint check-copyright + @autopep8 -r --aggressive --diff --exit-code . + @flake8 --config=.flake8 + +quotelint: + @x=$$(grep -rnH --include \*.py "\\\\[\"']"); \ + if [ "$$x" ]; then \ + echo "Please fix the quoting to avoid spurious backslashes:"; \ + echo "$$x"; \ + exit 1; \ + fi >&2 + +check-copyright: + @x=$$(find . -name \*.py -not -empty -type f -print0 | xargs -0 grep -L "^# Copyright"); \ + if [ "$$x" ]; then \ + echo "Please add copyright headers to the following files:"; \ + echo "$$x"; \ + exit 1; \ + fi >&2 + + + + +.PHONY: lint test quotelint check-copyright diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/README.md b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/README.md new file mode 100644 index 00000000..f684a828 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/README.md @@ -0,0 +1,120 @@ +# Operator Framework for Charms + +This framework is not yet stable and is subject to change, but is available +for early testing. + +## Getting Started + +The following overall structure for your charm directory is recommended: + +``` +. ++-- config.yaml ++-- metadata.yaml ++-- mod/ ++-- lib/ +| +-- ops -> ../mod/operator/ops ++-- src/ +| +-- charm.py ++-- hooks/ + +-- install -> ../src/charm.py + +-- start -> ../src/charm.py # for k8s charms per below +``` + +The `mod/` directory should contain the operator framework dependency as a git +submodule: + +``` +git submodule add https://github.com/canonical/operator mod/operator +``` + +Then symlink from the git submodule for the operator framework into the `lib/` +directory of your charm so it can be imported at run time: + +``` +ln -s ../mod/operator/ops lib/ops +``` + +Other dependencies included as git submodules can be added in the `mod/` +directory and symlinked into `lib/` as well. + +You can sync subsequent changes from the framework and other submodule +dependencies by running: + +``` +git submodule update +``` + +Those cloning and checking out the source for your charm for the first time +will need to run: + +``` +git submodule update --init +``` + +Your `src/charm.py` is the entry point for your charm logic. 
It should be set +to executable and use Python 3.6 or greater. At a minimum, it needs to define +a subclass of `CharmBase` and pass that into the framework's `main` function: + +```python +import sys +sys.path.append('lib') # noqa: E402 + +from ops.charm import CharmBase +from ops.main import main + + +class MyCharm(CharmBase): + pass + + +if __name__ == "__main__": + main(MyCharm) +``` + +This charm does nothing, because the `MyCharm` class passed to the operator +framework's `main` function is empty. Functionality can be added to the charm +by instructing it to observe particular Juju events when the `MyCharm` object +is initialized. For example, + +```python +class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.start, self.on_start) + + def on_start(self, event): + # Handle the start event here. + pass +``` + +Every standard event in Juju may be observed that way, and you can also easily +define your own events in your custom types. + +> The second argument to `observe` can be either the handler as a bound +> method, or the observer itself if the handler is a method of the observer +> that follows the conventional naming pattern. That is, in this case, we +> could have called just `self.framework.observe(self.on.start, self)`. + +The `hooks/` directory must contain a symlink to your `src/charm.py` entry +point so that Juju can call it. You only need to set up the `hooks/install` link +(`hooks/start` for K8s charms, until [lp#1854635](https://bugs.launchpad.net/juju/+bug/1854635) +is resolved), and the framework will create all others at runtime. + +Once your charm is ready, upload it to the charm store and deploy it as +normal with: + +``` +# Replace ${CHARM} with the name of the charm. +charm push . cs:~${USER}/${CHARM} +# Replace ${VERSION} with the version created by `charm push`. +charm release cs:~${USER}/${CHARM}-${VERSION} +charm grant cs:~${USER}/${CHARM}-${VERSION} everyone +# And now deploy your charm. +juju deploy cs:~${USER}/${CHARM} +``` + +Alternatively, to deploy directly from local disk, run: + +``` +juju deploy . +``` diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/__init__.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/charm.py new file mode 100755 index 00000000..71472f96 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/charm.py @@ -0,0 +1,306 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
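To illustrate the custom-event support mentioned in the README above, a minimal charm built on the bundled framework might look like the sketch below; `DatabaseReadyEvent`, `MyCharmEvents`, and the handler names are illustrative only and do not appear in this package.

```python
import sys
sys.path.append('lib')  # noqa: E402

from ops.charm import CharmBase, CharmEvents
from ops.framework import EventBase, EventSource
from ops.main import main


class DatabaseReadyEvent(EventBase):
    """Illustrative custom event type."""


class MyCharmEvents(CharmEvents):
    # Subclassing CharmEvents keeps every standard Juju event and adds our own.
    database_ready = EventSource(DatabaseReadyEvent)


class MyCharm(CharmBase):
    on = MyCharmEvents()

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self.on_config_changed)
        self.framework.observe(self.on.database_ready, self.on_database_ready)

    def on_config_changed(self, event):
        # Emit the custom event; any registered observers are notified.
        self.on.database_ready.emit()

    def on_database_ready(self, event):
        pass


if __name__ == "__main__":
    main(MyCharm)
```

As with the README examples, `main` creates the hook symlinks and dispatches the event for the current hook invocation.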
+ +import os + +import yaml + +from ops.framework import Object, EventSource, EventBase, EventsBase + + +class HookEvent(EventBase): + pass + + +class ActionEvent(EventBase): + + def defer(self): + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot): + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results): + self.framework.model._backend.action_set(results) + + def log(self, message): + self.framework.model._backend.action_log(message) + + def fail(self, message=''): + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + pass + + +class StartEvent(HookEvent): + pass + + +class StopEvent(HookEvent): + pass + + +class ConfigChangedEvent(HookEvent): + pass + + +class UpdateStatusEvent(HookEvent): + pass + + +class UpgradeCharmEvent(HookEvent): + pass + + +class PreSeriesUpgradeEvent(HookEvent): + pass + + +class PostSeriesUpgradeEvent(HookEvent): + pass + + +class LeaderElectedEvent(HookEvent): + pass + + +class LeaderSettingsChangedEvent(HookEvent): + pass + + +class RelationEvent(HookEvent): + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit and unit.app != app: + raise RuntimeError(f'cannot create RelationEvent with application {app} and unit {unit}') + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self): + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot): + self.relation = self.framework.model.get_relation(snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationJoinedEvent(RelationEvent): + pass + + +class RelationChangedEvent(RelationEvent): + pass + + +class RelationDepartedEvent(RelationEvent): + pass + + +class RelationBrokenEvent(RelationEvent): + pass + + +class StorageEvent(HookEvent): + pass + + +class StorageAttachedEvent(StorageEvent): + pass + + +class StorageDetachingEvent(StorageEvent): + pass + + +class CharmEvents(EventsBase): + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + + +class CharmBase(Object): + + on = CharmEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + for relation_name in self.framework.meta.relations: + 
relation_name = relation_name.replace('-', '_') + self.on.define_event(f'{relation_name}_relation_joined', RelationJoinedEvent) + self.on.define_event(f'{relation_name}_relation_changed', RelationChangedEvent) + self.on.define_event(f'{relation_name}_relation_departed', RelationDepartedEvent) + self.on.define_event(f'{relation_name}_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(f'{storage_name}_storage_attached', StorageAttachedEvent) + self.on.define_event(f'{storage_name}_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(f'{action_name}_action', ActionEvent) + + +class CharmMeta: + """Object containing the metadata for the charm. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + """ + + def __init__(self, raw={}, actions_raw={}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta('requires', name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta('provides', name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta('peers', name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', []) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml(cls, metadata, actions=None): + meta = yaml.safe_load(metadata) + raw_actions = {} + if actions is not None: + raw_actions = yaml.safe_load(actions) + return cls(meta, raw_actions) + + +class RelationMeta: + """Object containing metadata about a relation definition.""" + + def __init__(self, role, relation_name, raw): + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + 
self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/framework.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/framework.py new file mode 100755 index 00000000..d95eb61f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/framework.py @@ -0,0 +1,941 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import pickle +import marshal +import types +import sqlite3 +import collections +import collections.abc +import keyword +import weakref +from datetime import timedelta + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. 
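+ + For example, Handle(None, 'MyCharm', None) has the path 'MyCharm', while Handle(parent, 'db', '0') nested under that handle has the path 'MyCharm/db[0]'.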
+ """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = f"{parent}/{kind}[{key}]" + else: + self._path = f"{parent}/{kind}" + else: + if key: + self._path = f"{kind}[{key}]" + else: + self._path = f"{kind}" + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {path}") + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError(f"Event requires a subclass of EventBase as an argument, got {event_type}") + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def __set_name__(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + f'EventSource({self.event_type.__name__}) reused as ' + f'{self.emitter_type.__name__}.{self.event_kind} and ' + f'{emitter_type.__name__}.{event_kind}') + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event rather than charm_instance.on.event, + # but in that case it couldn't be emitted anyway, so there's no point to registering it. 
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return (f'<BoundEvent {self.event_type.__name__} bound to {type(self.emitter).__name__}.{self.event_kind} at {hex(id(self))}>') + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class Object: + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + @property + def meta(self): + return self.framework.meta + + @property + def charm_dir(self): + return self.framework.charm_dir + + +class EventsBase(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls -- a type to define an event on. + event_kind -- an attribute name that will be used to access the event. Must be a valid python identifier, not be a keyword or an existing attribute. + event_type -- a type of the event to define.
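+ + For example, given an EventsBase subclass named MyEvents (the names here are illustrative): + + class SomethingHappened(EventBase): + pass + + MyEvents.define_event("something_happened", SomethingHappened)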
+ """ + if not event_kind.isidentifier(): + raise RuntimeError(f'unable to define an event with event_kind that is not a valid python identifier: {event_kind}') + elif keyword.iskeyword(event_kind): + raise RuntimeError(f'unable to define an event with event_kind that is a python keyword: {event_kind}') + try: + getattr(cls, event_kind) + raise RuntimeError(f'unable to define an event with event_kind that overlaps with an existing type {cls} attribute: {event_kind}') + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor.__set_name__(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(EventsBase): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoSnapshotError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f'no snapshot data found for {self.handle_path} object' + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f"cannot restore {self.handle_path} since no class was registered for it" + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), isolation_level=None, timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute("CREATE TABLE notice (sequence INTEGER PRIMARY KEY AUTOINCREMENT, event_path TEXT, observer_path TEXT, method_name TEXT)") + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. 
For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path, snapshot_data): + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, snapshot_data)) + + def load_snapshot(self, handle_path): + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return row[0] + return None + + def drop_snapshot(self, handle_path): + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def save_notice(self, event_path, observer_path, method_name): + self._db.execute("INSERT INTO notice VALUES (NULL, ?, ?, ?)", (event_path, observer_path, method_name)) + + def drop_notice(self, event_path, observer_path, method_name): + self._db.execute("DELETE FROM notice WHERE event_path=? AND observer_path=? AND method_name=?", (event_path, observer_path, method_name)) + + def notices(self, event_path): + if event_path: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice WHERE event_path=? ORDER BY sequence", (event_path,)) + else: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice ORDER BY sequence") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. + model = None + meta = None + charm_dir = None + + def __init__(self, data_path, charm_dir, meta, model): + + super().__init__(self, None) + + self._data_path = data_path + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + self._storage = SQLiteStorage(data_path) + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError(f"two objects claiming to be {obj.handle.path} have been created") + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. 
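+ # Emitting the commit event below triggers StoredStateData.on_commit (observed via BoundStoredState), + # so any dirty stored state is flushed before the storage transaction is committed.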
+ self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. + """ + if type(value) not in self._type_known: + raise RuntimeError(f"cannot save {type(value).__name__} values before registering that type") + data = value.snapshot() + # Use marshal as a validator, enforcing the use of simple types. + marshal.dumps(data) + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(data) + self._storage.save_snapshot(value.handle.path, raw_data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + raw_data = self._storage.load_snapshot(handle.path) + if not raw_data: + raise NoSnapshotError(handle.path) + data = pickle.loads(raw_data) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event, observer): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self.on_something_happened) + + If the method to be called follows the name convention "on_", it + may be omitted from the observe call. That means the above is equivalent to: + + framework.observe(someobj.something_happened, self) + + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError(f'Framework.observe requires a BoundEvent as second parameter, got {bound_event}') + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError(f'event emitter {type(emitter).__name__} must have a "handle" attribute') + + method_name = None + if isinstance(observer, types.MethodType): + method_name = observer.__name__ + observer = observer.__self__ + else: + method_name = "on_" + event_kind + if not hasattr(observer, method_name): + raise RuntimeError(f'Observer method not provided explicitly and {type(observer).__name__} type has no "{method_name}" method') + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(getattr(observer, method_name)) + # Self isn't included in the params list, so the first arg will be the event. 
+ extra_params = list(sig.parameters.values())[1:] + if not sig.parameters: + raise TypeError(f'{type(observer).__name__}.{method_name} must accept event parameter') + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. + raise TypeError(f'{type(observer).__name__}.{method_name} has extra required parameter') + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from scratch in the next path. 
+ self.framework._forget(event) + + if not deferred: + self._storage.drop_snapshot(last_event_path) + + +class StoredStateChanged(EventBase): + pass + + +class StoredStateEvents(EventsBase): + changed = EventSource(StoredStateChanged) + + +class StoredStateData(Object): + + on = StoredStateEvents() + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. + self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. + if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError(f"attribute '{key}' is not stored") + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError(f"attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, str, bytes, list, dict, set)): + raise AttributeError(f"attribute '{key}' cannot be set to {type(value).__name__}: must be int/dict/list/etc") + + self._data[key] = _unwrap_stored(self._data, value) + self.on.changed.emit() + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is None: + self.parent_type = parent_type + elif self.parent_type is not parent_type: + raise RuntimeError("StoredState shared by {} and {}".format(self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + return self + + bound = parent.__dict__.get(self.attr_name) + if bound is None: + for attr_name, attr_value in parent_type.__dict__.items(): + if attr_value is self: + if self.attr_name and attr_name != self.attr_name: + parent_tname = parent_type.__name__ + raise RuntimeError(f"StoredState shared by {parent_tname}.{self.attr_name} and {parent_tname}.{attr_name}") + self.attr_name = attr_name + bound = BoundStoredState(parent, attr_name) + parent.__dict__[attr_name] = bound + break + else: + raise RuntimeError("cannot find StoredVariable attribute in type {}".format(parent_type.__name__)) + + return bound + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def 
_unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. 
+ + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/jujuversion.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/jujuversion.py new file mode 100755 index 00000000..5256f24f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/jujuversion.py @@ -0,0 +1,77 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
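The JujuVersion class that follows parses a Juju version string into major, minor, an optional tag/patch, and an optional build component, and makes versions orderable. A small illustrative sketch of its behaviour, assuming the `lib/ops` symlink layout described in the framework README so the module is importable (the version strings are arbitrary examples):

```python
from ops.jujuversion import JujuVersion

v = JujuVersion('2.7-rc1.3')
# The pattern splits a version into major, minor, an optional tag/patch, and an optional build.
assert (v.major, v.minor, v.tag, v.patch, v.build) == (2, 7, 'rc', 1, 3)

# Tagged (pre-release) versions sort before the corresponding untagged release.
assert JujuVersion('2.7.1') > v

# Plain strings are accepted on the right-hand side of comparisons.
assert JujuVersion('2.8.0') == '2.8.0'
```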
+ +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'^(?P<major>\d{1,9})\.(?P<minor>\d{1,9})((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))?(\.(?P<build>\d{1,9}))?$' + + def __init__(self, version): + m = re.match(self.PATTERN, version) + if not m: + raise RuntimeError(f'"{version}" is not a valid Juju version string') + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = f'{self.major}.{self.minor}-{self.tag}{self.patch}' + else: + s = f'{self.major}.{self.minor}.{self.patch}' + if self.build > 0: + s += f'.{self.build}' + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + return self.major == other.major and self.minor == other.minor\ + and self.tag == other.tag and self.build == other.build and self.patch == other.patch + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/main.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/main.py new file mode 100755 index 00000000..c8d5da2a --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/main.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model + +CHARM_STATE_FILE = '.unit-state.db' + + +def debugf(format, *args, **kwargs): + pass + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/ops/main.py structure.
+ charm_dir = Path(f'{__file__}/../../..').resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _load_metadata(charm_dir): + metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text()) + + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = yaml.safe_load(actions_meta.read_text()) + else: + actions_metadata = {} + return metadata, actions_metadata + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError(f"action event name {bound_event.event_kind} needs _action suffix") + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError(f'cannot create a symlink: unsupported event type {bound_event.event_type}') + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. For Windows + # it is always an absolute path (any symlinks are resolved) while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), event_dir) + + # Ignore the non-symlink files or directories assuming the charm author knows what they are doing. + debugf(f'Creating a new relative symlink at {event_path} pointing to {target_path}') + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file which is determined by inspecting + symlinks provided by the charm author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + debugf(f"event {event_name} not defined for {charm}") + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + debugf(f'Emitting Juju event {event_name}') + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError(f'invalid remote unit name: {remote_unit_name}') + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +def main(charm_class): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + + charm_dir = _get_charm_dir() + + # Process the Juju event relevant to the current hook execution + # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used + # in order to support simulation of events from debugging sessions. + # TODO: For Windows, when symlinks are used, this is not a valid method of getting an event name (see LP: #1854505). + juju_exec_path = Path(sys.argv[0]) + juju_event_name = juju_exec_path.name.replace('-', '_') + if juju_exec_path.parent.name == 'actions': + juju_event_name = f'{juju_event_name}_action' + + metadata, actions_metadata = _load_metadata(charm_dir) + meta = ops.charm.CharmMeta(metadata, actions_metadata) + unit_name = os.environ['JUJU_UNIT_NAME'] + model = ops.model.Model(unit_name, meta, ops.model.ModelBackend()) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. + charm_state_path = charm_dir / CHARM_STATE_FILE + framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model) + try: + charm = charm_class(framework, None) + + # When a charm is force-upgraded and a unit is in an error state Juju does not run upgrade-charm and + # instead runs the failed hook followed by config-changed. Given the nature of force-upgrading + # the hook setup code is not triggered on config-changed. + # 'start' event is included as Juju does not fire the install event for K8s charms (see LP: #1854635). + if juju_event_name in ('install', 'start', 'upgrade_charm') or juju_event_name.endswith('_storage_attached'): + _setup_event_links(charm_dir, charm) + + framework.reemit() + + _emit_charm_event(charm, juju_event_name) + + framework.commit() + finally: + framework.close() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/model.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/model.py new file mode 100644 index 00000000..a12dcca2 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/ops/model.py @@ -0,0 +1,679 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import weakref +import os +import shutil +import tempfile +import time +import datetime + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + + +class Model: + + def __init__(self, unit_name, meta, backend): + self._cache = ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + + def get_unit(self, unit_name): + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name): + return self._cache.get(Application, app_name) + + def get_relation(self, relation_name, relation_id=None): + """Get a specific Relation instance. + + If relation_id is given, this will return that Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + """ + return self.relations._get_unique(relation_name, relation_id) + + +class ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + @property + def status(self): + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for application {self} status: {value}') + + if not self._is_our_app: + raise RuntimeError(f'cannot to set status for a remote application {self}') + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + +class Unit: + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + 
self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + @property + def status(self): + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for unit {self} status: {value}') + + if not self._is_our_unit: + raise RuntimeError(f'cannot set status for a remote unit {self}') + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + def is_leader(self): + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError(f"cannot determine leadership status for remote applications: {self}") + + +class LazyMapping(Mapping, ABC): + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of Relation instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role == 'peers': + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError(f'relation name {relation_id} must be int or None not {type(relation_id).__name__}') + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
+ raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class Relation: + def __init__(self, relation_name, relation_id, is_peer, our_unit, backend, cache): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}:{self.id}>' + + +class RelationData(Mapping): + def __init__(self, relation, our_unit, backend): + self.relation = weakref.proxy(relation) + self._data = {our_unit: RelationDataContent(self.relation, our_unit, backend)} + self._data.update({our_unit.app: RelationDataContent(self.relation, our_unit.app, backend)}) + self._data.update({unit: RelationDataContent(self.relation, unit, backend) for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app: + self._data.update({self.relation.app: RelationDataContent(self.relation, self.relation.app, backend)}) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on whether this unit is a leader or not, + # but this is not guaranteed to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError(f'cannot set relation data for {self._entity.name}') + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. 
+ self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units.""" + + _statuses = {} + + def __init__(self, message): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + cls._statuses[cls.name] = cls + return super().__new__(cls) + + @classmethod + def from_name(cls, name, message): + return cls._statuses[name](message) + + +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message=None): + super().__init__(message or '') + + +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation for providing those services. + This is a "spinning" state, not an error state. It reflects activity on the unit itself, not on peers or related units. + """ + name = 'maintenance' + + +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the charm has not called status-set yet. + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which it is related is not running. + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names, backend): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name): + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
+ """ + if name not in self._paths: + raise RuntimeError(f'invalid resource name: {name}') + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + def __init__(self, backend): + self._backend = backend + + def set_spec(self, spec, k8s_resources=None): + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names, backend): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name): + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name, count=1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(f'cannot add storage with {storage_name} as it is not present in the charm metadata') + self._backend.storage_add(storage_name, count) + + +class Storage: + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + self._location = Path(self._backend.storage_get(f'{self.name}/{self.id}', "location")) + return self._location + + +class ModelError(Exception): + pass + + +class TooManyRelatedAppsError(ModelError): + def __init__(self, relation_name, num_related, max_supported): + super().__init__(f'Too many remote applications on {relation_name} ({num_related} > {max_supported})') + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + pass + + +class RelationNotFoundError(ModelError): + pass + + +class InvalidStatusError(ModelError): + pass + + +class ModelBackend: + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self): + self.unit_name = os.environ['JUJU_UNIT_NAME'] + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = 0 + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', 
'-r', str(relation_id), return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + try: + return self._run('relation-get', '-r', str(relation_id), '-', member_name, f'--app={is_app}', return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + try: + return self._run('relation-set', '-r', str(relation_id), f'{key}={value}', f'--app={is_app}') + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + if time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. + self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.json' + spec_path.write_text(json.dumps(spec)) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.json' + k8s_res_path.write_text(json.dumps(k8s_resources)) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(tmpdir) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + app -- A boolean indicating whether the status should be retrieved for a unit or an application. + """ + return self._run('status-get', '--include-data', f'--application={is_app}') + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + app -- A boolean indicating whether the status should be set for a unit or an application. 
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', f'--application={is_app}', status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError(f'storage count must be integer, got: {count} ({type(count)})') + self._run('storage-add', f'{name}={count}') + + def action_get(self): + return self._run(f'action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run(f'action-set', *[f"{k}={v}" for k, v in results.items()]) + + def action_log(self, message): + self._run(f'action-log', f"{message}") + + def action_fail(self, message=''): + self._run(f'action-fail', f"{message}") + + def network_get(self, endpoint_name, relation_id=None): + """Return network info provided by network-get for a given endpoint. + + endpoint_name -- A name of an endpoint (relation name or extra-binding name). + relation_id -- An optional relation id to get network info for. + """ + cmd = ['network-get', endpoint_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/setup.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/setup.py new file mode 100644 index 00000000..44765e20 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/setup.py @@ -0,0 +1,38 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from setuptools import setup + +with open("README.md", "r") as fh: + long_description = fh.read() + +setup( + name="ops", + version="0.0.1", + description="The Python library behind great charms", + long_description=long_description, + long_description_content_type="text/markdown", + license="Apache-2.0", + url="https://github.com/canonical/operator", + packages=["ops"], + classifiers=[ + "Development Status :: 4 - Beta", + + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + + "License :: OSI Approved :: Apache Software License", + ], +) diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/__init__.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-ids b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-ids new file mode 100755 index 00000000..a7e0ead2 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-ids @@ -0,0 +1,11 @@ +#!/bin/bash + +case $1 in + db) echo '["db:1"]' ;; + mon) echo '["mon:2"]' ;; + ha) echo '[]' ;; + db0) echo '[]' ;; + db1) echo '["db1:4"]' ;; + db2) echo '["db2:5", "db2:6"]' ;; + *) echo '[]' ;; +esac diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-list b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-list new file mode 100755 index 00000000..88490159 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/bin/relation-list @@ -0,0 +1,16 @@ +#!/bin/bash + +fail_not_found() { + 1>&2 echo "ERROR invalid value \"$1\" for option -r: relation not found" + exit 2 +} + +case $2 in + 1) echo '["remote/0"]' ;; + 2) echo '["remote/0"]' ;; + 3) fail_not_found $2 ;; + 4) echo '["remoteapp1/0"]' ;; + 5) echo '["remoteapp1/0"]' ;; + 6) echo '["remoteapp2/0"]' ;; + *) fail_not_found $2 ;; +esac diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/config.yaml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/config.yaml new file mode 100644 index 00000000..ffc01860 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/config.yaml @@ -0,0 +1 @@ +"options": {} diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/__init__.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/__init__.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/charm.py new file mode 100755 index 00000000..71472f96 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/charm.py @@ -0,0 +1,306 @@ +# Copyright 2019 Canonical Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import yaml + +from ops.framework import Object, EventSource, EventBase, EventsBase + + +class HookEvent(EventBase): + pass + + +class ActionEvent(EventBase): + + def defer(self): + raise RuntimeError('cannot defer action events') + + def restore(self, snapshot): + env_action_name = os.environ.get('JUJU_ACTION_NAME') + event_action_name = self.handle.kind[:-len('_action')].replace('_', '-') + if event_action_name != env_action_name: + # This could only happen if the dev manually emits the action, or from a bug. + raise RuntimeError('action event kind does not match current action') + # Params are loaded at restore rather than __init__ because the model is not available in __init__. + self.params = self.framework.model._backend.action_get() + + def set_results(self, results): + self.framework.model._backend.action_set(results) + + def log(self, message): + self.framework.model._backend.action_log(message) + + def fail(self, message=''): + self.framework.model._backend.action_fail(message) + + +class InstallEvent(HookEvent): + pass + + +class StartEvent(HookEvent): + pass + + +class StopEvent(HookEvent): + pass + + +class ConfigChangedEvent(HookEvent): + pass + + +class UpdateStatusEvent(HookEvent): + pass + + +class UpgradeCharmEvent(HookEvent): + pass + + +class PreSeriesUpgradeEvent(HookEvent): + pass + + +class PostSeriesUpgradeEvent(HookEvent): + pass + + +class LeaderElectedEvent(HookEvent): + pass + + +class LeaderSettingsChangedEvent(HookEvent): + pass + + +class RelationEvent(HookEvent): + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit and unit.app != app: + raise RuntimeError(f'cannot create RelationEvent with application {app} and unit {unit}') + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self): + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot): + self.relation = self.framework.model.get_relation(snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationJoinedEvent(RelationEvent): + pass + + +class RelationChangedEvent(RelationEvent): + pass + + +class RelationDepartedEvent(RelationEvent): + pass + + +class RelationBrokenEvent(RelationEvent): + pass + + +class StorageEvent(HookEvent): + pass + + +class StorageAttachedEvent(StorageEvent): + pass + + +class StorageDetachingEvent(StorageEvent): + pass + + +class CharmEvents(EventsBase): + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + update_status = 
EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + + +class CharmBase(Object): + + on = CharmEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(f'{relation_name}_relation_joined', RelationJoinedEvent) + self.on.define_event(f'{relation_name}_relation_changed', RelationChangedEvent) + self.on.define_event(f'{relation_name}_relation_departed', RelationDepartedEvent) + self.on.define_event(f'{relation_name}_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(f'{storage_name}_storage_attached', StorageAttachedEvent) + self.on.define_event(f'{storage_name}_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(f'{action_name}_action', ActionEvent) + + +class CharmMeta: + """Object containing the metadata for the charm. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. 
+ """ + + def __init__(self, raw={}, actions_raw={}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta('requires', name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta('provides', name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta('peers', name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', []) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml(cls, metadata, actions=None): + meta = yaml.safe_load(metadata) + raw_actions = {} + if actions is not None: + raw_actions = yaml.safe_load(actions) + return cls(meta, raw_actions) + + +class RelationMeta: + """Object containing metadata about a relation definition.""" + + def __init__(self, role, relation_name, raw): + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] 
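+
+# Usage sketch (illustrative, with hypothetical metadata): building a CharmMeta
+# from inline YAML strings for a charm with one 'db' requires relation and one
+# 'touch' action.
+#
+#     meta = CharmMeta.from_yaml(
+#         '{name: sample, requires: {db: {interface: mysql}}}',
+#         '{touch: {description: Touch a file}}')
+#     assert meta.relations['db'].interface_name == 'mysql'
+#     assert meta.relations['db'].role == 'requires'
+#     assert 'touch' in meta.actions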
diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/framework.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/framework.py new file mode 100755 index 00000000..d95eb61f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/framework.py @@ -0,0 +1,941 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import pickle +import marshal +import types +import sqlite3 +import collections +import collections.abc +import keyword +import weakref +from datetime import timedelta + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = f"{parent}/{kind}[{key}]" + else: + self._path = f"{parent}/{kind}" + else: + if key: + self._path = f"{kind}[{key}]" + else: + self._path = f"{kind}" + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {path}") + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. 
+        """
+        self.deferred = False
+
+
+class EventSource:
+    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+    It is generally used as:
+
+        class SomethingHappened(EventBase):
+            pass
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    With that, instances of that type will offer the someobj.something_happened
+    attribute which is a BoundEvent and may be used to emit and observe the event.
+    """
+
+    def __init__(self, event_type):
+        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+            raise RuntimeError(f"Event requires a subclass of EventBase as an argument, got {event_type}")
+        self.event_type = event_type
+        self.event_kind = None
+        self.emitter_type = None
+
+    def __set_name__(self, emitter_type, event_kind):
+        if self.event_kind is not None:
+            raise RuntimeError(
+                f'EventSource({self.event_type.__name__}) reused as '
+                f'{self.emitter_type.__name__}.{self.event_kind} and '
+                f'{emitter_type.__name__}.{event_kind}')
+        self.event_kind = event_kind
+        self.emitter_type = emitter_type
+
+    def __get__(self, emitter, emitter_type=None):
+        if emitter is None:
+            return self
+        # Framework might not be available if accessed as CharmClass.on.event rather than charm_instance.on.event,
+        # but in that case it couldn't be emitted anyway, so there's no point to registering it.
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return (f'<BoundEvent {self.event_type.__name__} bound to '
+                f'{type(self.emitter).__name__}.{self.event_kind} at {hex(id(self.emitter))}>')
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class Object:
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+ + @property + def model(self): + return self.framework.model + + @property + def meta(self): + return self.framework.meta + + @property + def charm_dir(self): + return self.framework.charm_dir + + +class EventsBase(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls -- a type to define an event on. + event_kind -- an attribute name that will be used to access the event. Must be a valid python identifier, not be a keyword or an existing attribute. + event_type -- a type of the event to define. + """ + if not event_kind.isidentifier(): + raise RuntimeError(f'unable to define an event with event_kind that is not a valid python identifier: {event_kind}') + elif keyword.iskeyword(event_kind): + raise RuntimeError(f'unable to define an event with event_kind that is a python keyword: {event_kind}') + try: + getattr(cls, event_kind) + raise RuntimeError(f'unable to define an event with event_kind that overlaps with an existing type {cls} attribute: {event_kind}') + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor.__set_name__(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. 
+ event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(EventsBase): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoSnapshotError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f'no snapshot data found for {self.handle_path} object' + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return f"cannot restore {self.handle_path} since no class was registered for it" + + +class SQLiteStorage: + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), isolation_level=None, timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + # Make sure that the database is locked until the connection is closed, not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute("CREATE TABLE notice (sequence INTEGER PRIMARY KEY AUTOINCREMENT, event_path TEXT, observer_path TEXT, method_name TEXT)") + self._db.commit() + + def close(self): + self._db.close() + + def commit(self): + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path, snapshot_data): + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, snapshot_data)) + + def load_snapshot(self, handle_path): + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return row[0] + return None + + def drop_snapshot(self, handle_path): + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def save_notice(self, event_path, observer_path, method_name): + self._db.execute("INSERT INTO notice VALUES (NULL, ?, ?, ?)", (event_path, observer_path, method_name)) + + def drop_notice(self, event_path, observer_path, method_name): + self._db.execute("DELETE FROM notice WHERE event_path=? AND observer_path=? 
AND method_name=?", (event_path, observer_path, method_name)) + + def notices(self, event_path): + if event_path: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice WHERE event_path=? ORDER BY sequence", (event_path,)) + else: + c = self._db.execute("SELECT event_path, observer_path, method_name FROM notice ORDER BY sequence") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. + model = None + meta = None + charm_dir = None + + def __init__(self, data_path, charm_dir, meta, model): + + super().__init__(self, None) + + self._data_path = data_path + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + self._storage = SQLiteStorage(data_path) + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError(f"two objects claiming to be {obj.handle.path} have been created") + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. + """ + if type(value) not in self._type_known: + raise RuntimeError(f"cannot save {type(value).__name__} values before registering that type") + data = value.snapshot() + # Use marshal as a validator, enforcing the use of simple types. + marshal.dumps(data) + # Use pickle for serialization, so the value remains portable. 
+ raw_data = pickle.dumps(data) + self._storage.save_snapshot(value.handle.path, raw_data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + raw_data = self._storage.load_snapshot(handle.path) + if not raw_data: + raise NoSnapshotError(handle.path) + data = pickle.loads(raw_data) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event, observer): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self.on_something_happened) + + If the method to be called follows the name convention "on_", it + may be omitted from the observe call. That means the above is equivalent to: + + framework.observe(someobj.something_happened, self) + + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError(f'Framework.observe requires a BoundEvent as second parameter, got {bound_event}') + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError(f'event emitter {type(emitter).__name__} must have a "handle" attribute') + + method_name = None + if isinstance(observer, types.MethodType): + method_name = observer.__name__ + observer = observer.__self__ + else: + method_name = "on_" + event_kind + if not hasattr(observer, method_name): + raise RuntimeError(f'Observer method not provided explicitly and {type(observer).__name__} type has no "{method_name}" method') + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(getattr(observer, method_name)) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + if not sig.parameters: + raise TypeError(f'{type(observer).__name__}.{method_name} must accept event parameter') + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. + raise TypeError(f'{type(observer).__name__}.{method_name} has extra required parameter') + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. 
+ self.save_snapshot(event) + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from scratch in the next path. + self.framework._forget(event) + + if not deferred: + self._storage.drop_snapshot(last_event_path) + + +class StoredStateChanged(EventBase): + pass + + +class StoredStateEvents(EventsBase): + changed = EventSource(StoredStateChanged) + + +class StoredStateData(Object): + + on = StoredStateEvents() + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. + self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. 
+ if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError(f"attribute '{key}' is not stored") + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError(f"attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, str, bytes, list, dict, set)): + raise AttributeError(f"attribute '{key}' cannot be set to {type(value).__name__}: must be int/dict/list/etc") + + self._data[key] = _unwrap_stored(self._data, value) + self.on.changed.emit() + + def set_default(self, **kwargs): + """"Set the value of any given key if it has not already been set""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is None: + self.parent_type = parent_type + elif self.parent_type is not parent_type: + raise RuntimeError("StoredState shared by {} and {}".format(self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + return self + + bound = parent.__dict__.get(self.attr_name) + if bound is None: + for attr_name, attr_value in parent_type.__dict__.items(): + if attr_value is self: + if self.attr_name and attr_name != self.attr_name: + parent_tname = parent_type.__name__ + raise RuntimeError(f"StoredState shared by {parent_tname}.{self.attr_name} and {parent_tname}.{attr_name}") + self.attr_name = attr_name + bound = BoundStoredState(parent, attr_name) + parent.__dict__[attr_name] = bound + break + else: + raise RuntimeError("cannot find StoredVariable attribute in type {}".format(parent_type.__name__)) + + return bound + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, 
index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/jujuversion.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/jujuversion.py new file mode 100755 index 00000000..5256f24f --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/jujuversion.py @@ -0,0 +1,77 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'^(?P<major>\d{1,9})\.(?P<minor>\d{1,9})((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))?(\.(?P<build>\d{1,9}))?$' + + def __init__(self, version): + m = re.match(self.PATTERN, version) + if not m: + raise RuntimeError(f'"{version}" is not a valid Juju version string') + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = f'{self.major}.{self.minor}-{self.tag}{self.patch}' + else: + s = f'{self.major}.{self.minor}.{self.patch}' + if self.build > 0: + s += f'.{self.build}' + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + return self.major == other.major and self.minor == other.minor\ + and self.tag == other.tag and self.build == other.build and self.patch == other.patch + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError(f'cannot compare Juju version "{self}" with "{other}"') + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/main.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/main.py new file mode 100755 index 00000000..c8d5da2a --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/main.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
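JujuVersion above parses Juju's version strings (major.minor, an optional .patch or -tagN segment, and an optional .build component) and, via total_ordering, supports the full set of comparisons; the operators accept either another JujuVersion or a plain string. A short sketch of that behaviour, assuming the vendored lib/ directory is importable:

    import sys
    sys.path.append('lib')  # make the vendored ops package importable

    from ops.jujuversion import JujuVersion

    v = JujuVersion('2.7.1')
    assert (v.major, v.minor, v.patch, v.build) == (2, 7, 1, 0)
    assert v == '2.7.1.0'                       # a zero build number compares equal
    assert JujuVersion('2.8-rc1.1') < '2.8.0'   # tagged releases sort before final ones
    assert repr(JujuVersion('2.8-rc1.1')) == '2.8-rc1.1'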
+ +import os +import sys +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model + +CHARM_STATE_FILE = '.unit-state.db' + + +def debugf(format, *args, **kwargs): + pass + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path(f'{__file__}/../../..').resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _load_metadata(charm_dir): + metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text()) + + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = yaml.safe_load(actions_meta.read_text()) + else: + actions_metadata = {} + return metadata, actions_metadata + + +def _create_event_link(charm, bound_event): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError(f"action event name {bound_event.event_kind} needs _action suffix") + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. + event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError(f'cannot create a symlink: unsupported event type {bound_event.event_type}') + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + # CPython has different implementations for populating sys.argv[0] for Linux and Windows. For Windows + # it is always an absolute path (any symlinks are resolved) while for Linux it can be a relative path. + target_path = os.path.relpath(os.path.realpath(sys.argv[0]), event_dir) + + # Ignore the non-symlink files or directories assuming the charm author knows what they are doing. + debugf(f'Creating a new relative symlink at {event_path} pointing to {target_path}') + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file which is determined by inspecting + symlinks provided by the charm author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + """ + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + debugf(f"event {event_name} not defined for {charm}") + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. 
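_create_event_link and _emit_charm_event above rely on a simple naming convention: hook and action executables use dashes while event kinds use underscores, and action event kinds carry an _action suffix that the executable name does not. A standalone sketch of that mapping; the helper function is ours, not part of main.py:

    from pathlib import Path


    def juju_event_name(exec_path):
        # Mirrors the dispatch convention used above: the event to emit is
        # derived from the name of the symlink Juju invoked (sys.argv[0]).
        path = Path(exec_path)
        name = path.name.replace('-', '_')
        if path.parent.name == 'actions':
            name += '_action'
        return name


    assert juju_event_name('hooks/config-changed') == 'config_changed'
    assert juju_event_name('hooks/db-relation-joined') == 'db_relation_joined'
    assert juju_event_name('actions/foo-bar') == 'foo_bar_action'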
+ if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + debugf(f'Emitting Juju event {event_name}') + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError(f'invalid remote unit name: {remote_unit_name}') + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +def main(charm_class): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + """ + + charm_dir = _get_charm_dir() + + # Process the Juju event relevant to the current hook execution + # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used + # in order to support simulation of events from debugging sessions. + # TODO: For Windows, when symlinks are used, this is not a valid method of getting an event name (see LP: #1854505). + juju_exec_path = Path(sys.argv[0]) + juju_event_name = juju_exec_path.name.replace('-', '_') + if juju_exec_path.parent.name == 'actions': + juju_event_name = f'{juju_event_name}_action' + + metadata, actions_metadata = _load_metadata(charm_dir) + meta = ops.charm.CharmMeta(metadata, actions_metadata) + unit_name = os.environ['JUJU_UNIT_NAME'] + model = ops.model.Model(unit_name, meta, ops.model.ModelBackend()) + + # TODO: If Juju unit agent crashes after exit(0) from the charm code + # the framework will commit the snapshot but Juju will not commit its + # operation. + charm_state_path = charm_dir / CHARM_STATE_FILE + framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model) + try: + charm = charm_class(framework, None) + + # When a charm is force-upgraded and a unit is in an error state Juju does not run upgrade-charm and + # instead runs the failed hook followed by config-changed. Given the nature of force-upgrading + # the hook setup code is not triggered on config-changed. + # 'start' event is included as Juju does not fire the install event for K8s charms (see LP: #1854635). + if juju_event_name in ('install', 'start', 'upgrade_charm') or juju_event_name.endswith('_storage_attached'): + _setup_event_links(charm_dir, charm) + + framework.reemit() + + _emit_charm_event(charm, juju_event_name) + + framework.commit() + finally: + framework.close() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/model.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/model.py new file mode 100644 index 00000000..a12dcca2 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/lib/ops/model.py @@ -0,0 +1,679 @@ +# Copyright 2019 Canonical Ltd. 
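main() above ties the pieces together: it loads metadata.yaml and actions.yaml, builds the Model and Framework, sets up event symlinks on install/start/upgrade-charm, re-emits any deferred events and finally emits the event named by argv[0]. A minimal entry point of the kind this loader expects could look as follows; the class name and layout are assumptions, in the spirit of the test charm later in this patch:

    #!/usr/bin/env python3
    import sys
    sys.path.append('lib')  # noqa: the vendored ops package lives under lib/

    from ops.charm import CharmBase
    from ops.main import main


    class MinimalCharm(CharmBase):

        def __init__(self, *args):
            super().__init__(*args)
            self.framework.observe(self.on.start, self)

        def on_start(self, event):
            pass  # a real charm would configure its workload here


    if __name__ == '__main__':
        main(MinimalCharm)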
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import weakref +import os +import shutil +import tempfile +import time +import datetime + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + + +class Model: + + def __init__(self, unit_name, meta, backend): + self._cache = ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + + def get_unit(self, unit_name): + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name): + return self._cache.get(Application, app_name) + + def get_relation(self, relation_name, relation_id=None): + """Get a specific Relation instance. + + If relation_id is given, this will return that Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. 
+ """ + return self.relations._get_unique(relation_name, relation_id) + + +class ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + @property + def status(self): + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for application {self} status: {value}') + + if not self._is_our_app: + raise RuntimeError(f'cannot to set status for a remote application {self}') + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + +class Unit: + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + @property + def status(self): + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value): + if not isinstance(value, StatusBase): + raise InvalidStatusError(f'invalid value provided for unit {self} status: {value}') + + if not self._is_our_unit: + raise RuntimeError(f'cannot set status for a remote unit {self}') + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' + + def is_leader(self): + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. 
+ return self._backend.is_leader() + else: + raise RuntimeError(f"cannot determine leadership status for remote applications: {self}") + + +class LazyMapping(Mapping, ABC): + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of Relation instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role == 'peers': + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError(f'relation name {relation_id} must be int or None not {type(relation_id).__name__}') + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. + raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class Relation: + def __init__(self, relation_name, relation_id, is_peer, our_unit, backend, cache): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. 
+ pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return f'<{type(self).__module__}.{type(self).__name__} {self.name}:{self.id}>' + + +class RelationData(Mapping): + def __init__(self, relation, our_unit, backend): + self.relation = weakref.proxy(relation) + self._data = {our_unit: RelationDataContent(self.relation, our_unit, backend)} + self._data.update({our_unit.app: RelationDataContent(self.relation, our_unit.app, backend)}) + self._data.update({unit: RelationDataContent(self.relation, unit, backend) for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app: + self._data.update({self.relation.app: RelationDataContent(self.relation, self.relation.app, backend)}) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on whether this unit is a leader or not, + # but this is not guaranteed to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError(f'cannot set relation data for {self._entity.name}') + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. + del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty string will + # remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units.""" + + _statuses = {} + + def __init__(self, message): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + cls._statuses[cls.name] = cls + return super().__new__(cls) + + @classmethod + def from_name(cls, name, message): + return cls._statuses[name](message) + + +class ActiveStatus(StatusBase): + """The unit is ready. 
+ + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message=None): + super().__init__(message or '') + + +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation for providing those services. + This is a "spinning" state, not an error state. It reflects activity on the unit itself, not on peers or related units. + """ + name = 'maintenance' + + +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the charm has not called status-set yet. + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which it is related is not running. + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names, backend): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name): + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. + """ + if name not in self._paths: + raise RuntimeError(f'invalid resource name: {name}') + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + def __init__(self, backend): + self._backend = backend + + def set_spec(self, spec, k8s_resources=None): + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names, backend): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name): + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name, count=1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. 
+ """ + if storage_name not in self._storage_map: + raise ModelError(f'cannot add storage with {storage_name} as it is not present in the charm metadata') + self._backend.storage_add(storage_name, count) + + +class Storage: + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + if self._location is None: + self._location = Path(self._backend.storage_get(f'{self.name}/{self.id}', "location")) + return self._location + + +class ModelError(Exception): + pass + + +class TooManyRelatedAppsError(ModelError): + def __init__(self, relation_name, num_related, max_supported): + super().__init__(f'Too many remote applications on {relation_name} ({num_related} > {max_supported})') + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + pass + + +class RelationNotFoundError(ModelError): + pass + + +class InvalidStatusError(ModelError): + pass + + +class ModelBackend: + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self): + self.unit_name = os.environ['JUJU_UNIT_NAME'] + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = 0 + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + try: + return self._run('relation-get', '-r', str(relation_id), '-', member_name, f'--app={is_app}', return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + try: + return self._run('relation-set', '-r', str(relation_id), f'{key}={value}', f'--app={is_app}') + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. 
+ """ + now = time.monotonic() + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + if time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. + self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.json' + spec_path.write_text(json.dumps(spec)) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.json' + k8s_res_path.write_text(json.dumps(k8s_resources)) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(tmpdir) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + app -- A boolean indicating whether the status should be retrieved for a unit or an application. + """ + return self._run('status-get', '--include-data', f'--application={is_app}') + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + app -- A boolean indicating whether the status should be set for a unit or an application. + """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', f'--application={is_app}', status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError(f'storage count must be integer, got: {count} ({type(count)})') + self._run('storage-add', f'{name}={count}') + + def action_get(self): + return self._run(f'action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run(f'action-set', *[f"{k}={v}" for k, v in results.items()]) + + def action_log(self, message): + self._run(f'action-log', f"{message}") + + def action_fail(self, message=''): + self._run(f'action-fail', f"{message}") + + def network_get(self, endpoint_name, relation_id=None): + """Return network info provided by network-get for a given endpoint. + + endpoint_name -- A name of an endpoint (relation name or extra-binding name). + relation_id -- An optional relation id to get network info for. 
+ """ + cmd = ['network-get', endpoint_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/metadata.yaml b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/metadata.yaml new file mode 100644 index 00000000..3b3aed87 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/metadata.yaml @@ -0,0 +1,26 @@ +name: main +summary: A charm used for testing the basic operation of the entrypoint code. +maintainer: Dmitrii Shcherbakov +description: A charm used for testing the basic operation of the entrypoint code. +tags: + - misc +series: + - bionic + - cosmic + - disco +min-juju-version: 2.7.1 +provides: + db: + interface: db +requires: + mon: + interface: monitoring +peers: + ha: + interface: cluster +subordinate: false +storage: + disks: + type: block + multiple: + range: 0- diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/src/charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/src/charm.py new file mode 100755 index 00000000..c20ae783 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/charms/test_main/src/charm.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import base64 +import pickle +import sys +sys.path.append('lib') # noqa + +from ops.charm import CharmBase +from ops.main import main + +import logging + +logger = logging.getLogger() + + +class Charm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + # This environment variable controls the test charm behavior. + charm_config = os.environ.get('CHARM_CONFIG') + if charm_config is not None: + self._charm_config = pickle.loads(base64.b64decode(charm_config)) + else: + self._charm_config = {} + + self._state_file = self._charm_config.get('STATE_FILE') + self._state = {} + + self._state['on_install'] = [] + self._state['on_start'] = [] + self._state['on_config_changed'] = [] + self._state['on_update_status'] = [] + self._state['on_leader_settings_changed'] = [] + self._state['on_db_relation_joined'] = [] + self._state['on_mon_relation_changed'] = [] + self._state['on_mon_relation_departed'] = [] + self._state['on_ha_relation_broken'] = [] + self._state['on_foo_bar_action'] = [] + self._state['on_start_action'] = [] + + # Observed event types per invocation. A list is used to preserve the order in which charm handlers have observed the events. 
+ self._state['observed_event_types'] = [] + + self.framework.observe(self.on.install, self) + self.framework.observe(self.on.start, self) + self.framework.observe(self.on.config_changed, self) + self.framework.observe(self.on.update_status, self) + self.framework.observe(self.on.leader_settings_changed, self) + # Test relation events with endpoints from different + # sections (provides, requires, peers) as well. + self.framework.observe(self.on.db_relation_joined, self) + self.framework.observe(self.on.mon_relation_changed, self) + self.framework.observe(self.on.mon_relation_departed, self) + self.framework.observe(self.on.ha_relation_broken, self) + + if self._charm_config.get('USE_ACTIONS'): + self.framework.observe(self.on.start_action, self) + self.framework.observe(self.on.foo_bar_action, self) + + def _write_state(self): + """Write state variables so that the parent process can read them. + + Each invocation will override the previous state which is intentional. + """ + if self._state_file is not None: + with open(self._state_file, 'wb') as f: + pickle.dump(self._state, f) + + def on_install(self, event): + self._state['on_install'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def on_start(self, event): + self._state['on_start'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def on_config_changed(self, event): + self._state['on_config_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + event.defer() + self._write_state() + + def on_update_status(self, event): + self._state['on_update_status'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def on_leader_settings_changed(self, event): + self._state['on_leader_settings_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def on_db_relation_joined(self, event): + assert event.app is not None, 'application name cannot be None for a relation-joined event' + self._state['on_db_relation_joined'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['db_relation_joined_data'] = event.snapshot() + self._write_state() + + def on_mon_relation_changed(self, event): + assert event.app is not None, 'application name cannot be None for a relation-changed event' + if os.environ.get('JUJU_REMOTE_UNIT'): + assert event.unit is not None, 'a unit name cannot be None for a relation-changed event associated with a remote unit' + self._state['on_mon_relation_changed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_changed_data'] = event.snapshot() + self._write_state() + + def on_mon_relation_departed(self, event): + assert event.app is not None, 'application name cannot be None for a relation-departed event' + self._state['on_mon_relation_departed'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._state['mon_relation_departed_data'] = event.snapshot() + self._write_state() + + def on_ha_relation_broken(self, event): + assert event.app is None, 'relation-broken events cannot have a reference to a remote application' + assert event.unit is None, 'relation broken events cannot have a reference to a remote unit' + self._state['on_ha_relation_broken'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + 
self._state['ha_relation_broken_data'] = event.snapshot() + self._write_state() + + def on_start_action(self, event): + assert event.handle.kind == 'start_action', 'event action name cannot be different from the one being handled' + self._state['on_start_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + def on_foo_bar_action(self, event): + assert event.handle.kind == 'foo_bar_action', 'event action name cannot be different from the one being handled' + self._state['on_foo_bar_action'].append(type(event)) + self._state['observed_event_types'].append(type(event)) + self._write_state() + + +if __name__ == '__main__': + main(Charm) diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_charm.py new file mode 100755 index 00000000..26a943c4 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_charm.py @@ -0,0 +1,311 @@ +#!/usr/bin/python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +import tempfile +import shutil + +from pathlib import Path + +from ops.charm import ( + CharmBase, + CharmMeta, + CharmEvents, +) +from ops.framework import Framework, EventSource, EventBase +from ops.model import Model, ModelBackend + +from .test_helpers import fake_script, fake_script_calls + + +class TestCharm(unittest.TestCase): + + def setUp(self): + def restore_env(env): + os.environ.clear() + os.environ.update(env) + self.addCleanup(restore_env, os.environ.copy()) + + os.environ['PATH'] = f"{str(Path(__file__).parent / 'bin')}:{os.environ['PATH']}" + os.environ['JUJU_UNIT_NAME'] = 'local/0' + + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, self.tmpdir) + self.meta = CharmMeta() + + class CustomEvent(EventBase): + pass + + class TestCharmEvents(CharmEvents): + custom = EventSource(CustomEvent) + + # Relations events are defined dynamically and modify the class attributes. + # We use a subclass temporarily to prevent these side effects from leaking. 
+ CharmBase.on = TestCharmEvents() + + def cleanup(): + CharmBase.on = CharmEvents() + self.addCleanup(cleanup) + + def create_framework(self): + model = Model('local/0', self.meta, ModelBackend()) + framework = Framework(self.tmpdir / "framework.data", self.tmpdir, self.meta, model) + self.addCleanup(framework.close) + return framework + + def test_basic(self): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + self.started = False + framework.observe(self.on.start, self) + + def on_start(self, event): + self.started = True + + events = list(MyCharm.on.events()) + self.assertIn('install', events) + self.assertIn('custom', events) + + framework = self.create_framework() + charm = MyCharm(framework, None) + charm.on.start.emit() + + self.assertEqual(charm.started, True) + + def test_relation_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + for rel in ('req1', 'req-2', 'pro1', 'pro-2', 'peer1', 'peer-2'): + # Hook up relation events to generic handler. + self.framework.observe(self.on[rel].relation_joined, self.on_any_relation) + self.framework.observe(self.on[rel].relation_changed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_departed, self.on_any_relation) + self.framework.observe(self.on[rel].relation_broken, self.on_any_relation) + + def on_any_relation(self, event): + assert event.relation.name == 'req1' + assert event.relation.app.name == 'remote' + self.seen.append(type(event).__name__) + + # language=YAML + self.meta = CharmMeta.from_yaml(metadata=''' +name: my-charm +requires: + req1: + interface: req1 + req-2: + interface: req2 +provides: + pro1: + interface: pro1 + pro-2: + interface: pro2 +peers: + peer1: + interface: peer1 + peer-2: + interface: peer2 +''') + + charm = MyCharm(self.create_framework(), None) + + rel = charm.framework.model.get_relation('req1', 1) + unit = charm.framework.model.get_unit('remote/0') + charm.on['req1'].relation_joined.emit(rel, unit) + charm.on['req1'].relation_changed.emit(rel, unit) + charm.on['req-2'].relation_changed.emit(rel, unit) + charm.on['pro1'].relation_departed.emit(rel, unit) + charm.on['pro-2'].relation_departed.emit(rel, unit) + charm.on['peer1'].relation_broken.emit(rel) + charm.on['peer-2'].relation_broken.emit(rel) + + self.assertEqual(charm.seen, [ + 'RelationJoinedEvent', + 'RelationChangedEvent', + 'RelationChangedEvent', + 'RelationDepartedEvent', + 'RelationDepartedEvent', + 'RelationBrokenEvent', + 'RelationBrokenEvent', + ]) + + def test_storage_events(self): + + class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.seen = [] + self.framework.observe(self.on['stor1'].storage_attached, self) + self.framework.observe(self.on['stor2'].storage_detaching, self) + self.framework.observe(self.on['stor3'].storage_attached, self) + self.framework.observe(self.on['stor-4'].storage_attached, self) + + def on_stor1_storage_attached(self, event): + self.seen.append(f'{type(event).__name__}') + + def on_stor2_storage_detaching(self, event): + self.seen.append(f'{type(event).__name__}') + + def on_stor3_storage_attached(self, event): + self.seen.append(f'{type(event).__name__}') + + def on_stor_4_storage_attached(self, event): + self.seen.append(f'{type(event).__name__}') + + # language=YAML + self.meta = CharmMeta.from_yaml(''' +name: my-charm +storage: + stor-4: + multiple: + range: 2-4 + type: filesystem + stor1: + type: filesystem + stor2: + multiple: + range: "2" + type: 
filesystem + stor3: + multiple: + range: 2- + type: filesystem +''') + + self.assertIsNone(self.meta.storages['stor1'].multiple_range) + self.assertEqual(self.meta.storages['stor2'].multiple_range, (2, 2)) + self.assertEqual(self.meta.storages['stor3'].multiple_range, (2, None)) + self.assertEqual(self.meta.storages['stor-4'].multiple_range, (2, 4)) + + charm = MyCharm(self.create_framework(), None) + + charm.on['stor1'].storage_attached.emit() + charm.on['stor2'].storage_detaching.emit() + charm.on['stor3'].storage_attached.emit() + charm.on['stor-4'].storage_attached.emit() + + self.assertEqual(charm.seen, [ + 'StorageAttachedEvent', + 'StorageDetachingEvent', + 'StorageAttachedEvent', + 'StorageAttachedEvent', + ]) + + @classmethod + def _get_action_test_meta(cls): + # language=YAML + return CharmMeta.from_yaml(metadata=''' +name: my-charm +''', actions=''' +foo-bar: + description: "Foos the bar." + params: + foo-name: + description: "A foo name to bar" + type: string + silent: + default: false + description: "" + type: boolean + required: foo-bar + title: foo-bar +start: + description: "Start the unit." +''') + + def _test_action_events(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.foo_bar_action, self) + framework.observe(self.on.start_action, self) + + def on_foo_bar_action(self, event): + self.seen_action_params = event.params + event.log('test-log') + event.set_results({'res': 'val with spaces'}) + event.fail('test-fail') + + def on_start_action(self, event): + pass + + fake_script(self, f'{cmd_type}-get', """echo '{"foo-name": "name", "silent": true}'""") + fake_script(self, f'{cmd_type}-set', "") + fake_script(self, f'{cmd_type}-log', "") + fake_script(self, f'{cmd_type}-fail', "") + self.meta = self._get_action_test_meta() + + os.environ[f'JUJU_{cmd_type.upper()}_NAME'] = 'foo-bar' + framework = self.create_framework() + charm = MyCharm(framework, None) + + events = list(MyCharm.on.events()) + self.assertIn('foo_bar_action', events) + self.assertIn('start_action', events) + + charm.on.foo_bar_action.emit() + self.assertEqual(charm.seen_action_params, {"foo-name": "name", "silent": True}) + self.assertEqual(fake_script_calls(self), [ + [f'{cmd_type}-get', '--format=json'], + [f'{cmd_type}-log', "test-log"], + [f'{cmd_type}-set', "res=val with spaces"], + [f'{cmd_type}-fail', "test-fail"], + ]) + + # Make sure that action events that do not match the current context are + # not possible to emit by hand. 
+ with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_events(self): + self._test_action_events('action') + + def _test_action_event_defer_fails(self, cmd_type): + + class MyCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + framework.observe(self.on.start_action, self) + + def on_start_action(self, event): + event.defer() + + fake_script(self, f'{cmd_type}-get', """echo '{"foo-name": "name", "silent": true}'""") + self.meta = self._get_action_test_meta() + + os.environ[f'JUJU_{cmd_type.upper()}_NAME'] = 'start' + framework = self.create_framework() + charm = MyCharm(framework, None) + + with self.assertRaises(RuntimeError): + charm.on.start_action.emit() + + def test_action_event_defer_fails(self): + self._test_action_event_defer_fails('action') + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_framework.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_framework.py new file mode 100755 index 00000000..fc364ea8 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_framework.py @@ -0,0 +1,1200 @@ +#!/usr/bin/python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
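The action tests above drive ActionEvent through its params, log(), set_results() and fail() API, with JUJU_ACTION_NAME selecting the event and fake action-* hook tools capturing the calls. A hedged sketch of a charm-side handler using the same API; the backup action and its target parameter are assumptions that would need a matching actions.yaml entry:

    import sys
    sys.path.append('lib')  # noqa

    from ops.charm import CharmBase
    from ops.main import main


    class BackupCharm(CharmBase):

        def __init__(self, *args):
            super().__init__(*args)
            self.framework.observe(self.on.backup_action, self)

        def on_backup_action(self, event):
            target = event.params.get('target', '/var/backups')  # filled in via action-get
            event.log(f'starting backup to {target}')             # forwarded to action-log
            try:
                # ... run the actual backup here ...
                event.set_results({'path': target})               # reported via action-set
            except Exception as e:
                event.fail(f'backup failed: {e}')                 # reported via action-fail


    if __name__ == '__main__':
        main(BackupCharm)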
+ +import unittest +import tempfile +import shutil +import gc +import datetime + +from pathlib import Path + +from ops.framework import ( + Framework, Handle, EventSource, EventsBase, EventBase, Object, PreCommitEvent, CommitEvent, + NoSnapshotError, StoredState, StoredList, BoundStoredState, StoredStateData, SQLiteStorage +) + + +class TestFramework(unittest.TestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, self.tmpdir) + default_timeout = SQLiteStorage.DB_LOCK_TIMEOUT + + def timeout_cleanup(): + SQLiteStorage.DB_LOCK_TIMEOUT = default_timeout + SQLiteStorage.DB_LOCK_TIMEOUT = datetime.timedelta(0) + self.addCleanup(timeout_cleanup) + + def create_framework(self): + framework = Framework(self.tmpdir / "framework.data", self.tmpdir, None, None) + self.addCleanup(framework.close) + return framework + + def test_handle_path(self): + cases = [ + (Handle(None, "root", None), "root"), + (Handle(None, "root", "1"), "root[1]"), + (Handle(Handle(None, "root", None), "child", None), "root/child"), + (Handle(Handle(None, "root", "1"), "child", "2"), "root[1]/child[2]"), + ] + for handle, path in cases: + self.assertEqual(str(handle), path) + self.assertEqual(Handle.from_path(path), handle) + + def test_handle_attrs_readonly(self): + handle = Handle(None, 'kind', 'key') + with self.assertRaises(AttributeError): + handle.parent = 'foo' + with self.assertRaises(AttributeError): + handle.kind = 'foo' + with self.assertRaises(AttributeError): + handle.key = 'foo' + with self.assertRaises(AttributeError): + handle.path = 'foo' + + def test_restore_unknown(self): + framework = self.create_framework() + + class Foo(Object): + pass + + handle = Handle(None, "a_foo", "some_key") + + framework.register_type(Foo, None, handle.kind) + + try: + framework.load_snapshot(handle) + except NoSnapshotError as e: + self.assertEqual(e.handle_path, str(handle)) + self.assertEqual(str(e), "no snapshot data found for a_foo[some_key] object") + else: + self.fail("exception NoSnapshotError not raised") + + def test_snapshot_roundtrip(self): + class Foo: + def __init__(self, handle, n): + self.handle = handle + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + self.my_n = snapshot["My N!"] + 1 + + handle = Handle(None, "a_foo", "some_key") + event = Foo(handle, 1) + + framework1 = self.create_framework() + framework1.register_type(Foo, None, handle.kind) + framework1.save_snapshot(event) + framework1.commit() + framework1.close() + + framework2 = self.create_framework() + framework2.register_type(Foo, None, handle.kind) + event2 = framework2.load_snapshot(handle) + self.assertEqual(event2.my_n, 2) + + framework2.save_snapshot(event2) + del event2 + gc.collect() + event3 = framework2.load_snapshot(handle) + self.assertEqual(event3.my_n, 3) + + framework2.drop_snapshot(event.handle) + framework2.commit() + framework2.close() + + framework3 = self.create_framework() + framework3.register_type(Foo, None, handle.kind) + + self.assertRaises(NoSnapshotError, framework3.load_snapshot, handle) + + def test_simple_event_observer(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_any(self, event): + self.seen.append("on_any:" + event.handle.kind) + + def 
on_foo(self, event): + self.seen.append("on_foo:" + event.handle.kind) + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs.on_any) + framework.observe(pub.bar, obs.on_any) + framework.observe(pub.foo, obs) # Method name defaults to on_. + + try: + framework.observe(pub.baz, obs) + except RuntimeError as e: + self.assertEqual(str(e), 'Observer method not provided explicitly and MyObserver type has no "on_baz" method') + else: + self.fail("RuntimeError not raised") + + pub.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_any:foo", "on_foo:foo", "on_any:bar"]) + + def test_bad_sig_observer(self): + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + baz = EventSource(MyEvent) + qux = EventSource(MyEvent) + + class MyObserver(Object): + def on_foo(self): + assert False, 'should not be reached' + + def on_bar(self, event, extra): + assert False, 'should not be reached' + + def on_baz(self, event, extra=None, *, k): + assert False, 'should not be reached' + + def on_qux(self, event, extra=None): + assert False, 'should not be reached' + + framework = self.create_framework() + pub = MyNotifier(framework, "pub") + obs = MyObserver(framework, "obs") + + with self.assertRaises(TypeError): + framework.observe(pub.foo, obs) + with self.assertRaises(TypeError): + framework.observe(pub.bar, obs) + with self.assertRaises(TypeError): + framework.observe(pub.baz, obs) + framework.observe(pub.qux, obs) + + def test_on_pre_commit_emitted(self): + framework = self.create_framework() + + class PreCommitObserver(Object): + + state = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self.state.myinitdata = 40 + + def on_pre_commit(self, event): + self.state.myinitdata = 41 + self.state.mydata = 42 + self.seen.append(type(event)) + + def on_commit(self, event): + # Modifications made here will not be persisted. 
+ self.state.myinitdata = 42 + self.state.mydata = 43 + self.state.myotherdata = 43 + self.seen.append(type(event)) + + obs = PreCommitObserver(framework, None) + + framework.observe(framework.on.pre_commit, obs.on_pre_commit) + + framework.commit() + + self.assertEqual(obs.state.myinitdata, 41) + self.assertEqual(obs.state.mydata, 42) + self.assertTrue(obs.seen, [PreCommitEvent, CommitEvent]) + framework.close() + + other_framework = self.create_framework() + + new_obs = PreCommitObserver(other_framework, None) + + self.assertEqual(obs.state.myinitdata, 41) + self.assertEqual(new_obs.state.mydata, 42) + + with self.assertRaises(AttributeError): + new_obs.state.myotherdata + + def test_defer_and_reemit(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier1(Object): + a = EventSource(MyEvent) + b = EventSource(MyEvent) + + class MyNotifier2(Object): + c = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + self.done = {} + + def on_any(self, event): + self.seen.append(event.handle.kind) + if not self.done.get(event.handle.kind): + event.defer() + + pub1 = MyNotifier1(framework, "1") + pub2 = MyNotifier2(framework, "1") + obs1 = MyObserver(framework, "1") + obs2 = MyObserver(framework, "2") + + framework.observe(pub1.a, obs1.on_any) + framework.observe(pub1.b, obs1.on_any) + framework.observe(pub1.a, obs2.on_any) + framework.observe(pub1.b, obs2.on_any) + framework.observe(pub2.c, obs2.on_any) + + pub1.a.emit() + pub1.b.emit() + pub2.c.emit() + + # Events remain stored because they were deferred. + ev_a_handle = Handle(pub1, "a", "1") + framework.load_snapshot(ev_a_handle) + ev_b_handle = Handle(pub1, "b", "2") + framework.load_snapshot(ev_b_handle) + ev_c_handle = Handle(pub2, "c", "3") + framework.load_snapshot(ev_c_handle) + # make sure the objects are gone before we reemit them + gc.collect() + + framework.reemit() + obs1.done["a"] = True + obs2.done["b"] = True + framework.reemit() + framework.reemit() + obs1.done["b"] = True + obs2.done["a"] = True + framework.reemit() + obs2.done["c"] = True + framework.reemit() + framework.reemit() + framework.reemit() + + self.assertEqual(" ".join(obs1.seen), "a b a b a b b b") + self.assertEqual(" ".join(obs2.seen), "a b c a b c a b c a c a c c") + + # Now the event objects must all be gone from storage. + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_a_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_b_handle) + self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_c_handle) + + def test_custom_event_data(self): + framework = self.create_framework() + + class MyEvent(EventBase): + def __init__(self, handle, n): + super().__init__(handle) + self.my_n = n + + def snapshot(self): + return {"My N!": self.my_n} + + def restore(self, snapshot): + super().restore(snapshot) + self.my_n = snapshot["My N!"] + 1 + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append(f"on_foo:{event.handle.kind}={event.my_n}") + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs) + + pub.foo.emit(1) + + framework.reemit() + + # Two things being checked here: + # + # 1. There's a restore roundtrip before the event is first observed. 
+ # That means the data is safe before it's ever seen, and the + # roundtrip logic is tested under normal circumstances. + # + # 2. The renotification restores from the pristine event, not + # from the one modified during the first restore (otherwise + # we'd get a foo=3). + # + self.assertEqual(obs.seen, ["on_foo:foo=2", "on_foo:foo=2"]) + + def test_weak_observer(self): + framework = self.create_framework() + + observed_events = [] + + class MyEvent(EventBase): + pass + + class MyEvents(EventsBase): + foo = EventSource(MyEvent) + + class MyNotifier(Object): + on = MyEvents() + + class MyObserver(Object): + def on_foo(self, event): + observed_events.append("foo") + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "2") + + framework.observe(pub.on.foo, obs) + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + # Now delete the observer, and note that when we emit the event, it + # doesn't update the local slice again + del obs + gc.collect() + pub.on.foo.emit() + self.assertEqual(observed_events, ["foo"]) + + def test_forget_and_multiple_objects(self): + framework = self.create_framework() + + class MyObject(Object): + pass + + o1 = MyObject(framework, "path") + # Creating a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + o2 = MyObject(framework, "path") + # Unless we _forget the object first + framework._forget(o1) + o2 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o2.handle.path) + # Deleting the tracked object should also work + del o2 + gc.collect() + o3 = MyObject(framework, "path") + self.assertEqual(o1.handle.path, o3.handle.path) + framework.close() + # Or using a second framework + framework_copy = self.create_framework() + o_copy = MyObject(framework_copy, "path") + self.assertEqual(o1.handle.path, o_copy.handle.path) + + def test_forget_and_multiple_objects_with_load_snapshot(self): + framework = self.create_framework() + + class MyObject(Object): + def __init__(self, parent, name): + super().__init__(parent, name) + self.value = name + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + framework.register_type(MyObject, None, MyObject.handle_kind) + o1 = MyObject(framework, "path") + framework.save_snapshot(o1) + framework.commit() + o_handle = o1.handle + del o1 + gc.collect() + o2 = framework.load_snapshot(o_handle) + # Trying to load_snapshot a second object at the same path should fail with RuntimeError + with self.assertRaises(RuntimeError): + framework.load_snapshot(o_handle) + # Unless we _forget the object first + framework._forget(o2) + o3 = framework.load_snapshot(o_handle) + self.assertEqual(o2.value, o3.value) + # A loaded object also prevents direct creation of an object + with self.assertRaises(RuntimeError): + MyObject(framework, "path") + framework.close() + # But we can create an object, or load a snapshot in a copy of the framework + framework_copy1 = self.create_framework() + o_copy1 = MyObject(framework_copy1, "path") + self.assertEqual(o_copy1.value, "path") + framework_copy1.close() + framework_copy2 = self.create_framework() + framework_copy2.register_type(MyObject, None, MyObject.handle_kind) + o_copy2 = framework_copy2.load_snapshot(o_handle) + self.assertEqual(o_copy2.value, "path") + + def test_events_base(self): + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyEvents(EventsBase): + foo = EventSource(MyEvent) + bar = EventSource(MyEvent) + + class MyNotifier(Object): + 
on = MyEvents() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append(f"on_foo:{event.handle.kind}") + event.defer() + + def on_bar(self, event): + self.seen.append(f"on_bar:{event.handle.kind}") + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + # Confirm that temporary persistence of BoundEvents doesn't cause errors, + # and that events can be observed. + for bound_event in [pub.on.foo, pub.on.bar]: + framework.observe(bound_event, obs) + + # Confirm that events can be emitted and seen. + pub.on.foo.emit() + + self.assertEqual(obs.seen, ["on_foo:foo"]) + + def test_conflicting_event_attributes(self): + class MyEvent(EventBase): + pass + + event = EventSource(MyEvent) + + class MyEvents(EventsBase): + foo = event + + with self.assertRaises(RuntimeError) as cm: + class OtherEvents(EventsBase): + foo = event + self.assertEqual( + str(cm.exception.__cause__), + "EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo") + + with self.assertRaises(RuntimeError) as cm: + class MyNotifier(Object): + on = MyEvents() + bar = event + self.assertEqual( + str(cm.exception.__cause__), + "EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar") + + def test_reemit_ignores_unknown_event_type(self): + # The event type may have been gone for good, and nobody cares, + # so this shouldn't be an error scenario. + + framework = self.create_framework() + + class MyEvent(EventBase): + pass + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append(event.handle) + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + framework.observe(pub.foo, obs) + pub.foo.emit() + + event_handle = obs.seen[0] + self.assertEqual(event_handle.kind, "foo") + + framework.commit() + framework.close() + + framework_copy = self.create_framework() + + # No errors on missing event types here. + framework_copy.reemit() + + # Register the type and check that the event is gone from storage. 
+ framework_copy.register_type(MyEvent, event_handle.parent, event_handle.kind) + self.assertRaises(NoSnapshotError, framework_copy.load_snapshot, event_handle) + + def test_auto_register_event_types(self): + framework = self.create_framework() + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class MyEvents(EventsBase): + foo = EventSource(MyFoo) + + class MyNotifier(Object): + on = MyEvents() + bar = EventSource(MyBar) + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append(f"on_foo:{type(event).__name__}:{event.handle.kind}") + event.defer() + + def on_bar(self, event): + self.seen.append(f"on_bar:{type(event).__name__}:{event.handle.kind}") + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + pub.on.foo.emit() + pub.bar.emit() + + framework.observe(pub.on.foo, obs) + framework.observe(pub.bar, obs) + + pub.on.foo.emit() + pub.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + def test_dynamic_event_types(self): + framework = self.create_framework() + + class MyEventsA(EventsBase): + handle_kind = 'on_a' + + class MyEventsB(EventsBase): + handle_kind = 'on_b' + + class MyNotifier(Object): + on_a = MyEventsA() + on_b = MyEventsB() + + class MyObserver(Object): + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append(f"on_foo:{type(event).__name__}:{event.handle.kind}") + event.defer() + + def on_bar(self, event): + self.seen.append(f"on_bar:{type(event).__name__}:{event.handle.kind}") + event.defer() + + pub = MyNotifier(framework, "1") + obs = MyObserver(framework, "1") + + class MyFoo(EventBase): + pass + + class MyBar(EventBase): + pass + + class DeadBeefEvent(EventBase): + pass + + class NoneEvent(EventBase): + pass + + pub.on_a.define_event("foo", MyFoo) + pub.on_b.define_event("bar", MyBar) + + framework.observe(pub.on_a.foo, obs) + framework.observe(pub.on_b.bar, obs) + + pub.on_a.foo.emit() + pub.on_b.bar.emit() + + self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"]) + + # Definitions remained local to the specific type. + self.assertRaises(AttributeError, lambda: pub.on_a.bar) + self.assertRaises(AttributeError, lambda: pub.on_b.foo) + + # Try to use an event name which is not a valid python identifier. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("dead-beef", DeadBeefEvent) + + # Try to use a python keyword for an event name. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("None", NoneEvent) + + # Try to override an existing attribute. + with self.assertRaises(RuntimeError): + pub.on_a.define_event("foo", MyFoo) + + def test_event_key_roundtrip(self): + class MyEvent(EventBase): + def __init__(self, handle, value): + super().__init__(handle) + self.value = value + + def snapshot(self): + return self.value + + def restore(self, value): + self.value = value + + class MyNotifier(Object): + foo = EventSource(MyEvent) + + class MyObserver(Object): + has_deferred = False + + def __init__(self, parent, key): + super().__init__(parent, key) + self.seen = [] + + def on_foo(self, event): + self.seen.append((event.handle.key, event.value)) + # Only defer the first event and once. 
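# (Deferring keeps the event in storage, so after this framework is closed a
# freshly created framework can re-emit it with its original key and payload;
# the second half of this test asserts exactly that.)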
+ if not MyObserver.has_deferred: + event.defer() + MyObserver.has_deferred = True + + framework1 = self.create_framework() + pub1 = MyNotifier(framework1, "pub") + obs1 = MyObserver(framework1, "obs") + framework1.observe(pub1.foo, obs1) + pub1.foo.emit('first') + self.assertEqual(obs1.seen, [('1', 'first')]) + + framework1.commit() + framework1.close() + del framework1 + + framework2 = self.create_framework() + pub2 = MyNotifier(framework2, "pub") + obs2 = MyObserver(framework2, "obs") + framework2.observe(pub2.foo, obs2) + pub2.foo.emit('second') + framework2.reemit() + + # First observer didn't get updated, since framework it was bound to is gone. + self.assertEqual(obs1.seen, [('1', 'first')]) + # Second observer saw the new event plus the reemit of the first event. + # (The event key goes up by 2 due to the pre-commit and commit events.) + self.assertEqual(obs2.seen, [('4', 'second'), ('1', 'first')]) + + def test_helper_properties(self): + framework = self.create_framework() + framework.model = 'test-model' + framework.meta = 'test-meta' + + my_obj = Object(framework, 'my_obj') + self.assertEqual(my_obj.model, framework.model) + self.assertEqual(my_obj.meta, framework.meta) + self.assertEqual(my_obj.charm_dir, framework.charm_dir) + + def test_ban_concurrent_frameworks(self): + f = self.create_framework() + with self.assertRaises(Exception) as cm: + self.create_framework() + self.assertIn('database is locked', str(cm.exception)) + f.close() + + +class TestStoredState(unittest.TestCase): + + def setUp(self): + self.tmpdir = Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, self.tmpdir) + + def create_framework(self, cls=Framework): + framework = cls(self.tmpdir / "framework.data", self.tmpdir, None, None) + self.addCleanup(framework.close) + return framework + + def test_basic_state_storage(self): + framework = self.create_framework() + + class SomeObject(Object): + state = StoredState() + + obj = SomeObject(framework, "1") + + try: + obj.state.foo + except AttributeError as e: + self.assertEqual(str(e), "attribute 'foo' is not stored") + else: + self.fail("AttributeError not raised") + + try: + obj.state.on = "nonono" + except AttributeError as e: + self.assertEqual(str(e), "attribute 'on' is reserved and cannot be set") + else: + self.fail("AttributeError not raised") + + obj.state.foo = 41 + obj.state.foo = 42 + obj.state.bar = "s" + + self.assertEqual(obj.state.foo, 42) + + framework.commit() + + # This won't be committed, and should not be seen. + obj.state.foo = 43 + + framework.close() + + # Since this has the same absolute object handle, it will get its state back. + framework_copy = self.create_framework() + obj_copy = SomeObject(framework_copy, "1") + self.assertEqual(obj_copy.state.foo, 42) + self.assertEqual(obj_copy.state.bar, "s") + + def test_mutable_types_invalid(self): + framework = self.create_framework() + + class SomeObject(Object): + state = StoredState() + + obj = SomeObject(framework, '1') + try: + class CustomObject: + pass + obj.state.foo = CustomObject() + except AttributeError as e: + self.assertEqual(str(e), "attribute 'foo' cannot be set to CustomObject: must be int/dict/list/etc") + else: + self.fail('AttributeError not raised') + + framework.commit() + + def test_mutable_types(self): + # Test and validation functions in a list of 2-tuples. + # Assignment and keywords like del are not supported in lambdas so functions are used instead. + test_operations = [( + lambda: {}, # Operand A. + None, # Operand B. + {}, # Expected result. 
+ lambda a, b: None, # Operation to perform. + lambda res, expected_res: self.assertEqual(res, expected_res) # Validation to perform. + ), ( + lambda: {}, + {'a': {}}, + {'a': {}}, + lambda a, b: a.update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {}}, + {'b': 'c'}, + {'a': {'b': 'c'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c'}}, + {'d': 'e'}, + {'a': {'b': 'c', 'd': 'e'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'a': {'b': 'c', 'd': 'e'}}, + 'd', + {'a': {'b': 'c'}}, + lambda a, b: a['a'].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': set()}, + 'a', + {'s': {'a'}}, + lambda a, b: a['s'].add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: {'s': {'a'}}, + 'a', + {'s': set()}, + lambda a, b: a['s'].discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + None, + [], + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [], + 'a', + ['a'], + lambda a, b: a.append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a'], + ['c'], + ['a', ['c']], + lambda a, b: a.append(b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList), + ) + ), ( + lambda: ['a', ['c']], + 'b', + ['b', 'a', ['c']], + lambda a, b: a.insert(0, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], 'a', ['c']], + lambda a, b: a.insert(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], ['c']], + # a[1] = b + lambda a, b: a.__setitem__(1, b), + lambda res, expected_res: ( + self.assertEqual(res, expected_res), + self.assertIsInstance(res[1], StoredList) + ), + ), ( + lambda: ['b', ['d'], 'a', ['c']], + 0, + [['d'], 'a', ['c']], + lambda a, b: a.pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: [['d'], 'a', ['c']], + ['d'], + ['a', ['c']], + lambda a, b: a.remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].append(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 1, + ['a', ['c']], + lambda a, b: a[1].pop(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].insert(1, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: ['a', ['c', 'd']], + 'd', + ['a', ['c']], + lambda a, b: a[1].remove(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + None, + set(), + lambda a, b: None, + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + 'a', + set(['a']), + lambda a, b: a.add(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(['a']), + 'a', + set(), + lambda a, b: a.discard(b), + lambda res, expected_res: self.assertEqual(res, expected_res) + ), ( + lambda: set(), + {'a'}, + set(), + # Nested sets are not allowed as sets themselves 
are not hashable. + lambda a, b: self.assertRaises(TypeError, a.add, b), + lambda res, expected_res: self.assertEqual(res, expected_res) + )] + + class SomeObject(Object): + state = StoredState() + + class WrappedFramework(Framework): + def __init__(self, data_path, charm_dir, meta, model): + super().__init__(data_path, charm_dir, meta, model) + self.snapshots = [] + + def save_snapshot(self, value): + if value.handle.path == 'SomeObject[1]/StoredStateData[state]': + self.snapshots.append((type(value), value.snapshot())) + return super().save_snapshot(value) + + # Validate correctness of modification operations. + for get_a, b, expected_res, op, validate_op in test_operations: + framework = self.create_framework(cls=WrappedFramework) + obj = SomeObject(framework, '1') + + obj.state.a = get_a() + self.assertTrue(isinstance(obj.state, BoundStoredState)) + + op(obj.state.a, b) + validate_op(obj.state.a, expected_res) + + obj.state.a = get_a() + framework.commit() + # We should see an update for initializing a + self.assertEqual(framework.snapshots, [ + (StoredStateData, {'a': get_a()}), + ]) + del obj + gc.collect() + obj_copy1 = SomeObject(framework, '1') + self.assertEqual(obj_copy1.state.a, get_a()) + + op(obj_copy1.state.a, b) + validate_op(obj_copy1.state.a, expected_res) + framework.commit() + framework.close() + + framework_copy = self.create_framework(cls=WrappedFramework) + + obj_copy2 = SomeObject(framework_copy, '1') + + validate_op(obj_copy2.state.a, expected_res) + + # Commit saves the pre-commit and commit events, and the framework event counter, but shouldn't update the stored state of my object + framework.snapshots.clear() + framework_copy.commit() + self.assertEqual(framework_copy.snapshots, []) + framework_copy.close() + + def test_comparison_operations(self): + test_operations = [( + {"1"}, # Operand A. + {"1", "2"}, # Operand B. + lambda a, b: a < b, # Operation to test. + True, # Result of op(A, B). + False, # Result of op(B, A). + ), ( + {"1"}, + {"1", "2"}, + lambda a, b: a > b, + False, + True + ), ( + # Empty set comparison. + set(), + set(), + lambda a, b: a == b, + True, + True + ), ( + {"a", "c"}, + {"c", "a"}, + lambda a, b: a == b, + True, + True + ), ( + dict(), + dict(), + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "2"}, + lambda a, b: a == b, + True, + True + ), ( + {"1": "2"}, + {"1": "3"}, + lambda a, b: a == b, + False, + False + ), ( + [], + [], + lambda a, b: a == b, + True, + True + ), ( + [1, 2], + [1, 2], + lambda a, b: a == b, + True, + True + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a <= b, + True, + False + ), ( + [1, 2, 5, 6], + [1, 2, 5, 8, 10], + lambda a, b: a < b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a > b, + True, + False + ), ( + [1, 2, 5, 8], + [1, 2, 5, 6, 10], + lambda a, b: a >= b, + True, + False + )] + + class SomeObject(Object): + state = StoredState() + + framework = self.create_framework() + + for i, (a, b, op, op_ab, op_ba) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj.state.a = a + self.assertEqual(op(obj.state.a, b), op_ab) + self.assertEqual(op(b, obj.state.a), op_ba) + + def test_set_operations(self): + test_operations = [( + {"1"}, # A set to test an operation against (other_set). + lambda a, b: a | b, # An operation to test. + {"1", "a", "b"}, # The expected result of operation(obj.state.set, other_set). + {"1", "a", "b"} # The expected result of operation(other_set, obj.state.set). 
+ ), ( + {"a", "c"}, + lambda a, b: a - b, + {"b"}, + {"c"} + ), ( + {"a", "c"}, + lambda a, b: a & b, + {"a"}, + {"a"} + ), ( + {"a", "c", "d"}, + lambda a, b: a ^ b, + {"b", "c", "d"}, + {"b", "c", "d"} + ), ( + set(), + lambda a, b: set(a), + {"a", "b"}, + set() + )] + + class SomeObject(Object): + state = StoredState() + + framework = self.create_framework() + + # Validate that operations between StoredSet and built-in sets only result in built-in sets being returned. + # Make sure that commutativity is preserved and that the original sets are not changed or used as a result. + for i, (variable_operand, operation, ab_res, ba_res) in enumerate(test_operations): + obj = SomeObject(framework, str(i)) + obj.state.set = {"a", "b"} + + for a, b, expected in [(obj.state.set, variable_operand, ab_res), (variable_operand, obj.state.set, ba_res)]: + old_a = set(a) + old_b = set(b) + + result = operation(a, b) + self.assertEqual(result, expected) + + # Common sanity checks + self.assertIsNot(obj.state.set._under, result) + self.assertIsNot(result, a) + self.assertIsNot(result, b) + self.assertEqual(a, old_a) + self.assertEqual(b, old_b) + + def test_set_default(self): + framework = self.create_framework() + + class StatefulObject(Object): + state = StoredState() + parent = StatefulObject(framework, 'key') + parent.state.set_default(foo=1) + self.assertEqual(parent.state.foo, 1) + parent.state.set_default(foo=2) + # foo was already set, so it doesn't get replaced + self.assertEqual(parent.state.foo, 1) + parent.state.set_default(foo=3, bar=4) + self.assertEqual(parent.state.foo, 1) + self.assertEqual(parent.state.bar, 4) + # reloading the state still leaves things at the default values + framework.commit() + del parent + parent = StatefulObject(framework, 'key') + parent.state.set_default(foo=5, bar=6) + self.assertEqual(parent.state.foo, 1) + self.assertEqual(parent.state.bar, 4) + # TODO(jam) 2020-01-30: is there a clean way to tell that parent.state._data.dirty is False? + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_helpers.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_helpers.py new file mode 100644 index 00000000..7d7379fe --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_helpers.py @@ -0,0 +1,76 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import pathlib +import subprocess +import shutil +import tempfile +import unittest + + +def fake_script(test_case, name, content): + if not hasattr(test_case, 'fake_script_path'): + fake_script_path = tempfile.mkdtemp('-fake_script') + os.environ['PATH'] = f'{fake_script_path}:{os.environ["PATH"]}' + + def cleanup(): + shutil.rmtree(fake_script_path) + os.environ['PATH'] = os.environ['PATH'].replace(fake_script_path + ':', '') + + test_case.addCleanup(cleanup) + test_case.fake_script_path = pathlib.Path(fake_script_path) + + with open(test_case.fake_script_path / name, "w") as f: + # Before executing the provided script, dump the provided arguments in calls.txt. + f.write('#!/bin/bash\n{ echo -n $(basename $0); for s in "$@"; do echo -n \\;$s; done; echo; } >> $(dirname $0)/calls.txt\n' + content) + os.chmod(test_case.fake_script_path / name, 0o755) + + +def fake_script_calls(test_case, clear=False): + with open(test_case.fake_script_path / 'calls.txt', 'r+') as f: + calls = [line.split(';') for line in f.read().splitlines()] + if clear: + f.truncate(0) + return calls + + +class FakeScriptTest(unittest.TestCase): + + def test_fake_script_works(self): + fake_script(self, 'foo', 'echo foo runs') + fake_script(self, 'bar', 'echo bar runs') + output = subprocess.getoutput('foo a "b c"; bar "d e" f') + self.assertEqual(output, 'foo runs\nbar runs') + self.assertEqual(fake_script_calls(self), [ + ['foo', 'a', 'b c'], + ['bar', 'd e', 'f'], + ]) + + def test_fake_script_clear(self): + fake_script(self, 'foo', 'echo foo runs') + + output = subprocess.getoutput('foo a "b c"') + self.assertEqual(output, 'foo runs') + + self.assertEqual(fake_script_calls(self, clear=True), [['foo', 'a', 'b c']]) + + fake_script(self, 'bar', 'echo bar runs') + + output = subprocess.getoutput('bar "d e" f') + self.assertEqual(output, 'bar runs') + + self.assertEqual(fake_script_calls(self, clear=True), [['bar', 'd e', 'f']]) + + self.assertEqual(fake_script_calls(self, clear=True), []) diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_jujuversion.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_jujuversion.py new file mode 100755 index 00000000..d19fd600 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_jujuversion.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
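As a bridge to the model tests later in this patch, a short illustrative sketch (not part of the vendored suite) of the same fake_script pattern applied to a real Juju hook tool rather than the dummy foo/bar scripts above; the test class name is made up and the import path depends on how the suite is laid out.

import json
import subprocess
import unittest

from test_helpers import fake_script, fake_script_calls  # adjust to the package layout


class FakeConfigGetExample(unittest.TestCase):

    def test_stubbed_config_get(self):
        # Replace the `config-get` hook tool with a script that prints JSON,
        # the same way test_model.py does further down in this patch.
        fake_script(self, 'config-get', '''echo '{"foo": "bar"}' ''')
        output = subprocess.getoutput('config-get --format=json')
        self.assertEqual(json.loads(output), {'foo': 'bar'})
        # The wrapper written by fake_script logged the invocation to calls.txt.
        self.assertEqual(fake_script_calls(self, clear=True),
                         [['config-get', '--format=json']])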
+ +import unittest + +from ops.jujuversion import JujuVersion + + +class TestJujuVersion(unittest.TestCase): + + def test_parsing(self): + test_cases = [ + ("0.0.0", 0, 0, '', 0, 0), + ("0.0.2", 0, 0, '', 2, 0), + ("0.1.0", 0, 1, '', 0, 0), + ("0.2.3", 0, 2, '', 3, 0), + ("10.234.3456", 10, 234, '', 3456, 0), + ("10.234.3456.1", 10, 234, '', 3456, 1), + ("1.21-alpha12", 1, 21, 'alpha', 12, 0), + ("1.21-alpha1.34", 1, 21, 'alpha', 1, 34), + ("2.7", 2, 7, '', 0, 0) + ] + + for vs, major, minor, tag, patch, build in test_cases: + v = JujuVersion(vs) + self.assertEqual(v.major, major) + self.assertEqual(v.minor, minor) + self.assertEqual(v.tag, tag) + self.assertEqual(v.patch, patch) + self.assertEqual(v.build, build) + + def test_parsing_errors(self): + invalid_versions = [ + "xyz", + "foo.bar", + "foo.bar.baz", + "dead.beef.ca.fe", + "1234567890.2.1", # The major version is too long. + "0.2..1", # Two periods next to each other. + "1.21.alpha1", # Tag comes after period. + "1.21-alpha", # No patch number but a tag is present. + "1.21-alpha1beta", # Non-numeric string after the patch number. + "1.21-alpha-dev", # Tag duplication. + "1.21-alpha_dev3", # Underscore in a tag. + "1.21-alpha123dev3", # Non-numeric string after the patch number. + ] + for v in invalid_versions: + with self.assertRaises(RuntimeError): + JujuVersion(v) + + def test_equality(self): + test_cases = [ + ("1.0.0", "1.0.0", True), + ("01.0.0", "1.0.0", True), + ("10.0.0", "9.0.0", False), + ("1.0.0", "1.0.1", False), + ("1.0.1", "1.0.0", False), + ("1.0.0", "1.1.0", False), + ("1.1.0", "1.0.0", False), + ("1.0.0", "2.0.0", False), + ("1.2-alpha1", "1.2.0", False), + ("1.2-alpha2", "1.2-alpha1", False), + ("1.2-alpha2.1", "1.2-alpha2", False), + ("1.2-alpha2.2", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2-alpha1", False), + ("1.2-beta1", "1.2-alpha2.1", False), + ("1.2-beta1", "1.2.0", False), + ("1.2.1", "1.2.0", False), + ("2.0.0", "1.0.0", False), + ("2.0.0.0", "2.0.0", True), + ("2.0.0.0", "2.0.0.0", True), + ("2.0.0.1", "2.0.0.0", False), + ("2.0.1.10", "2.0.0.0", False), + ] + + for a, b, expected in test_cases: + self.assertEqual(JujuVersion(a) == JujuVersion(b), expected) + self.assertEqual(JujuVersion(a) == b, expected) + + def test_comparison(self): + test_cases = [ + ("1.0.0", "1.0.0", False, True), + ("01.0.0", "1.0.0", False, True), + ("10.0.0", "9.0.0", False, False), + ("1.0.0", "1.0.1", True, True), + ("1.0.1", "1.0.0", False, False), + ("1.0.0", "1.1.0", True, True), + ("1.1.0", "1.0.0", False, False), + ("1.0.0", "2.0.0", True, True), + ("1.2-alpha1", "1.2.0", True, True), + ("1.2-alpha2", "1.2-alpha1", False, False), + ("1.2-alpha2.1", "1.2-alpha2", False, False), + ("1.2-alpha2.2", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2-alpha1", False, False), + ("1.2-beta1", "1.2-alpha2.1", False, False), + ("1.2-beta1", "1.2.0", True, True), + ("1.2.1", "1.2.0", False, False), + ("2.0.0", "1.0.0", False, False), + ("2.0.0.0", "2.0.0", False, True), + ("2.0.0.0", "2.0.0.0", False, True), + ("2.0.0.1", "2.0.0.0", False, False), + ("2.0.1.10", "2.0.0.0", False, False), + ] + + for a, b, expected_strict, expected_weak in test_cases: + self.assertEqual(JujuVersion(a) < JujuVersion(b), expected_strict) + self.assertEqual(JujuVersion(a) <= JujuVersion(b), expected_weak) + self.assertEqual(JujuVersion(b) > JujuVersion(a), expected_strict) + self.assertEqual(JujuVersion(b) >= JujuVersion(a), expected_weak) + # Implicit conversion. 
+ self.assertEqual(JujuVersion(a) < b, expected_strict) + self.assertEqual(JujuVersion(a) <= b, expected_weak) + self.assertEqual(b > JujuVersion(a), expected_strict) + self.assertEqual(b >= JujuVersion(a), expected_weak) + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_main.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_main.py new file mode 100755 index 00000000..a4ced948 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_main.py @@ -0,0 +1,362 @@ +#!/usr/bin/env python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import logging +import os +import sys +import subprocess +import pickle +import base64 +import tempfile +import shutil + +import importlib.util + +from pathlib import Path + +from ops.charm import ( + CharmBase, + CharmEvents, + HookEvent, + InstallEvent, + StartEvent, + ConfigChangedEvent, + UpgradeCharmEvent, + UpdateStatusEvent, + LeaderSettingsChangedEvent, + RelationJoinedEvent, + RelationChangedEvent, + RelationDepartedEvent, + RelationBrokenEvent, + RelationEvent, + StorageAttachedEvent, + ActionEvent, +) + +from .test_helpers import fake_script + +# This relies on the expected repository structure to find a path to source of the charm under test. +TEST_CHARM_DIR = Path(f'{__file__}/../charms/test_main').resolve() + +logger = logging.getLogger(__name__) + + +class SymlinkTargetError(Exception): + pass + + +class EventSpec: + def __init__(self, event_type, event_name, env_var=None, relation_id=None, remote_app=None, remote_unit=None, + charm_config=None): + self.event_type = event_type + self.event_name = event_name + self.env_var = env_var + self.relation_id = relation_id + self.remote_app = remote_app + self.remote_unit = remote_unit + self.charm_config = charm_config + + +class TestMain(unittest.TestCase): + + def setUp(self): + self._setup_charm_dir() + + _, tmp_file = tempfile.mkstemp() + self._state_file = Path(tmp_file) + self.addCleanup(self._state_file.unlink) + + # Relations events are defined dynamically and modify the class attributes. + # We use a subclass temporarily to prevent these side effects from leaking. 
+ class TestCharmEvents(CharmEvents): + pass + CharmBase.on = TestCharmEvents() + + def cleanup(): + shutil.rmtree(self.JUJU_CHARM_DIR) + CharmBase.on = CharmEvents() + self.addCleanup(cleanup) + + def _setup_charm_dir(self): + self.JUJU_CHARM_DIR = Path(tempfile.mkdtemp()) / 'test_main' + self.hooks_dir = self.JUJU_CHARM_DIR / 'hooks' + self.charm_exec_path = os.path.relpath(self.JUJU_CHARM_DIR / 'src/charm.py', self.hooks_dir) + shutil.copytree(TEST_CHARM_DIR, self.JUJU_CHARM_DIR) + + charm_spec = importlib.util.spec_from_file_location("charm", str(self.JUJU_CHARM_DIR / 'src/charm.py')) + self.charm_module = importlib.util.module_from_spec(charm_spec) + charm_spec.loader.exec_module(self.charm_module) + + self._prepare_initial_hooks() + + def _prepare_initial_hooks(self): + initial_hooks = ('install', 'start', 'upgrade-charm', 'disks-storage-attached') + self.hooks_dir.mkdir() + for hook in initial_hooks: + hook_path = self.hooks_dir / hook + hook_path.symlink_to(self.charm_exec_path) + + def _prepare_actions(self): + actions_meta = ''' +foo-bar: + description: Foos the bar. + title: foo-bar + params: + foo-name: + type: string + description: A foo name to bar. + silent: + type: boolean + description: + default: false + required: + - foo-name +start: + description: Start the unit.''' + actions_dir_name = 'actions' + actions_meta_file = 'actions.yaml' + + with open(self.JUJU_CHARM_DIR / actions_meta_file, 'w+') as f: + f.write(actions_meta) + actions_dir = self.JUJU_CHARM_DIR / actions_dir_name + actions_dir.mkdir() + for action_name in ('start', 'foo-bar'): + action_path = actions_dir / action_name + action_path.symlink_to(self.charm_exec_path) + + def _read_and_clear_state(self): + state = None + if self._state_file.stat().st_size: + with open(self._state_file, 'r+b') as state_file: + state = pickle.load(state_file) + state_file.truncate() + return state + + def _simulate_event(self, event_spec): + env = { + 'PATH': f"{str(Path(__file__).parent / 'bin')}:{os.environ['PATH']}", + 'JUJU_CHARM_DIR': self.JUJU_CHARM_DIR, + 'JUJU_UNIT_NAME': 'test_main/0', + 'CHARM_CONFIG': event_spec.charm_config, + } + if issubclass(event_spec.event_type, RelationEvent): + rel_name = event_spec.event_name.split('_')[0] + env.update({ + 'JUJU_RELATION': rel_name, + 'JUJU_RELATION_ID': str(event_spec.relation_id), + }) + remote_app = event_spec.remote_app + # For juju < 2.7 app name is extracted from JUJU_REMOTE_UNIT. + if remote_app is not None: + env['JUJU_REMOTE_APP'] = remote_app + + remote_unit = event_spec.remote_unit + if remote_unit is None: + remote_unit = '' + + env['JUJU_REMOTE_UNIT'] = remote_unit + else: + env.update({ + 'JUJU_REMOTE_UNIT': '', + 'JUJU_REMOTE_APP': '', + }) + if issubclass(event_spec.event_type, ActionEvent): + event_filename = event_spec.event_name[:-len('_action')].replace('_', '-') + env.update({ + event_spec.env_var: event_filename, + }) + if event_spec.env_var == 'JUJU_ACTION_NAME': + event_dir = 'actions' + else: + raise RuntimeError('invalid envar name specified for a action event') + else: + event_filename = event_spec.event_name.replace('_', '-') + event_dir = 'hooks' + event_file = self.JUJU_CHARM_DIR / event_dir / event_filename + # Note that sys.executable is used to make sure we are using the same + # interpreter for the child process to support virtual environments. 
+ subprocess.check_call([sys.executable, event_file], env=env, cwd=self.JUJU_CHARM_DIR) + return self._read_and_clear_state() + + def test_event_reemitted(self): + # base64 encoding is used to avoid null bytes. + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # First run "install" to make sure all hooks are set up. + state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [InstallEvent]) + + state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent]) + + # Re-emit should pick the deferred config-changed. + state = self._simulate_event(EventSpec(UpdateStatusEvent, 'update-status', charm_config=charm_config)) + self.assertEqual(state['observed_event_types'], [ConfigChangedEvent, UpdateStatusEvent]) + + def test_multiple_events_handled(self): + self._prepare_actions() + + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + actions_charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + 'USE_ACTIONS': True, + })) + + fake_script(self, 'action-get', "echo '{}'") + + # Sample events with a different amount of dashes used + # and with endpoints from different sections of metadata.yaml + events_under_test = [( + EventSpec(InstallEvent, 'install', charm_config=charm_config), + {}, + ), ( + EventSpec(StartEvent, 'start', charm_config=charm_config), + {}, + ), ( + EventSpec(UpdateStatusEvent, 'update_status', charm_config=charm_config), + {}, + ), ( + EventSpec(LeaderSettingsChangedEvent, 'leader_settings_changed', charm_config=charm_config), + {}, + ), ( + EventSpec(RelationJoinedEvent, 'db_relation_joined', relation_id=1, + remote_app='remote', remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'db', 'relation_id': 1, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', relation_id=2, + remote_app='remote', remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', relation_id=2, + remote_app='remote', remote_unit=None, charm_config=charm_config), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote', 'unit_name': None}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', relation_id=2, + remote_app='remote', remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationBrokenEvent, 'ha_relation_broken', relation_id=3, + charm_config=charm_config), + {'relation_name': 'ha', 'relation_id': 3}, + ), ( + # Events without a remote app specified (for Juju < 2.7). 
+ EventSpec(RelationJoinedEvent, 'db_relation_joined', relation_id=1, + remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'db', 'relation_id': 1, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationChangedEvent, 'mon_relation_changed', relation_id=2, + remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(RelationDepartedEvent, 'mon_relation_departed', relation_id=2, + remote_unit='remote/0', charm_config=charm_config), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote', 'unit_name': 'remote/0'}, + ), ( + EventSpec(ActionEvent, 'start_action', env_var='JUJU_ACTION_NAME', charm_config=actions_charm_config), + {}, + ), ( + EventSpec(ActionEvent, 'foo_bar_action', env_var='JUJU_ACTION_NAME', charm_config=actions_charm_config), + {}, + )] + + logger.debug(f'Expected events {events_under_test}') + + # First run "install" to make sure all hooks are set up. + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + + # Simulate hook executions for every event. + for event_spec, expected_event_data in events_under_test: + state = self._simulate_event(event_spec) + + state_key = f'on_{event_spec.event_name}' + handled_events = state.get(state_key, []) + + # Make sure that a handler for that event was called once. + self.assertEqual(len(handled_events), 1) + # Make sure the event handled by the Charm has the right type. + handled_event_type = handled_events[0] + self.assertEqual(handled_event_type, event_spec.event_type) + + self.assertEqual(state['observed_event_types'], [event_spec.event_type]) + + if event_spec.event_name in expected_event_data: + self.assertEqual(state[f'{event_spec.event_name}_data'], expected_event_data[event_spec.event_name]) + + def test_event_not_implemented(self): + """Make sure events without implementation do not cause non-zero exit. + """ + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + + # Simulate a scenario where there is a symlink for an event that + # a charm does not know how to handle. + hook_path = self.JUJU_CHARM_DIR / 'hooks/not-implemented-event' + # This will be cleared up in tearDown. + hook_path.symlink_to('install') + + try: + self._simulate_event(EventSpec(HookEvent, 'not-implemented-event', charm_config=charm_config)) + except subprocess.CalledProcessError: + self.fail('Event simulation for an unsupported event' + ' results in a non-zero exit code returned') + + def test_setup_event_links(self): + """Test auto-creation of symlinks caused by initial events. 
+ """ + all_event_hooks = [f'hooks/{e.replace("_", "-")}' for e in self.charm_module.Charm.on.events().keys()] + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + initial_events = { + EventSpec(InstallEvent, 'install', charm_config=charm_config), + EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config), + EventSpec(StartEvent, 'start', charm_config=charm_config), + EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config), + } + + def _assess_event_links(event_spec): + self.assertTrue(self.hooks_dir / event_spec.event_name in self.hooks_dir.iterdir()) + for event_hook in all_event_hooks: + self.assertTrue((self.JUJU_CHARM_DIR / event_hook).exists(), f'Missing hook: {event_hook}') + self.assertEqual(os.readlink(self.JUJU_CHARM_DIR / event_hook), self.charm_exec_path) + + for initial_event in initial_events: + self._setup_charm_dir() + + self._simulate_event(initial_event) + _assess_event_links(initial_event) + # Make sure it is idempotent. + self._simulate_event(initial_event) + _assess_event_links(initial_event) + + def test_setup_action_links(self): + charm_config = base64.b64encode(pickle.dumps({ + 'STATE_FILE': self._state_file, + })) + actions_yaml = self.JUJU_CHARM_DIR / 'actions.yaml' + actions_yaml.write_text('test: {}') + self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config)) + action_hook = self.JUJU_CHARM_DIR / 'actions' / 'test' + self.assertTrue(action_hook.exists()) + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_model.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_model.py new file mode 100755 index 00000000..544f4ff5 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/mod/operator/test/test_model.py @@ -0,0 +1,868 @@ +#!/usr/bin/python3 +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import pathlib +import unittest +import time +import re +import json + +import ops.model +import ops.charm +from ops.charm import RelationMeta + +from test.test_helpers import fake_script, fake_script_calls + + +class TestModel(unittest.TestCase): + + def setUp(self): + def restore_env(env): + os.environ.clear() + os.environ.update(env) + self.addCleanup(restore_env, os.environ.copy()) + + os.environ['JUJU_UNIT_NAME'] = 'myapp/0' + + self.backend = ops.model.ModelBackend() + meta = ops.charm.CharmMeta() + meta.relations = { + 'db0': RelationMeta('provides', 'db0', {'interface': 'db0', 'scope': 'global'}), + 'db1': RelationMeta('requires', 'db1', {'interface': 'db1', 'scope': 'global'}), + 'db2': RelationMeta('peers', 'db2', {'interface': 'db2', 'scope': 'global'}), + } + self.model = ops.model.Model('myapp/0', meta, self.backend) + + def test_model(self): + self.assertIs(self.model.app, self.model.unit.app) + + def test_relations_keys(self): + fake_script(self, 'relation-ids', + """[ "$1" = db2 ] && echo '["db2:5", "db2:6"]' || echo '[]'""") + fake_script(self, 'relation-list', + """([ "$2" = 5 ] && echo '["remoteapp1/0", "remoteapp1/1"]') || ([ "$2" = 6 ] && echo '["remoteapp2/0"]') || exit 2""") + + for relation in self.model.relations['db2']: + self.assertIn(self.model.unit, relation.data) + unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys())) + self.assertIs(self.model.unit, unit_from_rel) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db2', '--format=json'], + ['relation-list', '-r', '5', '--format=json'], + ['relation-list', '-r', '6', '--format=json'] + ]) + + def test_get_relation(self): + err_msg = 'ERROR invalid value "$2" for option -r: relation not found' + + fake_script(self, 'relation-ids', + """([ "$1" = db1 ] && echo '["db1:4"]') || ([ "$1" = db2 ] && echo '["db2:5", "db2:6"]') || echo '[]'""") + fake_script(self, 'relation-list', + f"""([ "$2" = 4 ] && echo '["remoteapp1/0"]') || (echo {err_msg} >&2 ; exit 2)""") + fake_script(self, 'relation-get', + f"""echo {err_msg} >&2 ; exit 2""") + + with self.assertRaises(ops.model.ModelError): + self.model.get_relation('db1', 'db1:4') + db1_4 = self.model.get_relation('db1', 4) + self.assertIsInstance(db1_4, ops.model.Relation) + dead_rel = self.model.get_relation('db1', 7) + self.assertIsInstance(dead_rel, ops.model.Relation) + self.assertEqual(list(dead_rel.data.keys()), [self.model.unit, self.model.unit.app]) + self.assertEqual(dead_rel.data[self.model.unit], {}) + self.assertIsNone(self.model.get_relation('db0')) + self.assertIs(self.model.get_relation('db1'), db1_4) + with self.assertRaises(ops.model.TooManyRelatedAppsError): + self.model.get_relation('db2') + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-list', '-r', '7', '--format=json'], + ['relation-get', '-r', '7', '-', 'myapp/0', '--app=False', '--format=json'], + ['relation-ids', 'db0', '--format=json'], + ['relation-ids', 'db2', '--format=json'], + ['relation-list', '-r', '5', '--format=json'], + ['relation-list', '-r', '6', '--format=json'] + ]) + + def test_peer_relation_app(self): + meta = ops.charm.CharmMeta() + meta.relations = {'dbpeer': RelationMeta('peers', 'dbpeer', {'interface': 'dbpeer', 'scope': 'global'})} + self.model = ops.model.Model('myapp/0', meta, self.backend) + + err_msg = 'ERROR invalid value "$2" for option -r: relation not found' + fake_script(self, 'relation-ids', + '''([ "$1" 
= dbpeer ] && echo '["dbpeer:0"]') || echo "[]"''') + fake_script(self, 'relation-list', + f'''([ "$2" = 0 ] && echo "[]") || (echo {err_msg} >&2 ; exit 2)''') + + db1_4 = self.model.get_relation('dbpeer') + self.assertIs(db1_4.app, self.model.app) + + def test_remote_units_is_our(self): + fake_script(self, 'relation-ids', + """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', + """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + + for u in self.model.get_relation('db1').units: + self.assertFalse(u._is_our_unit) + self.assertFalse(u.app._is_our_app) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'] + ]) + + def test_our_unit_is_our(self): + self.assertTrue(self.model.unit._is_our_unit) + self.assertTrue(self.model.unit.app._is_our_app) + + def test_unit_relation_data(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "remoteapp1/0" ]) && echo '{"host": "remoteapp1-0"}' || exit 2""") + + random_unit = self.model._cache.get(ops.model.Unit, 'randomunit/0') + with self.assertRaises(KeyError): + self.model.get_relation('db1').data[random_unit] + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', self.model.get_relation('db1').units)) + self.assertEqual(self.model.get_relation('db1').data[remoteapp1_0], {'host': 'remoteapp1-0'}) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'remoteapp1/0', '--app=False', '--format=json'] + ]) + + def test_remote_app_relation_data(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + fake_script(self, 'relation-get', """[ "$2" = 4 ] && [ "$4" = remoteapp1 ] && echo '{"secret": "cafedeadbeef"}' || exit 2""") + + # Try to get relation data for an invalid remote application. + random_app = self.model._cache.get(ops.model.Application, 'randomapp') + with self.assertRaises(KeyError): + self.model.get_relation('db1').data[random_app] + + remoteapp1 = self.model.get_relation('db1').app + self.assertEqual(self.model.get_relation('db1').data[remoteapp1], {'secret': 'cafedeadbeef'}) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'remoteapp1', '--app=True', '--format=json'], + ]) + + def test_relation_data_modify_remote(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "remoteapp1/0" ]) && echo '{"host": "remoteapp1-0"}' || exit 2""") + + rel_db1 = self.model.get_relation('db1') + remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', self.model.get_relation('db1').units)) + # Force memory cache to be loaded. 
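# (The first access below is what triggers the single relation-get recorded in
# the expected script calls; later reads and the rejected write are served from
# the lazily populated cache, so no further relation-get calls appear.)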
+ self.assertIn('host', rel_db1.data[remoteapp1_0]) + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[remoteapp1_0]['foo'] = 'bar' + self.assertNotIn('foo', rel_db1.data[remoteapp1_0]) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'remoteapp1/0', '--app=False', '--format=json'] + ]) + + def test_relation_data_modify_our(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-set', '''[ "$2" = 4 ] && exit 0 || exit 2''') + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "myapp/0" ]) && echo '{"host": "bar"}' || exit 2""") + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + rel_db1.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db1.data[self.model.unit]['host'], 'bar') + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'myapp/0', '--app=False', '--format=json'], + ['relation-set', '-r', '4', 'host=bar', '--app=False'] + ]) + + def test_app_relation_data_modify_local_as_leader(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + fake_script(self, 'relation-get', """[ "$2" = 4 ] && [ "$4" = myapp ] && echo '{"password": "deadbeefcafe"}' || exit 2""") + fake_script(self, 'relation-set', """[ "$2" = 4 ] && exit 0 || exit 2""") + fake_script(self, 'is-leader', 'echo true') + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + rel_db1.data[local_app]['password'] = 'foo' + + self.assertEqual(rel_db1.data[local_app]['password'], 'foo') + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'myapp', '--app=True', '--format=json'], + ['is-leader', '--format=json'], + ['relation-set', '-r', '4', 'password=foo', '--app=True'], + ]) + + def test_app_relation_data_modify_local_as_minion(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + fake_script(self, 'relation-get', """[ "$2" = 4 ] && [ "$4" = myapp ] && echo '{"password": "deadbeefcafe"}' || exit 2""") + fake_script(self, 'is-leader', 'echo false') + + local_app = self.model.unit.app + + rel_db1 = self.model.get_relation('db1') + self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'}) + + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[local_app]['password'] = 'foobar' + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'myapp', '--app=True', '--format=json'], + ['is-leader', '--format=json'], + ]) + + def test_relation_data_del_key(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || 
echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-set', '''[ "$2" = 4 ] && exit 0 || exit 2''') + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "myapp/0" ]) && echo '{"host": "bar"}' || exit 2""") + + rel_db1 = self.model.get_relation('db1') + # Force memory cache to be loaded. + self.assertIn('host', rel_db1.data[self.model.unit]) + del rel_db1.data[self.model.unit]['host'] + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "myapp/0" ]) && echo '{}' || exit 2""") + self.assertNotIn('host', rel_db1.data[self.model.unit]) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['relation-get', '-r', '4', '-', 'myapp/0', '--app=False', '--format=json'], + ['relation-set', '-r', '4', 'host=', '--app=False'] + ]) + + def test_relation_set_fail(self): + fake_script(self, 'relation-ids', """[ "$1" = db2 ] && echo '["db2:5"]' || echo '[]'""") + fake_script(self, 'relation-list', + """[ "$2" = 5 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-get', """([ "$2" = 5 ] && [ "$4" = "myapp/0" ]) && echo '{"host": "myapp-0"}' || exit 2""") + fake_script(self, 'relation-set', 'exit 2') + + rel_db2 = self.model.relations['db2'][0] + # Force memory cache to be loaded. + self.assertIn('host', rel_db2.data[self.model.unit]) + with self.assertRaises(ops.model.ModelError): + rel_db2.data[self.model.unit]['host'] = 'bar' + self.assertEqual(rel_db2.data[self.model.unit]['host'], 'myapp-0') + with self.assertRaises(ops.model.ModelError): + del rel_db2.data[self.model.unit]['host'] + self.assertIn('host', rel_db2.data[self.model.unit]) + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db2', '--format=json'], + ['relation-list', '-r', '5', '--format=json'], + ['relation-get', '-r', '5', '-', 'myapp/0', '--app=False', '--format=json'], + ['relation-set', '-r', '5', 'host=bar', '--app=False'], + ['relation-set', '-r', '5', 'host=', '--app=False'] + ]) + + def test_relation_get_set_is_app_arg(self): + self.backend = ops.model.ModelBackend() + + # No is_app provided. + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval') + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity') + + # Invalid types for is_app. 
+ for is_app_v in [None, 1, 2.0, 'a', b'beef']: + with self.assertRaises(TypeError): + self.backend.relation_set(1, 'fookey', 'barval', is_app=is_app_v) + + with self.assertRaises(TypeError): + self.backend.relation_get(1, 'fooentity', is_app=is_app_v) + + def test_relation_data_type_check(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', + """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") + fake_script(self, 'relation-get', """([ "$2" = 4 ] && [ "$4" = "myapp/0" ]) && echo '{"host": "myapp-0"}' || exit 2""") + + rel_db1 = self.model.get_relation('db1') + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = 1 + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = {'foo': 'bar'} + with self.assertRaises(ops.model.RelationDataError): + rel_db1.data[self.model.unit]['foo'] = None + + self.assertEqual(fake_script_calls(self), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'] + ]) + + def test_config(self): + fake_script(self, 'config-get', """echo '{"foo":"foo","bar":1,"qux":true}'""") + self.assertEqual(self.model.config, { + 'foo': 'foo', + 'bar': 1, + 'qux': True, + }) + with self.assertRaises(TypeError): + # Confirm that we cannot modify config values. + self.model.config['foo'] = 'bar' + + self.assertEqual(fake_script_calls(self), [['config-get', '--format=json']]) + + def test_is_leader(self): + def check_remote_units(): + fake_script(self, 'relation-ids', + """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + + fake_script(self, 'relation-list', + """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + + # Cannot determine leadership for remote units. + for u in self.model.get_relation('db1').units: + with self.assertRaises(RuntimeError): + u.is_leader() + + fake_script(self, 'is-leader', 'echo true') + self.assertTrue(self.model.unit.is_leader()) + + check_remote_units() + + # Create a new model and backend to drop a cached is-leader output. + self.backend = ops.model.ModelBackend() + meta = ops.charm.CharmMeta() + meta.relations = { + 'db0': RelationMeta('provides', 'db0', {'interface': 'db0', 'scope': 'global'}), + 'db1': RelationMeta('requires', 'db1', {'interface': 'db1', 'scope': 'global'}), + 'db2': RelationMeta('peers', 'db2', {'interface': 'db2', 'scope': 'global'}), + } + self.model = ops.model.Model('myapp/0', meta, self.backend) + + fake_script(self, 'is-leader', 'echo false') + self.assertFalse(self.model.unit.is_leader()) + + check_remote_units() + + self.assertEqual(fake_script_calls(self), [ + ['is-leader', '--format=json'], + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ['is-leader', '--format=json'], + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ]) + + def test_is_leader_refresh(self): + # A sanity check. + self.assertGreater(time.monotonic(), ops.model.ModelBackend.LEASE_RENEWAL_PERIOD.total_seconds()) + + fake_script(self, 'is-leader', 'echo false') + self.assertFalse(self.model.unit.is_leader()) + + # Change the leadership status and force a recheck. + fake_script(self, 'is-leader', 'echo true') + self.backend._leader_check_time = 0 + self.assertTrue(self.model.unit.is_leader()) + + # Force a recheck without changing the leadership status. 
+ fake_script(self, 'is-leader', 'echo true') + self.backend._leader_check_time = 0 + self.assertTrue(self.model.unit.is_leader()) + + def test_resources(self): + meta = ops.charm.CharmMeta() + meta.resources = {'foo': None, 'bar': None} + model = ops.model.Model('myapp/0', meta, self.backend) + + with self.assertRaises(RuntimeError): + model.resources.fetch('qux') + + fake_script(self, 'resource-get', 'exit 1') + with self.assertRaises(ops.model.ModelError): + model.resources.fetch('foo') + + fake_script(self, 'resource-get', 'echo /var/lib/juju/agents/unit-test-0/resources/$1/$1.tgz') + self.assertEqual(model.resources.fetch('foo').name, 'foo.tgz') + self.assertEqual(model.resources.fetch('bar').name, 'bar.tgz') + + def test_pod_spec(self): + fake_script(self, 'pod-spec-set', """ + cat $2 > $(dirname $0)/spec.json + [[ -n $4 ]] && cat $4 > $(dirname $0)/k8s_res.json || true + """) + fake_script(self, 'is-leader', 'echo true') + spec_path = self.fake_script_path / 'spec.json' + k8s_res_path = self.fake_script_path / 'k8s_res.json' + + def check_calls(calls): + # There may 1 or 2 calls because of is-leader. + self.assertLessEqual(len(fake_calls), 2) + pod_spec_call = next(filter(lambda c: c[0] == 'pod-spec-set', calls)) + self.assertEqual(pod_spec_call[:2], ['pod-spec-set', '--file']) + # 8 bytes are used as of python 3.4.0, see Python bug #12015. + # Other characters are from POSIX 3.282 (Portable Filename Character Set) a subset of which Python's mkdtemp uses. + self.assertTrue(re.match('/tmp/tmp[A-Za-z0-9._-]{8}-pod-spec-set', pod_spec_call[2])) + + self.model.pod.set_spec({'foo': 'bar'}) + self.assertEqual(spec_path.read_text(), '{"foo": "bar"}') + self.assertFalse(k8s_res_path.exists()) + + fake_calls = fake_script_calls(self, clear=True) + check_calls(fake_calls) + + self.model.pod.set_spec({'bar': 'foo'}, {'qux': 'baz'}) + self.assertEqual(spec_path.read_text(), '{"bar": "foo"}') + self.assertEqual(k8s_res_path.read_text(), '{"qux": "baz"}') + + fake_calls = fake_script_calls(self, clear=True) + check_calls(fake_calls) + + # Create a new model to drop is-leader caching result. 
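
test_pod_spec covers Model.pod.set_spec(), the call a Kubernetes charm uses to hand its pod spec to Juju; per the test, a non-leader unit gets a ModelError. A minimal, assumption-labelled sketch of a charm using it follows (the container image and port are placeholders, not values from this package).

    # Illustrative pod-spec charm: only the leader calls pod-spec-set.
    from ops.charm import CharmBase
    from ops.main import main
    from ops.model import ActiveStatus

    class PodSpecCharm(CharmBase):
        def __init__(self, *args):
            super().__init__(*args)
            self.framework.observe(self.on.start, self)

        def on_start(self, event):
            if not self.model.unit.is_leader():
                # Non-leaders skip pod-spec-set; calling it would fail.
                return
            spec = {
                "containers": [{
                    "name": self.model.app.name,
                    "image": "ubuntu:latest",   # hypothetical image
                    "ports": [{"containerPort": 22, "protocol": "TCP"}],
                }]
            }
            self.model.pod.set_spec(spec)
            self.model.unit.status = ActiveStatus()

    if __name__ == "__main__":
        main(PodSpecCharm)
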
+ self.backend = ops.model.ModelBackend() + meta = ops.charm.CharmMeta() + self.model = ops.model.Model('myapp/0', meta, self.backend) + fake_script(self, 'is-leader', 'echo false') + with self.assertRaises(ops.model.ModelError): + self.model.pod.set_spec({'foo': 'bar'}) + + def test_base_status_instance_raises(self): + with self.assertRaises(TypeError): + ops.model.StatusBase('test') + + def test_active_message_default(self): + self.assertEqual(ops.model.ActiveStatus().message, '') + + def test_local_set_valid_unit_status(self): + test_cases = [( + ops.model.ActiveStatus('Green'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertEqual(fake_script_calls(self, True), [['status-set', '--application=False', 'active', 'Green']]), + ), ( + ops.model.MaintenanceStatus('Yellow'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertEqual(fake_script_calls(self, True), [['status-set', '--application=False', 'maintenance', 'Yellow']]), + ), ( + ops.model.BlockedStatus('Red'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertEqual(fake_script_calls(self, True), [['status-set', '--application=False', 'blocked', 'Red']]), + ), ( + ops.model.WaitingStatus('White'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertEqual(fake_script_calls(self, True), [['status-set', '--application=False', 'waiting', 'White']]), + )] + + for target_status, setup_tools, check_tool_calls in test_cases: + setup_tools() + + self.model.unit.status = target_status + + self.assertEqual(self.model.unit.status, target_status) + + check_tool_calls() + + def test_local_set_valid_app_status(self): + fake_script(self, 'is-leader', 'echo true') + test_cases = [( + ops.model.ActiveStatus('Green'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertIn(['status-set', '--application=True', 'active', 'Green'], fake_script_calls(self, True)), + ), ( + ops.model.MaintenanceStatus('Yellow'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertIn(['status-set', '--application=True', 'maintenance', 'Yellow'], fake_script_calls(self, True)), + ), ( + ops.model.BlockedStatus('Red'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertIn(['status-set', '--application=True', 'blocked', 'Red'], fake_script_calls(self, True)), + ), ( + ops.model.WaitingStatus('White'), + lambda: fake_script(self, 'status-set', 'exit 0'), + lambda: self.assertIn(['status-set', '--application=True', 'waiting', 'White'], fake_script_calls(self, True)), + )] + + for target_status, setup_tools, check_tool_calls in test_cases: + setup_tools() + + self.model.app.status = target_status + + self.assertEqual(self.model.app.status, target_status) + + check_tool_calls() + + def test_set_app_status_non_leader_raises(self): + fake_script(self, 'is-leader', 'echo false') + + with self.assertRaises(RuntimeError): + self.model.app.status + + with self.assertRaises(RuntimeError): + self.model.app.status = ops.model.ActiveStatus() + + def test_local_set_invalid_status(self): + fake_script(self, 'status-set', 'exit 1') + fake_script(self, 'is-leader', 'echo true') + + with self.assertRaises(ops.model.ModelError): + self.model.unit.status = ops.model.UnknownStatus() + + self.assertEqual(fake_script_calls(self, True), [ + ['status-set', '--application=False', 'unknown', ''], + ]) + + with self.assertRaises(ops.model.ModelError): + self.model.app.status = ops.model.UnknownStatus() + + # A leadership check is needed for 
application status. + self.assertEqual(fake_script_calls(self, True), [ + ['is-leader', '--format=json'], + ['status-set', '--application=True', 'unknown', ''], + ]) + + def test_status_set_is_app_not_bool_raises(self): + self.backend = ops.model.ModelBackend() + + for is_app_v in [None, 1, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + self.backend.status_set(ops.model.ActiveStatus, is_app=is_app_v) + + def test_remote_unit_status(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + + remote_unit = next(filter(lambda u: u.name == 'remoteapp1/0', self.model.get_relation('db1').units)) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus('Green'), + ops.model.MaintenanceStatus('Yellow'), + ops.model.BlockedStatus('Red'), + ops.model.WaitingStatus('White'), + ) + + for target_status in test_statuses: + with self.assertRaises(RuntimeError): + remote_unit.status = target_status + + def test_remote_app_status(self): + fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""") + fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""") + + remoteapp1 = self.model.get_relation('db1').app + + # Remote application status is always unknown. + self.assertIsInstance(remoteapp1.status, ops.model.UnknownStatus) + + test_statuses = ( + ops.model.UnknownStatus(), + ops.model.ActiveStatus(), + ops.model.MaintenanceStatus('Upgrading software'), + ops.model.BlockedStatus('Awaiting manual resolution'), + ops.model.WaitingStatus('Awaiting related app updates'), + ) + for target_status in test_statuses: + with self.assertRaises(RuntimeError): + remoteapp1.status = target_status + + self.assertEqual(fake_script_calls(self, clear=True), [ + ['relation-ids', 'db1', '--format=json'], + ['relation-list', '-r', '4', '--format=json'], + ]) + + def test_storage(self): + meta = ops.charm.CharmMeta() + meta.storages = {'disks': None, 'data': None} + self.model = ops.model.Model('myapp/0', meta, self.backend) + + fake_script(self, 'storage-list', """[ "$1" = disks ] && echo '["disks/0", "disks/1"]' || echo '[]'""") + fake_script(self, 'storage-get', + """ + if [ "$2" = disks/0 ]; then + echo '"/var/srv/disks/0"' + elif [ "$2" = disks/1 ]; then + echo '"/var/srv/disks/1"' + else + exit 2 + fi + """) + fake_script(self, 'storage-add', '') + + self.assertEqual(len(self.model.storages), 2) + self.assertEqual(self.model.storages.keys(), meta.storages.keys()) + self.assertIn('disks', self.model.storages) + test_cases = { + 0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')}, + 1: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/1')}, + } + for storage in self.model.storages['disks']: + self.assertEqual(storage.name, 'disks') + self.assertIn(storage.id, test_cases) + self.assertEqual(storage.name, test_cases[storage.id]['name']) + self.assertEqual(storage.location, test_cases[storage.id]['location']) + + self.assertEqual(fake_script_calls(self, clear=True), [ + ['storage-list', 'disks', '--format=json'], + ['storage-get', '-s', 'disks/0', 'location', '--format=json'], + ['storage-get', '-s', 'disks/1', 'location', '--format=json'], + ]) + + self.assertSequenceEqual(self.model.storages['data'], []) + self.model.storages.request('data', count=3) + self.assertEqual(fake_script_calls(self), [ + ['storage-list', 'data', 
'--format=json'], + ['storage-add', 'data=3'], + ]) + + # Try to add storage not present in charm metadata. + with self.assertRaises(ops.model.ModelError): + self.model.storages.request('deadbeef') + + # Invalid count parameter types. + for count_v in [None, False, 2.0, 'a', b'beef', object]: + with self.assertRaises(TypeError): + self.model.storages.request('data', count_v) + + +class TestModelBackend(unittest.TestCase): + + def setUp(self): + os.environ['JUJU_UNIT_NAME'] = 'myapp/0' + self.addCleanup(os.environ.pop, 'JUJU_UNIT_NAME') + + self._backend = None + + @property + def backend(self): + if self._backend is None: + self._backend = ops.model.ModelBackend() + return self._backend + + def test_relation_tool_errors(self): + err_msg = 'ERROR invalid value "$2" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'relation-list', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_list(3), + ops.model.ModelError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-list', f'echo {err_msg} >&2 ; exit 2'), + lambda: self.backend.relation_list(3), + ops.model.RelationNotFoundError, + [['relation-list', '-r', '3', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-set', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.ModelError, + [['relation-set', '-r', '3', 'foo=bar', '--app=False']], + ), ( + lambda: fake_script(self, 'relation-set', f'echo {err_msg} >&2 ; exit 2'), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.model.RelationNotFoundError, + [['relation-set', '-r', '3', 'foo=bar', '--app=False']], + ), ( + lambda: fake_script(self, 'relation-get', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.ModelError, + [['relation-get', '-r', '3', '-', 'remote/0', '--app=False', '--format=json']], + ), ( + lambda: fake_script(self, 'relation-get', f'echo {err_msg} >&2 ; exit 2'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.model.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--app=False', '--format=json']], + )] + + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_status_is_app_forced_kwargs(self): + fake_script(self, 'status-get', 'exit 1') + fake_script(self, 'status-set', 'exit 1') + + test_cases = ( + lambda: self.backend.status_get(False), + lambda: self.backend.status_get(True), + lambda: self.backend.status_set('active', '', False), + lambda: self.backend.status_set('active', '', True), + ) + + for case in test_cases: + with self.assertRaises(TypeError): + case() + + def test_storage_tool_errors(self): + test_cases = [( + lambda: fake_script(self, 'storage-list', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_list('foobar'), + ops.model.ModelError, + [['storage-list', 'foobar', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-get', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_get('foobar', 'someattr'), + ops.model.ModelError, + [['storage-get', '-s', 'foobar', 'someattr', '--format=json']], + ), ( + lambda: fake_script(self, 'storage-add', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=2), + ops.model.ModelError, + [['storage-add', 'foobar=2']], + ), ( + lambda: 
fake_script(self, 'storage-add', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=object), + TypeError, + [], + ), ( + lambda: fake_script(self, 'storage-add', f'echo fooerror >&2 ; exit 1'), + lambda: self.backend.storage_add('foobar', count=True), + TypeError, + [], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_network_get(self): + network_get_out = '''{ + "bind-addresses": [ + { + "mac-address": "", + "interface-name": "", + "addresses": [ + { + "hostname": "", + "value": "192.0.2.2", + "cidr": "" + } + ] + } + ], + "egress-subnets": [ + "192.0.2.2/32" + ], + "ingress-addresses": [ + "192.0.2.2" + ] +}''' + fake_script(self, 'network-get', f'''[ "$1" = deadbeef ] && echo '{network_get_out}' || exit 1''') + network_info = self.backend.network_get('deadbeef') + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), [['network-get', 'deadbeef', '--format=json']]) + + network_info = self.backend.network_get('deadbeef', 1) + self.assertEqual(network_info, json.loads(network_get_out)) + self.assertEqual(fake_script_calls(self, clear=True), [['network-get', 'deadbeef', '-r', '1', '--format=json']]) + + def test_network_get_errors(self): + err_no_endpoint = 'ERROR no network config found for binding "$2"' + err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found' + + test_cases = [( + lambda: fake_script(self, 'network-get', f'echo {err_no_endpoint} >&2 ; exit 1'), + lambda: self.backend.network_get("deadbeef"), + ops.model.ModelError, + [['network-get', 'deadbeef', '--format=json']], + ), ( + lambda: fake_script(self, 'network-get', f'echo {err_no_rel} >&2 ; exit 2'), + lambda: self.backend.network_get("deadbeef", 3), + ops.model.RelationNotFoundError, + [['network-get', 'deadbeef', '-r', '3', '--format=json']], + )] + for do_fake, run, exception, calls in test_cases: + do_fake() + with self.assertRaises(exception): + run() + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-get', f'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_get() + calls = [['action-get', '--format=json']] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_set_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-set', f'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_set({'foo': 'bar', 'dead': 'beef cafe'}) + calls = [["action-set", "foo=bar", "dead=beef cafe"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_log_error(self): + fake_script(self, 'action-get', '') + fake_script(self, 'action-log', f'echo fooerror >&2 ; exit 1') + with self.assertRaises(ops.model.ModelError): + self.backend.action_log('log-message') + calls = [["action-log", "log-message"]] + self.assertEqual(fake_script_calls(self, clear=True), calls) + + def test_action_get(self): + fake_script(self, 'action-get', """echo '{"foo-name": "bar", "silent": false}'""") + params = self.backend.action_get() + self.assertEqual(params['foo-name'], 'bar') + self.assertEqual(params['silent'], False) + self.assertEqual(fake_script_calls(self), [['action-get', '--format=json']]) + + def test_action_set(self): + 
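
The TestModelBackend cases all exercise the same shape of call: the backend shells out to a Juju hook tool, appends --format=json where it expects structured output, and turns a non-zero exit into ModelError (or RelationNotFoundError for the "relation not found" message). A stripped-down sketch of that call pattern, assuming only the standard library and not reproducing the real ops.model.ModelBackend:

    # Sketch of the backend-to-hook-tool mapping these tests verify.
    import json
    import shutil
    import subprocess

    def run_hook_tool(*args, use_json=False):
        # Shell out to a hook tool; JSON-decode stdout when structured
        # output is requested. check=True raises CalledProcessError on a
        # non-zero exit, which the real backend converts to ModelError.
        cmd = list(args)
        if use_json:
            cmd.append("--format=json")
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return json.loads(result.stdout) if use_json else None

    if __name__ == "__main__":
        # Only meaningful inside a hook/action context where the tools are on PATH.
        if shutil.which("action-get") and shutil.which("action-set"):
            params = run_hook_tool("action-get", use_json=True)
            run_hook_tool("action-set", "output=done")
            print(params)
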
fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-set', 'exit 0') + self.backend.action_set({'x': 'dead beef', 'y': 1}) + self.assertEqual(fake_script_calls(self), [['action-set', 'x=dead beef', 'y=1']]) + + def test_action_fail(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-fail', 'exit 0') + self.backend.action_fail('error 42') + self.assertEqual(fake_script_calls(self), [['action-fail', 'error 42']]) + + def test_action_log(self): + fake_script(self, 'action-get', 'exit 1') + fake_script(self, 'action-log', 'exit 0') + self.backend.action_log('progress: 42%') + self.assertEqual(fake_script_calls(self), [['action-log', 'progress: 42%']]) + + +if __name__ == "__main__": + unittest.main() diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/requirements.txt b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/requirements.txt new file mode 100644 index 00000000..8608c1b0 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/requirements.txt @@ -0,0 +1 @@ +paramiko diff --git a/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/src/charm.py b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/src/charm.py new file mode 100755 index 00000000..eb61f692 --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/charms/charm-simple-k8s/src/charm.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python3 + +import sys + +sys.path.append("lib") + +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess + + +def install_dependencies(): + # Make sure Python3 + PIP are available + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # This is needed when running as a k8s charm, as the ubuntu:latest + # image doesn't include either package. 
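
requirements.txt pulls in paramiko because this proxy charm manages its VNF over SSH rather than inside its own pod. The SSHProxy class the charm imports lives in lib/charms/osm/sshproxy.py of this package; the snippet below is only an illustration of the underlying paramiko calls such a helper wraps, with the username and password taken from the sample values used elsewhere in this package and the host purely hypothetical.

    # Illustration of running a remote command over SSH with paramiko and
    # returning (stdout, stderr), the interface the charm code relies on.
    import paramiko

    def ssh_run(hostname, username, password, cmd, timeout=30):
        client = paramiko.SSHClient()
        # Accept unknown host keys; the proxy charm has no pre-seeded known_hosts.
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname, username=username, password=password, timeout=timeout)
        try:
            _, stdout, stderr = client.exec_command(cmd, timeout=timeout)
            return stdout.read().decode().strip(), stderr.read().decode().strip()
        finally:
            client.close()

    # Example (hypothetical host, credentials from this package's samples):
    # out, err = ssh_run("10.0.0.5", "ubuntu", "osm4u", "touch /home/ubuntu/first-touch")
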
+ + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the build dependencies for our requirements (paramiko) + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + REQUIREMENTS_TXT = "{}/requirements.txt".format(os.environ["JUJU_CHARM_DIR"]) + if os.path.exists(REQUIREMENTS_TXT): + subprocess.check_call( + ["apt-get", "install", "-y", "python3-paramiko", "openssh-client"], + ) + + +try: + from charms.osm.sshproxy import SSHProxy +except Exception as ex: + install_dependencies() + from charms.osm.sshproxy import SSHProxy + + +class SimpleProxyCharm(CharmBase): + state = StoredState() + + def __init__(self, *args): + super().__init__(*args) + + # An example of setting charm state + # that's persistent across events + self.state.set_default(is_started=False) + + if not self.state.is_started: + self.state.is_started = True + + # Register all of the events we want to observe + for event in ( + # Charm events + self.on.config_changed, + self.on.start, + self.on.upgrade_charm, + # Charm actions (primitives) + self.on.touch_action, + # OSM actions (primitives) + self.on.start_action, + self.on.stop_action, + self.on.restart_action, + self.on.reboot_action, + self.on.upgrade_action, + # SSH Proxy actions (primitives) + self.on.generate_ssh_key_action, + self.on.get_ssh_public_key_action, + self.on.run_action, + self.on.verify_ssh_credentials_action, + ): + self.framework.observe(event, self) + + def get_ssh_proxy(self): + """Get the SSHProxy instance""" + proxy = SSHProxy( + hostname=self.model.config["ssh-hostname"], + username=self.model.config["ssh-username"], + password=self.model.config["ssh-password"], + ) + return proxy + + def on_config_changed(self, event): + """Handle changes in configuration""" + unit = self.model.unit + + # Unit should go into a waiting state until verify_ssh_credentials is successful + unit.status = WaitingStatus("Waiting for SSH credentials") + proxy = self.get_ssh_proxy() + + verified = proxy.verify_credentials() + if verified: + unit.status = ActiveStatus() + else: + unit.status = BlockedStatus("Invalid SSH credentials.") + + def on_start(self, event): + """Called when the charm is being started""" + unit = self.model.unit + + if not SSHProxy.has_ssh_key(): + unit.status = MaintenanceStatus("Generating SSH keys...") + + print("Generating SSH Keys") + SSHProxy.generate_ssh_key() + + unit.status = ActiveStatus() + + def on_touch_action(self, event): + """Touch a file.""" + try: + filename = event.params["filename"] + proxy = self.get_ssh_proxy() + + stdout, stderr = proxy.run("touch {}".format(filename)) + event.set_results({"output": stdout}) + except Exception as ex: + event.fail(ex) + + def on_upgrade_charm(self, event): + """Upgrade the charm.""" + unit = self.model.unit + + # Mark the unit as under Maintenance. 
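
on_touch_action above shows the action protocol in miniature: read event.params, run the command through the proxy, then report via event.set_results() or event.fail(). The self-contained sketch below mirrors that flow with stand-in classes (StubProxy and FakeActionEvent are illustrative, not framework code), which makes the handler logic easy to exercise without a Juju model.

    # Stand-ins for the SSH proxy and the Juju action event.
    class StubProxy:
        """Pretends to be an SSHProxy whose run() already happened."""
        def __init__(self, stdout="", stderr=""):
            self._out = (stdout, stderr)

        def run(self, cmd):
            return self._out

    class FakeActionEvent:
        def __init__(self, params):
            self.params = params
            self.results = None
            self.error = None

        def set_results(self, results):
            self.results = results

        def fail(self, message):
            self.error = message

    def touch(proxy, event):
        # Mirrors on_touch_action: run the command, report stdout,
        # fail the action if anything went wrong.
        try:
            stdout, _ = proxy.run("touch {}".format(event.params["filename"]))
            event.set_results({"output": stdout})
        except Exception as ex:
            event.fail(str(ex))

    if __name__ == "__main__":
        ok = FakeActionEvent({"filename": "/home/ubuntu/first-touch"})
        touch(StubProxy(stdout=""), ok)
        print(ok.results)   # {'output': ''}
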
+ unit.status = MaintenanceStatus("Upgrading charm") + + self.on_install(event) + + # When maintenance is done, return to an Active state + unit.status = ActiveStatus() + + ############### + # OSM methods # + ############### + def on_start_action(self, event): + """Start the VNF service on the VM.""" + pass + + def on_stop_action(self, event): + """Stop the VNF service on the VM.""" + pass + + def on_restart_action(self, event): + """Restart the VNF service on the VM.""" + pass + + def on_reboot_action(self, event): + """Reboot the VM.""" + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run("sudo reboot") + + if len(stderr): + event.fail(stderr) + + def on_upgrade_action(self, event): + """Upgrade the VNF service on the VM.""" + pass + + ##################### + # SSH Proxy methods # + ##################### + def on_generate_ssh_key_action(self, event): + """Generate a new SSH keypair for this unit.""" + + if not SSHProxy.generate_ssh_key(): + event.fail("Unable to generate ssh key") + + def on_get_ssh_public_key_action(self, event): + """Get the SSH public key for this unit.""" + + pubkey = SSHProxy.get_ssh_public_key() + + event.set_results({"pubkey": SSHProxy.get_ssh_public_key()}) + + def on_run_action(self, event): + """Run an arbitrary command on the remote host.""" + + cmd = event.params["command"] + + proxy = self.get_ssh_proxy() + stdout, stderr = proxy.run(cmd) + + event.set_results({"output": stdout}) + + if len(stderr): + event.fail(stderr) + + def on_verify_ssh_credentials_action(self, event): + """Verify the SSH credentials for this unit.""" + + proxy = self.get_ssh_proxy() + + verified, stderr = proxy.verify_credentials() + if verified: + print("Verified!") + event.set_results({"verified": True}) + else: + print("Verification failed!") + event.set_results({"verified": False}) + event.fail(stderr) + + +if __name__ == "__main__": + main(SimpleProxyCharm) diff --git a/hackfest_k8sproxycharm_vnf/cloud_init/cloud-config.txt b/hackfest_k8sproxycharm_vnf/cloud_init/cloud-config.txt new file mode 100755 index 00000000..36c8d1bf --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/cloud_init/cloud-config.txt @@ -0,0 +1,12 @@ +#cloud-config +password: osm4u +chpasswd: { expire: False } +ssh_pwauth: True + +write_files: +- content: | + # My new helloworld file + + owner: root:root + permissions: '0644' + path: /root/helloworld.txt diff --git a/hackfest_k8sproxycharm_vnf/hackfest_k8sproxycharm_vnfd.yaml b/hackfest_k8sproxycharm_vnf/hackfest_k8sproxycharm_vnfd.yaml new file mode 100644 index 00000000..831a404e --- /dev/null +++ b/hackfest_k8sproxycharm_vnf/hackfest_k8sproxycharm_vnfd.yaml @@ -0,0 +1,69 @@ +vnfd:vnfd-catalog: + vnfd: + - id: hackfest_k8sproxycharm-vnf + name: hackfest_k8sproxycharm-vnf + short-name: hackfest_k8sproxycharm-vnf + version: '1.0' + description: A VNF consisting of 1 VDU connected to two external VL, and one for data and another one for management + logo: osm.png + connection-point: + - id: vnf-mgmt + name: vnf-mgmt + short-name: vnf-mgmt + type: VPORT + - id: vnf-data + name: vnf-data + short-name: vnf-data + type: VPORT + mgmt-interface: + cp: vnf-mgmt + vdu: + - id: mgmtVM + name: mgmtVM + image: "bionic" + count: '1' + vm-flavor: + vcpu-count: '1' + memory-mb: '1024' + storage-gb: '10' + interface: + - name: mgmtVM-eth0 + position: '1' + type: EXTERNAL + virtual-interface: + type: VIRTIO + external-connection-point-ref: vnf-mgmt + - name: dataVM-xe0 + position: '2' + type: EXTERNAL + virtual-interface: + type: VIRTIO + external-connection-point-ref: 
vnf-data + cloud-init-file: cloud-config.txt + vnf-configuration: + juju: + charm: charm-simple-k8s + proxy: True + cloud: k8s + initial-config-primitive: + - seq: '1' + name: config + parameter: + - name: ssh-hostname + value: + - name: ssh-username + value: ubuntu + - name: ssh-password + value: osm4u + - seq: '2' + name: touch + parameter: + - name: filename + data-type: STRING + value: '/home/ubuntu/first-touch' + config-primitive: + - name: touch + parameter: + - name: filename + data-type: STRING + default-value: '/home/ubuntu/touched' diff --git a/hackfest_k8sproxycharm_vnf/icons/osm.png b/hackfest_k8sproxycharm_vnf/icons/osm.png new file mode 100644 index 0000000000000000000000000000000000000000..62012d2a2b491bdcd536d62c3c3c863c0d8c1b33 GIT binary patch literal 55888 [base85-encoded image data for icons/osm.png omitted] -- GitLab