From 45287f9ed7837d9be925db19045955807b5b210c Mon Sep 17 00:00:00 2001 From: Robin Rombach Date: Sat, 14 Jan 2023 13:48:28 +0100 Subject: [PATCH] stable unclip finetune --- README.md | 36 + assets/stable-samples/stable-unclip/panda.jpg | Bin 0 -> 175412 bytes configs/karlo/decoder_900M_vit_l.yaml | 37 + configs/karlo/improved_sr_64_256_1.4B.yaml | 27 + configs/karlo/prior_1B_vit_l.yaml | 21 + .../v2-1-stable-karlo-inference.yaml | 74 ++ ldm/models/diffusion/ddpm.py | 55 ++ ldm/modules/karlo/__init__.py | 0 ldm/modules/karlo/diffusers_pipeline.py | 512 +++++++++++ ldm/modules/karlo/kakao/__init__.py | 0 ldm/modules/karlo/kakao/models/__init__.py | 0 ldm/modules/karlo/kakao/models/clip.py | 182 ++++ .../karlo/kakao/models/decoder_model.py | 193 ++++ ldm/modules/karlo/kakao/models/prior_model.py | 138 +++ ldm/modules/karlo/kakao/models/sr_256_1k.py | 10 + ldm/modules/karlo/kakao/models/sr_64_256.py | 88 ++ ldm/modules/karlo/kakao/modules/__init__.py | 49 ++ .../modules/diffusion/gaussian_diffusion.py | 828 ++++++++++++++++++ .../karlo/kakao/modules/diffusion/respace.py | 112 +++ ldm/modules/karlo/kakao/modules/nn.py | 114 +++ ldm/modules/karlo/kakao/modules/resample.py | 68 ++ ldm/modules/karlo/kakao/modules/unet.py | 792 +++++++++++++++++ ldm/modules/karlo/kakao/modules/xf.py | 231 +++++ ldm/modules/karlo/kakao/sampler.py | 272 ++++++ ldm/modules/karlo/kakao/template.py | 141 +++ scripts/streamlit/stablekarlo.py | 381 ++++++++ 26 files changed, 4361 insertions(+) create mode 100644 assets/stable-samples/stable-unclip/panda.jpg create mode 100644 configs/karlo/decoder_900M_vit_l.yaml create mode 100644 configs/karlo/improved_sr_64_256_1.4B.yaml create mode 100644 configs/karlo/prior_1B_vit_l.yaml create mode 100644 configs/stable-diffusion/v2-1-stable-karlo-inference.yaml create mode 100644 ldm/modules/karlo/__init__.py create mode 100644 ldm/modules/karlo/diffusers_pipeline.py create mode 100644 ldm/modules/karlo/kakao/__init__.py create mode 100644 ldm/modules/karlo/kakao/models/__init__.py create mode 100644 ldm/modules/karlo/kakao/models/clip.py create mode 100644 ldm/modules/karlo/kakao/models/decoder_model.py create mode 100644 ldm/modules/karlo/kakao/models/prior_model.py create mode 100644 ldm/modules/karlo/kakao/models/sr_256_1k.py create mode 100644 ldm/modules/karlo/kakao/models/sr_64_256.py create mode 100644 ldm/modules/karlo/kakao/modules/__init__.py create mode 100644 ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py create mode 100644 ldm/modules/karlo/kakao/modules/diffusion/respace.py create mode 100644 ldm/modules/karlo/kakao/modules/nn.py create mode 100644 ldm/modules/karlo/kakao/modules/resample.py create mode 100644 ldm/modules/karlo/kakao/modules/unet.py create mode 100644 ldm/modules/karlo/kakao/modules/xf.py create mode 100644 ldm/modules/karlo/kakao/sampler.py create mode 100644 ldm/modules/karlo/kakao/template.py create mode 100644 scripts/streamlit/stablekarlo.py diff --git a/README.md b/README.md index f413068..9d72b85 100644 --- a/README.md +++ b/README.md @@ -137,6 +137,42 @@ Note: The inference config for all model versions is designed to be used with EM For this reason `use_ema=False` is set in the configuration, otherwise the code will try to switch from non-EMA to EMA weights. 
+### Stable Diffusion Meets Karlo
+![stable-unclip-panda](assets/stable-samples/stable-unclip/panda.jpg)
+_++++++ NOTE: preliminary checkpoint for internal testing ++++++_
+
+Recently, [KakaoBrain](https://kakaobrain.com/) openly released [Karlo](https://github.com/kakaobrain/karlo), a pretrained, large-scale replication of [unCLIP](https://arxiv.org/abs/2204.06125) (also known as DALL·E 2).
+We introduce _Stable Karlo_, a combination of the Karlo CLIP image-embedding prior and Stable Diffusion v2.1.
+More precisely, we finetuned SD 2.1 to accept a CLIP ViT-L/14 image embedding in addition to the text encodings.
+This means that the model can be used to produce image variations in the style of unCLIP, but it can also be combined with the
+embedding prior of Karlo and then decodes directly to 768x768 pixel resolution.
+
+To run the model, first download the Karlo checkpoints:
+```shell
+mkdir -p checkpoints/karlo_models
+cd checkpoints/karlo_models
+wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/096db1af569b284eb76b3881534822d9/ViT-L-14.pt
+wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/0b62380a75e56f073e2844ab5199153d/ViT-L-14_stats.th
+wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/85626483eaca9f581e2a78d31ff905ca/prior-ckpt-step%3D01000000-of-01000000.ckpt
+cd ../../
+```
+Then download the finetuned SD2.1 checkpoint [+++prelim private upload on HF+++] from [https://huggingface.co/stabilityai/stable-unclip-preview](https://huggingface.co/stabilityai/stable-unclip-preview), and put the checkpoint into the `checkpoints` folder.
+
+Then, run
+
+```shell
+streamlit run scripts/streamlit/stablekarlo.py
+```
+
+The script optionally supports sampling from the full Karlo model. To do so, you need to download the 64x64 decoder and the 64->256 upscaler via:
+```shell
+cd checkpoints/karlo_models
+wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/efdf6206d8ed593961593dc029a8affa/decoder-ckpt-step%3D01000000-of-01000000.ckpt
+wget https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/4226b831ae0279020d134281f3c31590/improved-sr-ckpt-step%3D1.2M.ckpt
+cd ../../
+```
+
 ### Image Modification with Stable Diffusion
 
 ![depth2img-stable2](assets/stable-samples/depth2img/merged-0000.png)
diff --git a/assets/stable-samples/stable-unclip/panda.jpg b/assets/stable-samples/stable-unclip/panda.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..49aa1ba4d5a818df96988f3dc2ade49200c767ab
GIT binary patch
literal 175412
[base85-encoded JPEG sample image data omitted]
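For orientation, the conditioning signal described in the README section above is a plain CLIP ViT-L/14 image embedding. Below is a minimal sketch of how such an embedding can be computed, using the Hugging Face `transformers` CLIP port as a stand-in; the model id `openai/clip-vit-large-patch14` and this code path are illustrative assumptions and not part of the patch, which instead loads KakaoBrain's `ViT-L-14.pt` through the bundled `ldm/modules/karlo` sampler code.

```python
# Illustrative sketch (not part of this patch): computing the CLIP ViT-L/14
# image embedding that the finetuned SD 2.1 model is conditioned on.
# Assumes the Hugging Face `transformers` CLIP port; the streamlit script
# itself loads KakaoBrain's ViT-L-14.pt checkpoint instead.
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

model_id = "openai/clip-vit-large-patch14"  # ViT-L/14, as referenced above
processor = CLIPImageProcessor.from_pretrained(model_id)
encoder = CLIPVisionModelWithProjection.from_pretrained(model_id).eval()

image = Image.open("assets/stable-samples/stable-unclip/panda.jpg")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    # Shape (1, 768). For image variations this comes from an input image;
    # for text-to-image, the Karlo prior predicts such a vector from text.
    image_embed = encoder(**inputs).image_embeds
```

At sampling time the Karlo prior maps the text encoding to an embedding of this form, and the finetuned SD 2.1 decoder turns it directly into a 768x768 image.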
zb&_U000$>OfazMzrAOmIcjIpqJeJxVD$!|HC9(Uio?#!}Zq(_;%EZa?pEP*SNY(x% z>ZaRIh2*riAH0`4Q5(4}+v#2{@gD2p-@^jwu6U6zwPhbQhMGfQ{{Xbr7wx_KRv!#R zo(J$e>!&*cZcVSsVT|-rM^@fWkz5+ zkUCKfyt2rv8kS;k0Ho&I(Hq7IA4q9_H1PhbrZt(jTX~U#sbCpZj|6489gh{sc)|y@ zR!L&{uo#eX7$4%zae6O;b?fLzMv54mjG3kjgWA2P!M_ZzwHqU4sQ9BoYj-*FT2|h@ zv$!u>okyZP2UD{dGRJ_x;HdBKT0R-Hi&xWb>>TZso$>n|U{|{MkM=;;F71T=9M^Qc zT4e_|>nV)4JYb#)$F3`Zli)wZ4+!c;=5Gw%#;L~Wg2P)40=x2Bi`Rvcx zr8^on*vfbyitpuHfMV1vOGPqr9S#bf;<#N~N4tvTYB1Q{M>apwt=c9GG5kPd^R0~w z#IE+=yKIIX4?OzUa%*FPac|y6zO|ugo;{4bjGAV**U!cwnEE&$bo;4YbJUbyx zM#4zq>a(xS8xK&!j8|8vYqKMqMu<=GsK$Q2^;`ZD{aDB3EyFZN{#j=$_>PsF>8WZ} zIZhg!kB7WLdEvcHFD&M0o+b^sPw8JxcwhFI(sf-hS?gfi%E__Ijq$kmAJdxo+gU}p zl^x-aelkoTFXxKGXpjO!HcuD>C-bb5lr}b$V^LkB>D@Z(!0>8D`#{zkNt#Yu>=$hc zx780AuFBKJ9v_VPmzuKNfH^NPA6$`&_?hgs$`U>DNst6-$i+=6 z`ux$&c?4Oxy_EryNB$V&{BeaH`q$4MJNT!ld|LR6H;nbH#?GcaH%?~CT0l8P`jg8Z z=D6zBayn?yih3g#_LJ2$-xBLKT7>=}H%|;J6p+m1#{uh+>73y4n&*5iFO9U_9yYX! z9ZJ_2A~KS$Ki%t^%<)dBi6r0Ja^fjiv3EHjlb_1DLusg5$TbfO>hCOE6C5vsKA`ok zZf4Qg;mfhw_>WxDb(?RqYgz+omzdAmZP&{xeRiA!ToiYD{{V&^q|o&Tx`FvHqu{Xg zC{1cX@y}AVLt)|z8%<0ARIS`#upd)dy5ETWGp$0`77H$qa-fk4w)j}%r#z3stz8-D zVd}?R)!F5;(It-PozHG!bq($~;QLlJhlygdy*hrmrrc>aOn&r=KtGu2TY7KCU0T}Q z*roK7+OYosR3<7^_8`;lwQC;~q!DW?E#-+j3&`7~fsb(KBoETI<(A}K5nX6LAn|S8 zhlcc-=Kjo?{?PHOW)#N;Mgyl>_umLUgYbVuiWqepdp5UF&d-;5r9Ox`IrSs}E5m#N z;-~O8i5bKq=`HetH*>Z{{vW6ve+tI&4!J(Be?83XT2^v*emVUsbv{^}?w>rokF2~U z`%p!HbjMS;({Al|H_FO%`-K_cDf#1mGr`(Vt!?YSwYPw*V326~KBKALra&5{!o>+b zxQ{!&mGG_2=ELSp#ZeCh?t>?w1EPREM>Xs}2)+nwUktu0*y@@dvM>B4bdDzT z9}*QKs01U42qr3;rP^2 z{KEpYB(ANvW4&07cokb7QL7P12o*?7ltwYum{bn5{M0Uap%O(a3>v32nH_3b-x(D~ zXU`d?NS9LiaqxLH$?GtzgLxSuw(jQ3W74x_0%Aer8p?Ye5YXjC^pH)!AXM<)n}^Bk znr^EIib54?t)4P-T*q%sPKZtNAYV#hkb})vf>6oy;+*eYG1%ggvnN?{dee-Y)lkPY zJljVmv@r0-tu0Kloq9ocY=+7Ium%=*EKI+>!Kb3sw7-KyOTV*9S=(OPuZTzS+$=WNwE1X;N4g&{{VXtVE(*Ul-XL_!vkFE4p9Q`jse3-=uh`Y z>VI1IPuUk-gI4kHi*$LT3in2f7esfmXU1vbHiFZm-=0^ zLZf$-prPbAz^<0p!7^V!E#y@&Dann`%%19c*Dv9%PU}jyfIv$|5C&7h;=ND8UO2J3 zDGlwW2;l7*HS5%=B&?4uLhO^_Z^Q2rct^*2HmRWMR`Il#0U=4^IJ(`zi1uLpG43m} z_|5RzPY(P`)-BUdy|*Z-a+iKzmleEacQkCofhvMX3_f9w_2`}i7T4_;+0)EO_PU2$ z?O<}OGth!KBc*+X;lB&`XW}-MZ+GGyGArBo_e&v@b42^oVGg;$1QU>W2a(Nu%fw>v zxTRioIpb?=lV9d`V`*UVcu%XUT)p0|?{2#z;V*@ED>7S1@fqWJ1`+uidxPzY!1#l0 zH-fc&M@>nImeOL0BaH4t^CNyLL9eH7{{UrOZ^2$8mfyoR&#UOO$QIR=)IFg*`ErIM zC_75IMC*W^gLXvF2-h+XACtrQb}r-r#Tj; z^2U#C=1Xz%h!QjN7c2s=u1jE7+#VtLkzt|gGgN_4!tLnc;Y+6AC$cRU=OR^}XA^*V4=9`3-E2 zg-V#q#n0^DqVI3(x$AnLk6`$1rZ%hMeN1bfD3=~hwcBB#AZ1GcG4l1>jE>&5^CyeH zYkhl5O;}HTA~vgV*Ecp$$+2Z(HvHTjoM3EUsUVv1{{RcA1kIW@w?Ja-bysN5g4!DqmC<(>1C0efyEk=Fp4>A~bUd^{Sf9(L^A*HzhU zkEW@Nh54go%icWrm9F@zcG8<}mu`{*mT(P#F&M@@NIgj>-nku1!^@`1@ioME1Og1v z9mrewg17Micq}@eoO4tqyPp2-rjfkKrI4`)ki?vv)|~CKQ6&EWIlx_uz!)8er)*c$ z)1?}5QHruWIn7S}i~qo6tM&T(Et;_nZ5 zYfF`3mhSB@T)06raj*&f@0X=`R58%6J3{BB?5wpsKULF~DA>B-oRZ3aL0pva#;!sI zLFzxPdVZ~?Xm=t3{j)KaT;K~}Ndwa-t#fmDQ%{YmgH*l)p0NC=| zIi>ZOM@yN)H|lv*Ry&w}?>QI;x%aE_+sP*k#YY?gfH7UQ-@`pt(T4Plfz%cw`c{0t z8NRpqWJOO-K^2?C#p`pr6t0Y0Yi}(aZpM0($I`8=SSf&5jy{LhterkYc_Vn3Of!&q za6066{Ap4ka0p@8gT^ZtQB3IMwX`ZdvjtU6jbZ8wl7K+Pa`gWHjfG8|V zMhkqzFAbk^arsx#9|b%WHiO}0X`@xRy=)RxWB_vCfjAZ9dM=M|toX9SNr9GVU@E}> z0DTbk{7KI>_1A~=PbS_7ND3f~DkvZ4xvw`7QTs3P6=bDl(88HbsE};qkXOAU;x|P< zFO<8Sd!Ci*3*h8m2~9J^jXl(x!=9>%#(y(h1=fcpowEo309h+;Yz_kd0QKv}P}b$o zRjHD08Jab5zaYDxK}kKxbXd{8@yYsArnPj9q+zppzz2_%dsPcmX(V|Lagu(3RjnP4 zImH(5ZdMyrL&JUM`d65E&suxighGDn7r_4U;19yPJyPq;j#p+N1teFF_`6yFGz6Bze^(%_hoM_hxx+t{7#UlZ^VB zheXR>!Uj%r(xAR@iL*FnJk`$*gZ-%=Ol>vV_9Z<@{8HuYVMWO$&j4n*cVLOfTIf7Q zc=m{{qbwyj2hzD&W-PhN(iwzZn%?2St|@*T-N 
zht{+$9y@2-5fe;C(Wpr5J{}9EX!jSaaWv#Z9z8pnw{LJEwzz(cFTXX9 ze|QMDw2Y0hE=V4ytu}j*3dWfmdC8(m=8%=QElED{yE)GSx*r5-v8=ZmwbG9w*>Zup z7Oqwu)0K@mEIHLX8{rZ>LBK4(b$SZnte>F&04BF|c^-SzKuam? zI*1vuG=lNHc;u z8pH7(R=^x*_k;S^V)nMWZHhhc-75^F1JmBQUlv5~r8Ue^n9xe29fos67ru<~EB6!h zF*MCu*1!+7TQs?B=NYXlt$xN>K)0~NF&{QDisk-!c4czgxB|AVbPJoP*UIw2$T()f zuIWjpe19#|FLE5Q7$>z_g^22V=RK+k^y{lcEkBY}9*ZVTC9dx@WqN-q*7rC$S~s-z zV<&Dp1K3wdr!a7vPv$F>iTMmTKM6Pvg;f*qG> zKDiY%v2?1-mJa3^oCYThxvIv7EdXA1z1(_04cPQ!KdS>GiKj@RVT4 zhkyq_E3LE0cj|g(l>m(a$2HK|!{*F`8BxwF6HJhhy|LQ2>>rXqIRd$)HMy%32RQ9z>gM3K1Dq^fcBtu^N4<+m(P7h~ZPH8o>xM(i9?P1=6EJr<2;}uWRC;ID zvrAjES2Y-@N9J$EcNO505?Pu+L;7O4&1NAC`&aJ_sr(Ifejm3HTio8u`?UjrdjZ2A#FG^Vb6HSu5J zwa0_Cy%O(Flxp4_)EJu$N({0(5OAdSVd!hjym6!XjK3icgFA@F9+mA{pT&J&;OE7S zZ@{-!Hkz-4rn)o1r(8xqcCI%A6MqTo$4t_EPw*94RQvT6aAfQ@xBdlMkA|6pG7}jC<2AH2uJ`4tn5KXjsO?9-Zr*K3y5=Q>kq_ z7sP&31JrgtwWAA(6k~(w>r~)jEL%=+Iv+|DG;hXhMW&_|W6Lje(nw2|IRNIAJ0Qk* z?^RLUj`c8&isYOD+}4LTsc|(wF9UBu+|%zhq`tE-T*f%%Wd{XM^{hLKoE#@V?NwtO zlCI__XzplpX*3*(yRzEqZ3LK&)b`Fe{A%}x{w8Za6!8t7qiK4}J(R074xJ-)IZ{2l zA7fn2(-bWj=m*Sdq-a|w(z#_e^g1G~sq_}H`%nB=@ou3my?^3syT-X<7_>;xsLpl; z0}7+litD@^`&nvMN~y2t*7xy@194>LCm!WaLHsdXm+ZZECDtzV-4@!>d}niQDrx@! z+5^qDy;3spGB*Lp`>o%neAi*|yY_h2yd~iq4;wa@Z==aMKic|5n{SHE$-=rGfm^zQ zcAogH;W;IvBicAh$oGE(d|U9Z#1g5c>Pol7b7gh=xrfnDU&6eL_RWMW{u7Y!{$0ZK z?Uk>aMblf#vd1e$G-qEy*mGR+ z(OAJjJx1AE$`PdU+ixQ{>P3Ag`x@9t-ZX7W>h3#TCM~wJv797-vG=-m?_P7@ z9~t;l!afwmZ{tly^6$e*u3BB# z&2JO|zj+j&dMQBwkClMjanqU{c@$G-Rolcuq!fPrnfmqd2gB)q;7eF-bSF2uWD&el z?h0mrf?UR&Sd3;7vbXj(gkvVChy?u^A<6n2UlsWMJh;^ggx8{1y1S;$IqA z9bZ|oxwY5i(*$<ET zGZH}gPeMoFO;@$6MjqzJ#^Z2Qu{F8*bxU5~f%Gn+@iRu!d>0mlqTIHve9G3%s!47l z*%3T}+?;o>Hk#ni8C~H>!5AN>y>TM^+g`~I#z3swL-pJ5TlUwP1mNyrfN_!jf1P_& zqZr2Psq%H>DzaSKTSIEwQCl@b=8){z^1|RHSe@4L&&++`6NFCW9;Y3DPHPr@QSD-s zDy}9_8!Nb}2ZB8fQMA`@nrDg=B!&=WEfD1b@>@NC>z>uR)Z}#eU6HwW;mt-nN4Pd} z3s}Zk**1Vb8uRUPZL=3&xx1hluW9gKgnV7`9@5)VmijGILY)}NEcaP4;l5;Gc?5Oz zHOG8T@SlXDOQ?K1;)@BjOJLr0io1Do5!Yi7L2Q2rIIRu zip2Qs@nL*t;|tAkGP)Lqzhu%a4z#AQK~UXs(8BD&3enqR3os1fFs4??rm*F^aM9^Q$_NbH3->dc~&o ze@|NBHC-#tO{z1^dUm50^24tk>y*2+C9pWb>tCOye#()=OJK=`+IUx>4cQCO1CISq^EG$2tk&1q<~4i!O)@mHw?VN+HUr4_tb2>aw6(T-OJf^O z2{JesBscJnPTyL%xnM z60Dnd>>vz`cc)+=u<=KVbSDjKsoX`5n2?(6w2#_1$GSp=Yvs8?+IYiaxLKu?=jT#B zl~u{f-2VWTA=y38Y16-KFB4di_KPcdzyuIe0E5?xh|=% zPp=psrFe|KTV$Qp1o8P%?+fzhf@n?mA;K*BKHK6?!d+wrzR<6&-}}hl;a09LJ`CQj z=x%T9*#7`~wS2s8%CBlse9R7USI*Z{Cq*Ag4+MNP*2@cD6PO9;X8Cblu8;o!1oZJ= zkMue9ZwPBTUZrT6P^~@-9A^NGjPqY0TWTo^{`E^c4hZ0ndiwtW_I~*Ft$0)ReDSY` z^lSMnCR=fMlR)3S3!F=X{5b?ul-pVgG`U{oKiP-&U%Au%Gu}_*_OjA6pA0FwyZc4T z09YTF9Q5uupAYS}w zj=`?C+34C(A7#@-yMmMVrE~mFM}FO_#e8$(qoLlzEJ?Wv02k90&S}3C+DYS9kogxV zBP01&3E~ZUYnf(AoU_K|c9ZYLV3TiCbktUY__xNlI@GVdw!}{WS1%T?C6foXnd5Kv zPAI*#W(@vfoQ&=Cs<5`kQn7>F*8Hg%b2qS>j-_{hFPkxUY<=eMRO8eoi#cKxo@=5{ z5KCwyZz(o^5|9l`s7a=4C?}2;ha+fxr6w>sH^v*B2APBvVB_laQ}6z6@aZAo^FScst=tJ}(WaJVFf?&;1tFSmW&y z590R&sr&_Y(xl}VHgeReqb^gB(61X%v;E;yEGdoTa#uJ%k3&^2@};-7amN+Y__N`b zsqn`7M7jcdVuifa`=cw!KKUc^t_NAvcJZ7NI&`dNmqTi9=4P*}n28abk@{9Fx~3!C zyfO9Wn|(g!B=xMx4p{nDDB8r0e=|5c&OxuWe`0@#_g*QE`$4~UmF!~@#K-%Y#w+4` za){2-41lBo?rZE{_$U7Wh2%;6L9aRy6DnGxW2x$EUWYwfTT|@d*i|I+L6)JB_@>6> zt!K(}CRpFaF*wa2Bi9td#BuLH?hdoTCRezR(2;m^{U22PNtUa*d7IB%JOBm9CO;aq~6C= zQYXE)TeRGu6I4I5L?-BG!iX?ilhT$$y=q)iR}OaFjPWwW2=}MMDUh7>s+yE+ zhk?hnEv)Ml1(@<_Mn;t)+R+z-kWMN&6v%KnCWUV?Ln+8LZr%a76J0A)j?bx!;~PN_ zfc3-N+CTEInTBu)=jPy7)V@3C!5ZNI0H;U(Rr1@800b^r4}Yyak7EPyzf#thqw1bB z*Q_)>F%8D0sLqhWor^KXPoN!h*b4ij;dj8_1pFiT@u2u)MYy=W@rCz=wI$Z)d3P?> zMG>~&;0#IHJvr}QBl`n*e%D|9qGP+CVUJ6(EQnV>JGS4{0y(IB6Y-+sz+NN$q5diA 
zEEmO4v+0^~lRoMBqdneOxZ*=Q`$#-EqmXf24 ze9^gF{$HMJXTqAiI&P^HvL@iC83Q%B@z29|GHc7@J82{mq2VRB$9b3f#5vca1e{M`(*`r{6gItIwWa?Oo*T zb59#(%LIOvYoD}+i=x@jJ)Cg5pbhqoK!{*L-0nXy`Y7hS&%r+&Ej)1tihNDt`{{>* z_s6AMq;}z>W|7YKc=-hg8*q6FJ!|5(@P4`CJLH2@)g-*Qa5DlqZG*XE{35-dU9*o& z{gM1~EWdXZmZzr(-z07^xh@C4IH;?ZVBIH5albXL-~0}}n$+f{QoTv@N$7iTkG>83 zJ@|DB_=ZSy>n$4nlIF_xYnf!TZZZgrWU&LCy(`50Mf*i)9vPN@_(iS%0JU|8W;1Hq z(g=_cc=GMR$rv~Vh{iY-?H>#zwfN`z6_?ih$MhHK61D}0OO8&Mv?-Xl(Eb(21&b5AS^$k5$WoZj}h`jE~fx-dX1A*4Ocf=aaqUoh= z7IA#hJKfE2z~zrcLC?$3?mXmIv-ktzCae23c*^g?{v<wAm6o4_GuRI#qp7QB* z;b9zvLnQuhlBWt^7|*Xy!nymfzGzg+ssI2r*r58H^0mu4EwFjsVnOo86b^*?(RC=D z+EPY7=pWXt%7(*ARUAm?oN-jH00|Uh0Fjb9)iq^eN`Md>r=UL6+es#bm$!0~V6I1} zdbJ{z2LKb&n9zjV^PFcS0a=4rH@qtX+zHi=>04;|k5Sh+9`*Eh!<4wO@fU=2=&q-W zPLkB!$jl0ElsU@o^dtdZ8}P3|yVm?=X{OpDtvfhCtlsJbwzB@^P4tWZYBLroR0V>CmF8LhWpQH8gU*ljTbq zhh4$qje+MJVE4vz^sg8A!SLSSO`g|T5&fbUB516O9sXfnqz~X2Em=B;#krHkU&K_O5M&wOYnnSwf*hY(s@xkOeR5{$`?64gN_FkkD(>3*ODc)A|lJ?0eHjTThvKA3upy z(~l#$?w%cypoT9k*%*+7A;%y6dehZmxUZzelCsQ%8gkTbO#CoPPSr?q*mtEgY;*Vl2~TFqm5Zskl<$A%*v zThI~H)}-*|)A)1Y&F#<+1;(A?1=}wq0*X}Q)b#wSj5_4EIu@-4r8UN-dwB6=%0>tb zSjbx$$iM)5n(o2QtYtNRXsKIW`hFIDkE5wZk0e|dE4BQHY~D$h2;@I#MN&`$i5I!r z-90{5`F@n>ufEeOt2DB1%BxL`pG@`?lE9YHqHGA$j3FI{dFkuZ6p=vdDywu~nV9z- zl#bQzrFgk?JZa6TB)(+LJ4yj8Foyu+Ipg_z)!i;nvL(&a^@#{par{ld1K%9|D?U~r zWz;J58SFa>k&;`OZM5}QGtS|d1KjYV{sO4=qn$#Mjy09`KxRZ00N1ajZHSE* zY;AyZCOT)Z{01wJ8&dAb$|}Yly{GN5Fs}5sKqFYk91hw3b>{ljvm1h{r0u|5*Qt1x z^o5QWAQB12{0vv0-n4Q);4)Mk;kwrpZq}o3sl#iqiNlr4Fi@+3SvPulwzgQXLonRO z(z>bb)Lh_>zcI<_SenakW?(?}9qHRsKZ)4${{R!nVuNBHPD^9Bubcib>m?#fB`0yr zj-!rG1M;t;d{4C!X(VEy5V0-T0bd>X+gm!;w{ddMm6AdNlhId!{cGC5!tB>v4fQv+qCD5bTwm6ovm%sD^>4cv_*=ePiK%~uFKvfIXTK2g;B{VO!>ph?}_l`f_7 zOe-M~7Ti~{KBlf|hsu!4{438)=Ck7)vIkSp_N%e3*ohfjec{;Fh|RLZQ#aa-j5jGc zZ*fzLgFCw*VvIl@xUACPCIQJkN$u<^3LU$g12EyR$lN31hUKrSWA2XLwJs)iMvbMLNT+cKxaxSV-6nYLTVx+Jrx|0{70Jn}F8=_(B&sSV952&bhydte22G+ z(HxU3?nBKhu^-+Bw~zuC9J2sw@X%x=<YP1(ozR~_N|ksbg$Kp4$-8bJ+mn8N}5L#24ReJ314XTP3TRj`R2{)Bl^ zj+N?O6B|^weEL_JcyR=hFE}2)_3C;Oh;9B}KgPblq4{LposR2Ixc~v2`kLtUyUa)i zS8SLWbHKsi*9C8Puwytix2NB>KfhCRgzT?l8AP7PC6@yMYm2YXPXFRfHOM#H5IU}W8 z)9jx5bWjP;R_$78;TM67mlrXlKwz+8Npr1R>CrSAb{?@hQVTO}9-RaVwG{hlnmS*IW+}33Cvx21Kn%OQ>8OrR;xR9ALdYbdE z5n8mqD7%Oz=D4z%RCdT+z<=Yl=<>(Cw-v^E)^)a#ZzqVMyP7i61Z0i#fUJ1u@ZzpA zvnHR$u6!N)SNJmG_SZ!5<)ktxw%c!V88L|Slwv-)U&g%O!v6pewQm95z5b0V&0%=B zp2~IK6pP$4{(O!_edqC0L(^Ux%_~)ne#3neM9AX-f$jD0^{;?DY2aJ`0Ee2?_DJUc z07>4)-sO5Sd-@U3*F+oF%=$>Q;+IwV5%{wAPw_PJ-q_jwhBo{5`}b@<$GZA=uRqmv zlO$)$!H|CsuhO?8yYd{oUm*jgDjUR(=!Zt~q{l0s3HCqIvzocnN^TEBh&h4q^6B?aBo3qh0Igju^^e*ZtfOm=kB)yD=dL1aIf}UFuzdG#S}Pi~D=)rwHf zw6LumOA6jMP#m>a4kX?sN z^!BR|e8(q~?_Cy01ijtH+v(<606cdm(xkoH9C%PRAo)l2tIzwJRa}ko9!T`8+xd4U zHuV@esFsKz?#yLTC7r|f-ajg@ZZeX6D*m3LMZAP^Fc=S|SGSAzj-I4fJ+@~}&V#}l z?Z<@tL8WVUj8fxIx{l=t>y==CmTT^>6zVn_-^YCy&S|93e0iapcaBL*#Ej{Yu}=y< z?@smbWRUE)J+sAqd#1{D&x2nGyhwz7uSB|r-bqR2P)+5xJ%%>cp9?u=_GinUE72}| zHR3%}MYvM4*j&qM+5Z4xii7$CUZwj&d0LD3KgE!u#=8FihwbH;AH_T)$&md?QBM7? 
zG-&j{ju)3vF35FDcyER}ZY#C_09E~KH^wjhulqE3BJ6VdKCgSGG3$hUNTd1CpDVe1 zyz#N)$mF{m)n}8-Nn56VT9Vu}(yVAQ?!i=gl5hoeUM2W#d*JCUpx5TNv=-vkpCSfE-^bIuL{9jn6cudcO_l3htCOB-c^BO-uSH6I|yey5)G@4vJE z0PPv!{{RJiA@QbzuJ}7i*5$hhQs(aE;JTezfbs*VB!$M|jAOlY(|4Vvvpo9rBMLH9 zqp$g&M6RZ~=9yuvTgm37#41_Qc+YZi)13a5yQE#26>{Tp_RlrPcyGnN0r-#bb{j8- zdVTJZ;@dexUt9g9rC|sdC8TTv8E$}J;8&@5W8oFGzlF5TOG(#nyg{tazv-3|LN-L> zB<*3)a7IpZjw{upCr$}HQSjKz3g@3L>!FdTyCiH|C`rLN&OgtsW9eG`=ZDi$`z)jz z`)9MCe0zgL+>!0|uEWW*@f0l;znN!zfbX`G?{UGv{*}pJ>1H{aJP{}ddbW^=x3+#}@7B0YLcv^@P!10mtY34ci&xy`)!S>Xa!Q2* z1e3*OI|!6_Bd|Sb^LTSoyzuS*vm_ILW!NE-=5!n&y5Y`13{6JQJn(cf;3yXN2^qk>-{YgMV_% zk_V~JUqCCb@u$a63u>A^nc&|D*~Y#R3mn?L=A&x9eycgbV0UtgqdQ8FdRGVFUxD{O zAN*N&bOLxw;pUkFeFKpS?Q6a-fe(nY~=hq!awS5QT z&x#)h{t0N0p?E`0hg-fy*(~ArLSG#Dvvp(s%5m>sDERN&%D$pG?=qT>BJN*sB>vQO|MM z)j44zGn`{RMOrPK@$50)s$EOv3>Aod{sqNO`c#horcyHZ3NcBt^(&Xe41 zibh~vF4A9z)g5_Nprv&{yz3VC%TIm?O#}lqgNZ-VI&9LM&Y4Y7#)l2O0GvKaEA@G?5tT0GQgRrchrLjao4{TsZwmOsN!3&wTF$QP>b)3#MAqkwEd_^*bvV_xK=QLj z{h1r)R{V&r2Km|x()b^Hw?qE`)~=@7<@N145}~D%**vgul29HL`;Ok#Ep{48tX+^r@GVCF9Uc2D`0E1B2yjL24H!i1?0}J%Wt$pk9{{X_i z2=IsOcVd=4AF`I~TDDb7f-xjbjF$v=2aHy(lj?GNc;3-JCGH)_&RdSW@lt4dZSJ?K z!D(is%PR%s@F|+zv)WA25XA1_diSp1!@eB4(QG5Kwgr4ati`$Azvarz^<(JHKM=oK zDN*K?-o~{ma>q@?w6?SRZOx22ky}_edy=OUH+4hMcg0chZ^cXbe#@oUTs^&=nuy~N zxm|!BgC6{o-m^S&uUcv9x-ONEvfF&GH+kE+HN!>8Cp~LwwK-QJGp7!3eV)6n{B+hl zN#KY)Kd3+2{JF%xYmol*pd2AR`8myb?E1S+!5KcLvjdUEC^g4+&V``oY=UYW)5*Za zGdQXks->791ePPEew=>Gz7k&s{2b9OnN@DCRj#MUUCe)=#eQ;lFH1U}y8$bhp0OUew4b%sSOs1zMc_>ZI+* ztuQFxqjR)Y6~q?~#BImDbh1AcN_ht-rYk0mRIFv%*%--bPfl@DdC}ytI0CER-Lj)& zKZRmTc+iY7!99(0VjEGiIX>4+FJBYa+C}yVLb+kgLa~ zal-o@(rKc`pLhx~?!ymy-LRczhdIj*4RBCgl$RioYoO7t_C85F8~N9K8(W@rMRMnk z)c*j&53YaLtN#Eh`E0q2=Dwiu-P(V`4Xq50ZC@Xie6us2O>Xr&@Jroq%=J&%zs2{y z1itZRwWq)ORB33aIu&J<$+-J=t)JT?!&-i$@ap5@H-qB|q4=u#C)Mp@!wXCGQm(8A z-3(4gsjo5kbFRan>l%a4^IA!6B$_P!lvF00FC-PsJOLf*v&2{3+slQQ|KF zc!D*K?#)hCcpI4KjPwox7$dl$QRkAH-5!+MP)0q6iO-9?B?2> zSax>T@Ji{Y%_L=ifpLO9hl-cSzX&x?0WtA+!~IFTPvEBjgUW_^0`UC?gY98&l9J8KE`hnXYU9FB4Fi!knfgc|mL1AHR! z2Dbv->X%YznszrcO3M)={l(|=BD`lx{iLMPq1&eDu_TzlasgrVJ*%GaSI3_h_`5dm zcIpqzVU4f=_alHlwRW7gIo&wk=e_(^_^shjg&GCMjp0^`^H`Bcd!hp-TX zs>i7tRpE;X{sIOA}mZrB& zQ&G~RlR9sOd^>Xt(gT&2?qz0~htuU>z}FczkUlN`%qe%L#;L7%aBaAW4&UK)<5rPr zscJU*Dc!bPgk~vtzB81hl(MymtMMS2O`=@cR{zJgUI|waa45; z3FW_rMZ~eL11cYwW4=3zWU#fB%otykoOA2n6`^v78Dx!uNKfAf1ONvjwRYWwjD0{Y z#lT*nJ-9x#MgC{tjyS86Yf)WEB3#BxhAJ*%>VBiWR{#JsA-wim~p&`0`pvxl7*A%Z3sCg z`yW7l$-V*b=ZrolUg`SGx{O-ZnG)UEM`J3aS4I`y(MrwG^uZlQLG`0u)~+V_Dz?dK z=0@84NQLFtLpnQy<)Olr=PKOsn)Hv@H^B?9{CDwwp(@HF)AZRO4+!2?XtZhtDe3Zg+`~#rIGVzfVEk5?~EQAQp#dWm=RURLj~)fty1{Cs6T?dQ{xR9;yA7( zOY4g$02o*moIc@>yc`PezhoZZ=+nvV%KQ|hfPWRkF>DLEQDksJ&rl|uDjuWn{TT4<4Co+g|A?F@L(Ty zL5_s_XBn>N#h(B*uL*c}QoQ?hpZ0yMv8=Y%(-tBV_hlpaxjhaDu1`bornhNzXt7)Q zS2LouxDsuAF~X8Mu6q3|zOV0S${gIcO)n$Lp!sUXYCPPo_V51y4tu4PwmMzLrD+Td zin6qJ+awr_2I#hQmZC^yzn?%$ttoSb9#hMMLhxA5GDsy-*BPs`%CBb2zHz57y+s|W))deqU-!e-wN;@NdNJbH&<^j+gCvrOMk{>9L_`)??+Qhh{fO`DGa= z3LgMhJS9^E)i#`4ce7Wz>#5YS(y19~%&pU-=zeWQZ8UbW?hhkl%Lt?pgdWGC2le)- zrqd;kz+KAfma;fiyMz*+Z~XFO*@vP0GhXxiO?()Cg})PZ>n$qYVWM8|)a+jb#}dfc z@`o7tTt6O+E6#jBakkKz3M}Xc+H56^HLc2ysUr2r?8D|$(AT>f&81d`z1Q`#?f8`k z3X%8v?c{LrT79zVBQ3cU8D9q&3);F*0BJ9*YfW)+Z}4)Hh( zy8#!l^sJ4BQOuGd)Jm)>DQXEipNc~lI}(!BxseN=5GFgbm!W>n+FePNjYxA6(r}%x*eCpIA?`! 
zuc5ejRmok-N@hNUzMetcC7aH@TpPdw&o|O^sizIL(7)ukcgt5M_uvTk?O<` z%bKUCNU*}T(Xey;ZPKM|88O?QA-t`A&PPDvFAx3Qd55ssMYO<{!} z%A7ePn!jSdW3!U&fHKMqP^tGHGk#T_wI#G?LzQukO=vrngn^TuYJ@`#fuBs(@HY&5 zR?A}-p;uu{1K*0#xIiO?eFaszjK#F|1oKxejy$uD*{fFQP40CX)}D}fQ^R)v;igHy z^eV(hRN;67s~XhtCZHci@a3@S6`60NGaBbi^>#@X))!Z~TV(l!WZ=KK^cBH)a>my{gk0Yc z(-FjDu6>11;xCD^h)ue;8-LUx;eXyg(y@}hhR~I_JzrY!&Ayipkz?hJg91W6Mfwi) z=Fw{{t!spCGP|iJNe)Y6`B$FH;#-^PKX}B0(E-OF(ziSTDU-y7-;g3;J-Snb5@#+l zZP1^g>JY)?TdE$tYBwMgnzfvuKGj*r%z9Sr_>{=^Rfs^5Kwubp8iQ{&W)Z-k8!+8A z%F0LaAHtswbDa0-)K!VC&zj#aImSIGj@`o!IqZEanzVV9k>7@>p)0djNS6{WIKlPn zUG9iWTq`y){Katkb(E2+F;W5PitT(c69i^{ojI<2H5VD&>Br(!>2jWj;rpK@mpET@ z(!D24)l@(Np1!?Jc@>S$2>w;nY5L1VfpP)teJk5X$5f5o&qPg4k%kUE{VQuv)jm)& zUUdejBmfrqyZhFqmZyw-{?x2tW7472cMRu>xR=0SmdWfZgM(SdIp?)sgH~ka_D{5kt90U!bg>gWRAk^4yL+O%JDu0Lb_JMQg=tw1JO)dexy~0^|+{H6DPK zozEQj=c3x`dTcRW?}V`39m1j910pESBR=)UGt^#5XG&G;Nd44+M{5F~xdRHI32QsykZejCi6S z3f{})>ChQ&2M=(AY7bC7YlFPdl(d2ZK=f})>92H2thM{Q2*w#CKQZVRJQMgZqk6U%e^gaqso3apvfHFrOu?&o9(;yPH_|GLnP%haguystZeq6>%gp0rLZfQhmFM z^(|z>X>90tiB9G{*!C6X+KY`w?Vrm~3E@R?RHc0nQiWY^a&MSVKX;%by*JEP9*j+F z>Uu-OrIdz`)3~mA!A_hHdh+I_4H@rJnv%R(F7G7ed!F45U9{8{@E_$qgQaE`E!=%7 zR8$~t+^42$qb^>hO(|bPd8P9?&Ojc$Dy8fBppp4iXbVC|I3AU1I7_!Z4Qq8Ia#=;_ zRMR79?_G%bNU81ZK)udBUQJtyA1x0Y^s83Z;9>jXvr(|z`jB4@kO5y)dpU(^O780%j|{>kdvuYo*uq>q@mibYfEL0TuV&r#k5U)ws| zCa2)NL>zlc*Rnaxy>wB1~hcQA%-~}Iqg`s*}PJYjK8GnOQq_Tw`K@##4;?NMZ)fl z@N>0?bv?y>HSp*5ozbs;A$Vi^U*X?{63S@XPr3UppXCLPW-85&gn)QIuNCs98PCcI zJxBwkUWQrSpp>y;77-X8nB)r9omoRfa#o`X(P~Gre$!VT8Pa|x>zYr6G&{Wx!ro7_ zY`MAf#DE-zUOrVI@CI?~URus#wSqI0IP(~E<+&fFZTv{_8hE!f$dgs9pi1*(KrMlL%tGBVgo--Eo!8=Ig0C>r- zOZb2BQ^!~SFI{Ti#8;LYl(Tt_c9N6kkM59uLUG4TdRNZ&j`z1q7>qh0!t=rP&3a{y zzBCOvHkj992Fc*BJOX?5>&7dlt$s*y#Lk8WHEKGOTc1~Wdr{T(EopTN-A-HE>xmT4 zI~Cr3q9yhdaiW)4M#q4uw zaZZmUGY#9i?Z-|x51_?+XYG0MwtLSN`92@lp}ny&B#71wSr{VQ+2Z_a~HL6F_`lpFL9YoXFMW^XjXc)$P*pw51FbC<}SI^%PE#%R) z85Y}YmQw*D#T4f#NhB*Sarpj~@!qfTS5}!{I*FPuFb7D=jmI4G`e(g$x@#!lZ~T=uW9rHiRjR!c+YF&RZHH9u)mR%fCeJ?0^Tfw*)% zO*i`*UpsczC(|82#Mh(P=^AZ}A#Ks63-X8Lv*Xj(>(Z{ z{A<6M@uJ?*S2=%&e+;DYjA zq8L`%6SX8@RoR806+h5^Fg!~%zo+R*vn`rauGtK7d5M`i-CB`OP z^0_AkdU{vRZSix&$HH-Vd2eKoPK@Bk56C4Rjz``Du<7ky`}=HomhHYGUT8}&SiFcM zS5vz&<0Ns@=FUfcE6OF(JRjlz0EZg2j-zcV_|743JTv*Nu(Q4-wq)t@p$t!4@m@wI zFpXYVy$`R#^ZO*_spYd9ii=}(JKk&a5@KIOO{txf%;$+==k-LJX(m3Nhp zn#s9*5}^G4pVqc7Mq07uL*}y_WbyQ`muI+Doz^7BU@Inhe8O3>039nz^Kt2qYRz={ zdLEgf0b6kZ<0{#}tJWlA<{%!%sueGTpUbeTwvdyKJJq5MYj}<`f%L0!+h#C(9GZeJ zkghSDWYxbj%tZqu0|0wd0;R-%AiEj6}!yPp7xFaTf6a#B}to@8RW#n?B}HLEw*3THlva zIeD}{Gyec=Z`q?z_#NZPHT^!!$D(R-?n&WKkz~$xk3xHj`H#jr-R`Gw_P35Cw^rsS zT%6;RpTvND>-q)p-h<)YU&0r*o+`7pztMcG+(mkU)POzv*T9}G{gi$Pd_ldwZ5B;K z!%~J}7Tb?95>HGc4xf#6Ki+%RB~RJ=oe#|0*gv$UmgZBs<(KBh26_R|W1#&j72`1@ zZUmoR)%T98`wQycAA6~7^vE^+Jg3PlAf4X(MhG?Go;UrU=Yr;Bw2oU#h{+#jz69<6 z0D)saoi2N5&AD!_j|7fDw(g#P%-CRXm zYiPqp_#ZCfJ$s(LE0mVakVeHBLuE+XN#m_-oy^SkQdA?8v=e|2e)RWP;kqwraQ;|Q z7DipHPTXT0^~n`6(ytihE#v`?er4p=g|~z+?%|ZjXDN|;GT;s~Oh+;HDPBcTgq9nC z1a$uZB8!+~RT~>1s3#+x-j&*1crQ?ER)wr5w{&g!@`wwZf(ssda6YtK_$F;)1dHsg z1b_l=9$pSlQoXq2kH(7%=-^QUNwtVk-1QXk8DY*8X9VZp15mJw{WA8>#1cVgn|lyF0IF2v%>Ar8oUCXL9j9z@vh1yE=Bm>tK*!Xis(=DzR+S^Et2tO&2128_s^sS*o zn{x@`V;vFZ-weJFYo8IXV;6Q!_8>s>qyZj!}CX$f&1g;q#Jl#(I&| z`&U|}7^BXqUZkDkX!wKSe}c6nmUw(A6ijo0C=3UuY@B+YwdCGA@FuaRN4vw8cGj_> z`H9QH0y9&v&P)SmUz2qn@D?M zkRq61D7RK%5hED^Q=WUCJ!|tX#Wt7Ic((S+bM}kXxOfOdkXcx+2jVM(5~GHuqX&O8 z*2}RgF{tILOZrA5yGtP~a!+$p#eq>e809M9g1lh$6ySOGuM>51>8wsqG==d|hsRnA zXQd%ne(Frp6MJvvQSP=*v!^EW5hb*oyt3A-z^(|#3bY}QciLT0^~DvWcq8vPpZ z29^Dn;JrN<67r1y0KjYIKiR|K4qMQF7>EEmB2MbmF2 
zx7Do-QTfi&Z1TV7pkcRX(1J(hPSK>ww%>prn^WjO|y$mbU`~?3OBdBwjP0`{Y*EohFCjs8U7^sLZ^9 zGWk${-#uEq>O1SXoB?vlf!yMT@;4JK&pYGmR+`*LW8!a!+J}goK^)A`^a`U0qtoe0 z;hjnJS&7r)Y4v$HjsEb6o;Ly9`d2q&6~)^Gh7lU%08ewh>i2^I4vio9!k<4s-euBCYvB_UObAY}FDx$bMb_)X$T z{2k(b16qxMdpnhL{?O=G@f9&zwkvzB7&_LE6}O7KPo^M44XyOirUweUjyU$M{{RPS z(Q2ABz8==gHI_tcHgn{k;c@Py{#A|R3yl0n@jc{fJiD3N!zrzU9?rl%+3T{`nX)Xtr#F58P&19G30)bwRN!4*eQzYiMWN{|XJJuA~M zwDj@shE~n?jekpwn^{T!0FQ`AB{Sb0tI1~=C1eCL@ImSgPcyJu#YIi5ItBuyRphr$ z_@h>}5_lqNHb%m+^OLees` z1{fJUX1a?lA3D#5tnD*+-g4n%KQSg{AP$_I9C9m*5CHu^0>Ac0Wf->!No}X*eTDlK zc#7*q@lS~+B+YR%ksJ>u3xO1tj)$>iPz&rRq|p=CI~V+hHgT18*IzHo(DVNQ6nrNi34dqb9k+z;-dQg1mMgoH3G<-S1MQJ;yo>|$ z5$w!m78p{Vn|Q(d!oGO;gQj?Y#a|tKSEpJC1a|jRwv%gYpJ%suQWRH-j#LFLxZzLm zw^Bt<3upBzQIoCjOLm}^Pj}#ZjVI5Q{@peZ20(8k zjry_1Q0E5~(|9Mtdf&yL8r$AOr={k#e$oJ0l)z{Vfc!z@Vmfv8t=pl zO*dX!X*9*Sb&ZiqhLjvLeBUrR7{har0k2cf^5m9^DhW8f-9!cLt#-1@)ZXYBQF@b$&th;1w{^+XFDozNTP zg#gJ-$J}sgAKJgbJ~r`Bg%*DaYS-d8)uXu5>|JAdQGwkcazP|6aDa@Qax2a?9})Pc z#5!K3<9i);YpL|xf^Co~XiP=1xfOba$IOO-=2)-qFcjEs5hhTkcLypT^ z^HHvv(PNH9n3V$QRfz^RbwzMK>#lkl_x}J2>sMbBJR_;tc#6|Yxz}L^*HyW_v?5E( zO^ip#KJg9|5J6GYV!txW;Y6}^<3A_ zRjEUWFcOssd&_Q?f8}<4MjH=bF2&0aQ>SjeYrjoTz5X=(X1TENK8LPqUIo3^G#goN z7hY)w;|zc5(4%w>xCZIB9A`Y&%RV{qRgRl&YvNA^>PJ}cozoOIPEVU0t7RYhlYJoj5nb3S)`zTEY+!uLfYtOgUJ}zh3#M6o zZt!J-UU*ktQ3r_p3P`z0E?j`VUI7Y;CLbq|I&f<|7FqQeLy_86_qwTi`I%R!>CM?% zE$R23GvSX8KaG5Mq4-wXPcGIc`%a-Yb`Z~xH5o@ajX+V`xUaar8u%{PNs{{F7GV^x zAK8_@P?5%)20xkiHRXQ=J{D=VTCCnH(pKe8xhaxJbvTdOZJ%=sg^`#H;jk2s;rdt6 zw${sLr6WQNX|*`d9jdC-S}5pkO*eMPXH~bil>+&MWQFx#>s)@LWDyHScf~KvTNT+) z9MD^DW)gA00~q{&I>VX32Ir^~6=1aP-j!K5GnLvhVi8Z_bPkUgu_iR|7oV=YI9jh`g+Lz)4T61Z0UTM0AtV0!72O)KdmOW*)~6{Vl$7-Y80Mfd zN?|d1jpgU=_N__nUK6p>NLZh`M?aCO^4h8DIrUM3KBAPp<0W;mma!{Js_nH2!yiH3 zrN6xsOiBzO`6FNMsPFjH)7ZM?J^;sGQB@w_VGEp}TBK&vxY$=Gz7G|8-v&^G;GP9B zVi86O`ihPw#lkneQ4ezFqZVQ+M?G^-+9gV0YV0Yk~<1#iU{L$a?46 zlXeriuX(LqTugBxn)Y6-4-pSR?^g8v0^T(e>KA@UJecAeoc{p4dJ5z_>{0#R zrnVmD-VKiDECa8sbBi`sCHW1IOifmPm=oSlEnr2Ny>_1uEbiyl(&kv4Mv)lV$oWag#Zo!(Pxlk_Jv~qE$I_r~b5^8Xgn`g; zRbRiYS@EfYE=^svk2%1}tk%0rbTUZm9Bb9w;19;MJRhxTy7NmNo$MDw{WPwWS zU}{Y{C9oStGP4oD?nO@xvZ>B7(~jr-D`smfWUnLnR0#*SQ`6Rju4T^dTD4rNf;w~q zrF1$`@{?x^n#HoSnnc{Oxd8IYxfQi#4#V>#na}rlt5!r*Zc9^U)xK=^_N(hZL!1%V zRTx+Id%$kE$Wz5wg-Jbe-_wujSImw%BWlj+i0hwv+OwU9zH6VgjriK3iS#13Y;ENy zk;v!`QjyOs&3k!C$IZoP+D^#a9uI1x3^-A|@zXV=kVlm-R$`wKW zb(E27Lw?#I;2yoXtv0ce6b4w?0qMA7HKC!~Tj;uaYB#ttI_{1({t?^pKc!vOwK<-~ zCw5)Ve8C!!KvUR?=H_=gITtH!>To_WvUxl|atd+sw0&mFi&{ zvrNLI9+r2xeDkMrPHUEn_f zOXIIGCO%e|X&JiIF2MOxAg``B0p69i6I(DD-t9DX% z0CHnIhO8bM4`W#4XCH|0gpNp@tcXJn#9-Ep4i$dxee2jYKL=W0mNP~PBd_s(eXEVs zz8=YMC|fy!$jA~Nze7^!xa@1~)lGIeUE7zGa%XqeREm3wYjpp*yJze4Hz8pUqt@Hm$A2qJXK)BbIWBM!AZtK zz7O-P%`^5*@dlx$iLRuE-bNoHBN9*Ic&i@(ejoUs;g7}WpHb7~io)jB=FUmfl-EzT-^s7s5rm((fR?hQnG&-R<9UqcSimeGe7$-@&O3)&Bs9 zG~h`~-6mya^(*rV_1_JAB-dx~AH;tO+(#IDxgpf734DUfAqPH(X{#Rzzh(ab6!=TU zDXHJRq`G;vnl^Y`xD==myG$dT0l013rD-r6|! 
zu(c_tDd=Y?J^fD|n@{@*WpTaQC6NlSAObVaJ-by~akk_J&H<_BAL!6Iz&K%yV?6%= z_39MdB`h+;`gQy(n$|h*a^$fViDY(Cx%=wFKZvc1REFv9u5E;*#y1hPw;1J#9Xb3f z8r>e|bd3}NAqoKX?V9SmF|O<047Aj(>>yT6Rug9|zDRZ`Pa#iYd;S$_PCE`Ui?W-t z7edpd)nsTbq!3|Dg`DG%2{}EweJGDvH`Wu{%o*4aM1P(NSXMff<%Z^g+Ak_l z@3+(p44*;GOBDWhnBi9e_h5_+b?wu=EAwu2rjm?d^E$l~#W(s7lX9%{+bzIZA&?&_ zg69jk9ODdl9WzDOj5QYB9mJ~2=X$0=#!h+n>F^%t_el<-cZSt`>10(}Y+P^O+8#`B}7+F~!B%i%= ztoUc5jb~Bv!h^_N%rX}~p7r_-@YllHe}jGqU29hn381xNtr~IWF#Z$u<2CT7!7mqo z!Wpkx>X62bJ*C1QX}8=<_F=~O*!|aI$G8=>T^5HO2->2)i0=FsuY5p)`e{DR4cnOh$8K6Z(gC!M zi0jngdSleqo$jRm6tvYYHBU0jR=4>jg5(1v1g>&$NFd=n(r*Q`99KG-NR8Av?N#O?{y3}>48 z!uvskQHg%ps$5MfyEKyd8-_{ciR;f^c{TN)hkOKYEaZJu`TltsSq|bD;1)UR*Z^m> zbAK8?W{ZCcYCaFqd@8qp@R4h?`H$wvN+NQ19y%xllk97k6^G_r{Z8DsGEtkL(rImT z=Em@EhgKzXu1-z}z_iIH)2(AAq;RQJZWlOWFi7{VztjH%oYJo8-Mlc!0nYB)q92u@_rPhe|`l_dweZ2AmFE7~N~6jJSde-lpL z)W-@cDGC4_QClM40|OtG72nE_1=PE_`!jpjHJ3W|AbxeHbMpgKrc;65zDW9mBIZ7E z+NsG7Sb`2SirJPe&7W)^YO2=w%K&-c4?*id8HNx?>7RPD2g;;@fzqV%Gr$<<9Mggg zpaGuL47rWOeLK|g02pJbs18Vs=NQI$si7gCX&BEZHE5Fl%rYOnf&8hj9~kzKXorm2cH~`V$&w$z&gmm#W;-u3mmU$)Uz0G=g)1#YF zYqvnGycQ#ke;&1NsPZ+}S>l%H%;~eeYgLodyD~XQkN##wa1h?Jw9nE@w!ruiQe_mOn z0NTLWVU7r`B@~vXF`Tb-c{jwb*{@8GKuu3ec`Q;;ac?<0PeL$#>*P-rcrMFHzM2c0 z1)fBHu;32;$Gv`oe$)OsO*-AQnrhPsq@|M+;89yvQo<5_T{duis({)RICLLMy39YpG2mFM9xRLgY$G5wktV`9aL^6)c`IOr=D=n3Rp9pP7Q^ccy`f1O8eS(q}& zbJL!LdK%s|;*M$6lXf+4HA~H`(M-sMACfbH@Ay|OtXebP?lNUq^#_q#k=|{!mLnQ% zEC_6J2<`MWk#+m{n3muSjxn58wn)PEJs0+K)U32$h?;X<%o-b(HkUB;{mg*I`RsUL zbK1I1Bf;_LJ~Z(Loj($3_b&+F&qm;n!ylb^_lEC3vGwaak(B|*EI7g7*U&yLzFVIf z>RP;Vl{eP#O*+XFAz3`N-_(D29%>S5ML&e&uccJiCpiSHgb<@1*%j4k@vf-|CPW1g z<>Uu$&<@e=eJhx^f*G!i(qBfBef$tkYiCTeQEFn3QBtM4D>oIh)WtG{`iRSk6JCB22CD(M#DprR{x1Yn1$>&_%TquPWPeSMKHa_a| zeXH8^UmbXFOws4@ey5^_k_MB=x|SfVB1g=QeR_Zi?_39qeja$w#oBI_ajbu0_#ffj zv|{4k!rtsiQ3oW2US2{ol6lQ|pU3|Ii#FF52g2SQV`XI`M+OAj(lvWQES6Uvyos`+Wev#4=XxzsK$Z7qJqXDqDH!MwkgxyW3eP6h^Qu;iNI=z5?=X?jw1 z?Z?)yX<9Ybt#KSS@jR}_0U}b&XkJ3Sim=4@5(JHM#PP*@kHhbVb68tjdC(WtwIZnv zum^l-Pu?T8de+avzkwQ@m+%GCP5!XQFzLuQEPwRUR_HO;9V_hr0EJ%xZ9X19_O7ou z)x1Y2%Sd^O-#@~hr{D6e;?x!1=OsxwzEocLTi`VC4~8vmwt;rZ^RWIBe)r>DhRjAs ztyrAoj{fytLs87+?#x=Yg;yUrrWMJlAMaz=AQ@GMj;mkT&HuF^(w~W)Ka&exWz3K*#bK4bm7UO3>FIuQ+ z^UI9$RUUj(5a%j7oK|(^(%^+TKD67Z&h!0iHhFMPKN?96<8Cek1B1z|$#O_M;-xN2 z8jfSZHBBL*6^1EE8ONnid%rEmpfu*Y&ej8w`qph~Uc)btI}k?)u4|pt7s!(%is&w6 z5|PhZ&(v*%kz{f}sgf=-vC8YSlcC?|l45Mvlt}rnQt>2aJsSHX1IlXhc6^*vtq)@e z{nkS{HRj$Ww`(nS=O1=a=B|6@y&hA#xepy#JKIBgqdrr4Y<&UgTS4k}y<&2);zjwo z`cuWa2N+xv+#FS5HbW7?#Yq&PfypNw>$y9ejTaUO2+W(v^vzjIh~xQSgALfVXCs*a zC+_-If=__E01xVGLQ7HJy}XL@G4l}aUA;5vDJH+T)MRFr%a-AedsSk=iFP5M`5*A7 z2N1+I@>{MeHc};kEJ$Mw-vp7`yBP0RPqng$ee?64dK1#P*xe#{m5y21aD6J>&Y;&9 zvPcgd00*$5F2x;|prZH1MLzOP1bey3J<5)ytIcuZxPVz^Ip90iZ1ShK;q6(ro*CCXVXR3Wouyss zce0juj?NO?kGyg1SG+x^E~&0*@J{Otb4rOUkNhfF20!o>$*;L|p8?O|zZW!5bERpq z%>rBK`b>9frR?JvR|J5p^0o=X6V45IIi^0QUx$@ibll{Yi(k<9Fc=DWxv15jBJO-X z{{RTQYXTRW!dk)=T<-fL9zPD0**p&}kD*U_su{Ga^qMH`^xLaRR$HZP?{sW!0WdNT zt$u-B=@+_1oyG0dh154I6Dpua#BGjA1F0UI<39Bco$yD*-ZIu=j%Y6Q%j*!02C(xO z(jw#LXq4r@x(a|n!Syxqm|iHySDmZjTvwMh{{R%eXTOT!{2dxnoG~j-_P5~fetKEk zl|bi=jmw^Q4^VwQ>$&)GVu|97E$*gEaRtP3{Lz;z17)xWsK^7pYutZq?}oky@PCT5 ztA7pL!utKam|b=3;#gpkZq6cKl_fmnFBtc)J@7w==AXy5HWFL3>?DJ7#=#pKaz10g zAP{-4ud9evC^_4f>7OS;n$fg<>-!aaCDC-aJY97bqYsHJ?(ZOyF!6-)loH`2LzBH+ zgPfe=toV;nPl1al$9HjuDIN-*C(3#Kf(GJgMKA=*WyK=ffrxX zZ0GR({D=Elk?o0<#u_-!&Y_B)8;o?td2fq60=iC#;dVFn_d1@TZ#~qyV%$a^+A_gn zMt}CQG2CLkJT6;H2Srn(8ziiIAKDSbKW6WAl1cQ_{sDuc`MxdJV7`Xy%+?^$Q zJAz&lz}>+tq;)wSm3LH|P`lL{&c_8^G?c z+;Jiy;qmeS2h2#$a(aF>=-M}dwS%%N%Q?r|(n%l 
zcZ_}JeKYW)?tNq6Y?@W1rY&JD-C1E`cg~!Z4gT*W9@XM<{ABUZgAz+CEm*;9B@D~t zCQ|_9XBhzh0G_qpd@u2gUIp-N)X}O)uW{y!Se)%`tW+UAOB`qL6weoUV@LRprfVJ~ z@cb7xa%pmre`B>#ZX{*Nc;s$!IQd6kOxK-{&#)MZk*7`GYe{JAowmE*@;sR3*o-Qb zN|F8Co{#OZn?fc$Ubs|WDTqaTFU z-gaxtOO|OD=L8ptU*_I60p-8R#(UL|9DF}r2gTYKfjl2I&90x}*qT`{(&3QYUpNM7 z9YDtL0)RoMbVo;it!aV_u)aH@5R@7S|q5phN`>g$~f$SPX-L1cQz$)|22k zqSt=gb8WRD%uKfk3lpB19k>}i_^!B7#^teeaTDiDX+8dAeP)&}Nvo@L^X@-D@ch=g zT0PvJb<~Ke^1Bu%jFL(9?@w!gAZ`+p;N++{{OhErhCeb&zw=4%LNte$jIrC<{D}0^o6yc&%IczCt>a#%qxAXNZr5JT9!ph5&p~FtHrxyX@+lUroQDag&5c8f~%EwK($%a|~GFk8F>BO6Roy z02k?cjEdHmc6VeD#xFMm=treq*E~fow829}B2Yk%HVle;nu||oph>AOcd3Qqy-w3p zNmgq+d7x~MCTN4l&pPZW4$Y1hU}c>e%#A;KUz+JC$BuUgi11Ob>f=+t3z z-=16XuQ=B=AlGd8<}rmGf%UIOgx&d8#}#UEgl_jZjZIZ$agGKC2YSk6-#7=asOef) zvTu>k2h@(0pF4yAfs^fCq)#qdkY3BV!U2FPtSy%mj&qVvH5hC@YS|508?q4+qaK*5 zGr00V0Dxa39je5R9^DUI3Z(Mm3+QM>A@>p;xa8GCCp*2dR;FKs1z0GchNQfO~pY)|YbjaWg~|JN&IKGQfXyRNSOB1H=GRpUN;mPCVKETH5$)@XK8B#-i6&8e27t8S`)Nq*S+y9dhT7z;j)1jJ$E7cr1N_ z&r@F#Y6>>D*%Yn1A3(>GRp>QksUC&!*GbTdTwcm=<{cFLh(E@)q46%6eXFjSX>$uq zzc-T)Awc&Wcdr=Md_}K#p|;&zHLbbn1U+$GAHq#CJ8u-^M!<+1mvhPKQnyDYE?>Bv zyB@zRStq?fZPcE5Jw;z=>8lodb5|qd;;xSf_@yqZXRaGNo2h5hn-f7BVL=D6$6OvO zlV$t4MNHGPYdo)dLLCj#BpChd_(YA@os`v-2-YCq{#v9{Cvsy{{Tw);#*b{ z5J6TP9PmN!TvAO+=g{G&3Y7V5eU0I*O5eqLEtaQig9X%K!v6qqzll${ITf`AinOtTeX21tXXYpA-nEI%I47~x$EhrY4Dfi}o_>|9ZGRXbD~>%n z55~BGcZ>myRqK1U-T*l#@U2nFDt%7=-%w6+*~M&G+YP4r`~=Wmc9YbMlrM`e&Nrbo=a#6Z3W+mAhqk@{FEyP}S9m zy^mq>iTho|O&e~sXw;!!ySB2fZXPDKZJ%gwC@XyjK-Hb$zZ1m`quO6?lGyywwln3Q zLF{Vew>oCr$1#bs*|%rg=~!~Lp>lVVn(AM%VXNtyos2M)mP>aS&j*e%>0b)|(cTfb z*L;1T=r(b&gv7DJi~Zt$Zaqoo@UOe{O-{}`vt?=X-&{oN=QkMnPBC9P{7}#&w$~xQ zi9pq1h})>&$~$mhKBKqSHPwb~Nv3Nj%NWY%f_!7~2A}Yw;e_5E(OnF2GK;&E>w9)b z{(_v~SH_61e0V_y zOJXhIIV=9L{ZFNKw0abM*skm?;t|IzgT5me^zTlR!KFk5V4s_jR&AgXG!Y&E1)J+z zH+KGBNgWVi3d+YvGe**Vr|^K6c>}WmC^%#eG5cG)vTQK1Vxw{K2n|{3AJL zczQVpA5qA!to#kE{{X^A;d?1h`sIi#6aD4?0P9pVk7Exnb;Ry3pjj-VV7bruR~M;T zAk=MT$4M0a73dLxK8hR+?Hj)e<+Ym@V6K3Ci~%FkvrgAXqa@^vOT8ESJ6@TUHt)8P zPi)}iR*s%!WZce7UuTh)5x_q$THEnN3oAo9uMYt^_bLAX)~c9PJ z>86{|`#ieTzjpe@Uy4>aYo!G_QOW2&mGOV=6{jj$yJyw?Y2bQ~H|wLr|Sy zx0-oARE-NaumeBRz8Zr~o8lLa?w^6@+&uC*=o5GNVzI@iE?6Vu@l{nSJkoYY8GUIj zv`Y-=(a9T-Q1-~Gk$@PnABeAP@z=v^uM$d<=~1iS#H$RJ`2nR0Fs!-YoNz^XmZ{;5 zQ^VIEWz}^1iwUv2WyD1Z2h%-8d37mD6Xj^~@c5c_;H4L@V=~fJiR3YthGoOV@tmF6 z_3MwPdeJdzR@PAJCg6oKvW?G%$vGsCaz8Ot^s5`l(zVXyvx-g410%Dr0YDsc*Zlid zg_ebNC9IRjZz-}Ws`57go=N^y98Ys^c9X!)moXAVkJMvY}} zgHMht2%bCG%S*Y8Ns{S<#^KvOf|O&}sm(sRbSj9)mh8OcPCY6%Vo?a-;8fP(&H)%+ zquc)g*G~H%%kq=K^yn+CN2%w|NixQha@Lmt(Y{l`0QDc@E5Ek9`)#P$+scsG6~RMd z`-VXbIT)`(@Hc~RHOW*Yh6Q1tGt=92ZB>HHfWp{{DyRvuhg5 z9s-le{03|2e;n8e0lu&qgDbq&XA0oNdU`gX5EoLnEge6Aw8tt9E_ka${YwGRYp zmwKwD{FaI$f@z}+$j7D|t#TSKhAr*y9_Pta!Q-6d_ixPNy0-BxrMH6Z^h-N-Ov+}u znYj|T@T&U`wZGu+3k_pa)3n&!`HsqQE&~=PgY~5@RjrJhLelqa~iSGyzOj}H}l}|8224*UIZu^-X0X5h7U&9Y! 
zZ>U}f!cVYiwrYLH?+6{MSH==q{5AMlt2Cr4!E5C!W3lC%>(}zHE~Q5~S|_uFq3odQ zPqY63fPQOyRZTY2c<2>Z*0F9SxYRsfcjAepVJk6c3H}4mBit|QYn{}m zn^L`==Hc1pknSw!ByrR7uEzxjaVoQ@qOYPkpRx$}J$n6UtL8N)EWDoHlvY+cdD;5g zCp8&B{d-f3W34x-uZuqDjKCX;f=IG@aaY3NW2QROJg|86pk{QCe(bgYKXe+V$U)!| zD{@#t>Ckk|S7?Hq{{WLxfTb=%DB`5G%L3TO2vPN5 z&(z}YiuNcY*#d$wfm+j7cO=c$ZrK_;XusAw|6?csUsiI)D#vLtcfFR!1fJnL5}8;Z`&ZgzY4*H=k4QoPJe-HdxOi z%D*VW<3Be&exKJBv2=dZ8gsmJza2e?<0a7*jRfivs{{UXKbl0_#&ge|rrhk$&V}hjf zpQlR1xWa6UX7&n3T%LL7Kj*z|v5!!_h{X|~%9k?j`I&xEk&;LBtZ06B#{o|q{{Rha z>S;Bk7RfEbNd)V>q;hU#!NA-x!Q+AHSxBeXX#+8bT?6S2$Z9rliXrlqTty4>_jy<@=|rmsEy#)&jCyB0Rq z@`OeDFJOHuHrrLR((jGcg`{xWa(t#c@V}3G^J|NyH}?#mD>IxfbH{FLz0>ThAoxkB zU1~af@#=EjK(;q!h*pf`$N=CGf%mHIV;#=g=ff5rGtyD6*c*A5ax6=;8TG|aYs+;102bfc=n&dKP!*48U@}ynyIJ2I zJV&Tp+Dk8#(MN9_I%FARhD1ugN6ospd(2^0?=!qc(Jv0=4F zIU9X*j(w{R?mW3Xox>#hR8_3%JD!#g2@EAQP9(Zb+t^-OUc&xdw(~0jeTXCZSJ&Sb z{{UyLGvn@qW#St-R?||rwMk9D9A|GQ{h^xp+T98HNY5PmSLhGyudIKVf}N(4;{v=hflLOY|q0H?6viGgTG{-2l!IfDC07h1Lu)2 ztly3+*^)IF?~l^0DafTNjqFOiw=%pr;Y}mK`f;|>tfRGDa>_nuKCInMGVpFaI##<3 z@M@LXj-sJ9-lbT?kYlK-(hfMQbGOV!YO5TRTcajakl>1DaGcZTVVqTWd@WkkhF7_p zXgu(EtgTn#_l5o++s|dK=rL+~klQ?OE#ty`wiwz1gUBHD^{ZEG2N>P$UMKOt#7p5# zY~C_vOWOezw6h>67&aY-cq9YTx$zQlh2nczE*6d_7^zlz9ZseDPk2StZ17kIugj}T z$NoFbRnk9cEhApntTf0xF9Zhm;yZCEyODy1U@`}-e7h#08%3YA#^m({!TfsE{u8@c zd|{za6oyug_7+u(mM3`N4_fk7O0A!JUnmq)o-?Y>^2kX%{Iv}p#u45Ok%mW6Rj=)m^3H2{0cDX& zk3sMCsW<*q>~9zx580E4@9R7m7 zyZx9xD_m)JdUk=WK)RlaKoHv8N9Kqm&Pzt;nE2{J!R=oyX+9Ixk>X>fT)yT6FpQ1e zkNNFhnc&?&SJP!OrIpNheafOU&fnqscg_a@ zVH_-}&Cbo=x#kxP+Rf=^M!23v8~80FF+31DvuB(Pn&|u!X|3y;ytlUGU+U9K9jrF- zljcnBqyho#0)TxoDfV6h)@*!4)}m*!m*$x@8+av=V%VhbU4wCMzziOFuBYM9yBdg} zPnK(&iJl+yEC}UGmn178>~IfIGm<^)=W(?o?4K>RutG9bFC=~U`z&Zzx^9tm4yC6( zrPOWnZ$Y_87&#-UJrB~l{{W0$4^3J(f3nFulfcj_EdE~RJoLfmfJr~yz^#vgc2nB? 
zKhl=vUK@plM3Kz4`n`0cYMh67($G3X$FtNQWzUP-#tBxVb9~9~z z2kyK%;6IAS&e~IXac6C4hfahE<+R_rOlPYco;@oYOz<{?;GK6(OY7{Si~4m#J_+9jZyRn+d%6@_E~I}S^c(>{b(&b}(sk9A z(pnRqt)+;8`_DDhcgu7j^V8bBN8(?=xpW^F z#o`|gTit3pCD=2YbU`yr!~^9NKXyD02;}Zyam9Ss;$1Z9my#rAJ9!=6XR&}_*`aT` z7orTD0g^>|v~ak3*0;RZc6#d)-K3SjMtUd1pNR2|drGj>wA(A0S&gdR+{q*nq6Eh2 zLj1C0ByZ|7&P{z`py?K#7t?KZ&lBnvt!bu`_V%#@4WvwRK}i1q<+Pa282Xy{SHkl( zrmd%oMf-G?^9|Ft%(0R{`GyJLuqT7XeYD>NS5)|Y;q3!a)Fx}4D7^9AM8CRuB;B}d z&nN?N++w^u)`k}ih=pfl+q2UD06&qO>N=5B?(O|-aegK6U&2q?2UVE(Z&iN<_^ww9 zk=??p1><8pM%fuW^0+j74e)owZ-bs7)%+Q)+fOtXI%VXVexYe8mfR4zO^n=va&m{h zDi8Q%TlwDV+CLInuHn8JmL}e=KIsRjAmD;I;=YsAJZ%Ss^zRQ_-rvE0F`gSMo1uUj zQUW>lJBTEE*T09vP{s19GE!-;>;4$=F$vLJkavq;cg*1QzX9p+Xx<;x{4W60{6(Tb zkXuYtA-IojWP)Hih9!?Z8x`si>Je)8w)g27RJe&-4fmJTPq;Z1m20ERrRrO7u}9~~ z8w4HsVYP=jz|Jb%v#eIJ2>4QkmmCc4ImLOHd`2~4s!+#j<1426rrE41(sfj+bwyj| z%C>&#>ZZ9ZT5ZllW3GP+-@LlSZVx%&k=nUi*>FibWS^M&RvgFD(x+r)T*`UD!0Vh= zY||IO0Dyf>PkAxPz~I(=%)vn51L@Yf6H9Yf84?PYb`p}u$C|>`B0DIZpS@-weRJ$c_r=YZP4-Y)z|Rj(D^3ATG9MZYNB`h zBjn52>yoN-pAY8^>;8QzMCAZ<;fVFTb^VezmX5rFced zR_=D5;ivK&Pj?F*-iwjfIOjFyHrj@tu4)0NkL>n?=aqok0Q3j8eXFTzu3(saLRVzd;KbFON-lkg-B+a1yhhCW7F4;)z;W}V?xy|UhdZB=`Eb*FzO19JClM< zK5qwE+uQlJdZMnwxEWJ}$4$$Sx%c9ft!+q2-se4Q;%!9Tt)%w`cz0#Xu6|MIYdZGm z%Y2Wr1%5duj5$v3|d3ZD^BWC z0~3taRL_uiV;wVEqbIptj}L*!6&^uj{F z)X?3CWq58_0;eE>-lI8rLv$qb?@_z7(y?MPPI4#=z<_bZB=s{iCSC>!BNd%FF2QlM zXRd1eQ;8x9$L0p0L_>9%FcEnp*VtC+s|w}#gWGA+Ox<07AS(cp06gUL&1h;D&2r@d z#7Fo=QJI8=Mn3O)o86X{mo8~mX6jHshdh(~hpl=wu9u^DO4bYC5XbhJ56upp8(_Dm zndm*MkMQ21ZJ^uzvC6&S$hL`wLLcv+=~_A#lP8HasBblSoJTtu7bg!m9d@7LUcWcD z9My9up`CO1tHj?Hrh>-e^yvBoCt_JzAy|<|K(P*|>-d`J?LHQGcSbv;znU-xOI*f+ zQ|*l4eig=euf;E>%$7PtzuEeDOsd=CdoM*S-A;cx<0ZI~;qxQP><=bJCBIt5$Siw> z#+l(eWC|x~igjr&6e;=vQ+P^!5?ytpu!+JcbC9%gfabnL}S3oPF>U?_8tsdkf9D9z{;(r*tKW(G< zmK{pjqA_1(r->L79HZ}LM@)`K<6h(Dh>s&9J*&*MKL#0mS8!psx0c%P=Z&t@cvkMe z{<_bS=S>QVoUe1t{2i-a=zcJ?)HJvxxOik4iWMx*hlX4aQD0hWo+z-;EbZsiu2#s* zp_Um3??59wr&0LVo?3V(U4RJJT(#37Bbe;q+Cch%Kb>^`1o&N|F0hkovrDa9XRX)V z&#v6__?p#H=)%0?S*h(~tb+{>pp8_#<>3gdauQD zB~Rf~)MVEaZ7EolLl6f+S8eYInEc-`^grkGs^#o*v#nJj^V_}#D%?^9=s`Z7l}6ii zks}o<7x zXW{)1$=YPYbC9|HYek{WCXJ0boj^E0?;7Z=<|TTa-jx-em=!l11|2ildepI6vbov9 z_x}JIO_&JOhL4 zO14ML1voq&wQUqiqmp}n^{ZCG!+1=bx4kPGrbQG!YR4urz+mIJrTY)>nGgE81#Mbb zY-9NvzPdpmkOdoF@_no5PlH$fNV>CTI9Kx&dmsL_d5)u@ zhmmB)4?}@nUx4phN!IO~b~g=;nrQSW%a&=MOZa1L#ih-zazQFT8mAO-BH~5LWU2a! 
zhr<_tXVj%q3hoNnHKB6=d#y?Z>l=BC^saWS&!eQRB$uD$W$W@Gu(u_)tYwouutx&9 zJvQ9Qv=hn#pm+PH{A;c8ZjF$O^o#aT>lz$cp&-voFsNkQ!W8k6fr7P3D{EwP-uYg_} zX-(C>o%T7faL_S8GwsMV%W8iCZEmH)%+uS(fGBU3)2~k0^%eBc(`+s8V~X}^=8YR{ zg4QqzIOA^qI`z+PmEL#{;h%@KFAHi~uCJ)YYjQK>$0Vwx3}Cm)BuuNCwM!S97X56zwRp1W+8my(qhx|EDl%QzWA zIs@M)Am+<4ejd*Z=x2w6*8rT4#=V2XHnui4Ff2B{OsX*Q z7bLmQ1P+4(j1IWXeP!|g0N}TTbloc6O(JVp^&2?CF%snf`#=Q0?g8vT&QCe7o96KK z#k&Onx&g|M=UvoPZFGF*D!odwahZ1B?lxW#-LXX$8pZoTSQ zh5>9~^s4wIfH?Q51D>6`Q*S|pia;10s=SI@=ql43j8re*9ePp%yh*-IxC7kwtIeMrHrJ7{u%X9E>?C4_NtQzGr!0M0v9kjL{6YSOdQe$>f>yOG}={b{B{ zIzNYyslNRY^S-y+_A>GLq{`TO@zos*=F;bK5oZ z*NB;}q?yZ-Se$O}U6hw8g-3Xve|DpDXFj!uc@oVOo0x;b3=HS#_~V-0)QOGQWaFqH zaaHZL`+GeS{>p1*xSD)(cx?tm(FX z7GX3l^9ag<4mr(v#=mR*qb6ce9%SAe{M{>iCXN+SS9d*s;Wf?Ag}f`PTi$4venzDT zDRX%iW>(vPs1IJbuMqgt2Bjsn?u&U7!*L9P>fAR4xa4E$UB8HaA?p4u)^vF8E)oSs zjRULsVQ?|HlhdVm@5CK){{T_(HQcvajF)q7Wm%oW6OKCf9+jMpX(!Bi6Shh2M?BM; z4GfHN#yI?H-nnP?OQPyA6r*^@Z&6u~CL(-*Nyk5pVROFM9cv+XEO|ed;a^~W#9F|c zN7gj*PGPtbs6BxgKdpRgYWo>yetQ_Wb(RwY9$MVUfu1QOynywRr1BbhzBgSUKraq7XS1J46(d(v5>?tuLUG zvn$9kS-0)zc>30)+$krD=kMffoOGuVvV`({-1n-oHZn&QGn|p1tyGpzo0{4#?8-zk zw>6)6bF_S?Bl%RfcL50*=CY@o5uA0c(lBg>^6LuR_WD=4d<^(?pm?X?--s@JO{CfC zcCU3S#xG!56+E=TE4XpbabG-L&KMr#3i``XWbg;3XDSXYjqo=ndsRh^NbQW_o_2o?ilY?X1Fd%CyK83 zN&y7(>}y!Tbu)5fX{HH?w6Ww-zf$g7axo|R5B zoK?sVIqg`pL}qy0f5jcFXa0Sk`B%;1jeR5ImLKqjYX|>tAx@loetx{ z_vWv|8ZkLSOlLfG=eN?c;bl>r;PI2!CccvWk^UTbBgc)bN8=49^Hq>-lFVDm(EyR3 z-jK`uM3r88jMg}eL@<<{YMUe2rAgFwiQ&(sZ;Q*4+kA2kH!fAj*0}Ew_yw(tN`b|> zJ=Y}!NXWBio< zwS_F>3sN_W`~^BPT1i;^in{Rq6Of59<+1!9)hU~hyr zEFrP@bEQTf<@-hD{l&ziwnk0?^y0GqC4S6*4!kYl9YV_6!`d`=w@By`8$>S>vTfWL zjDwI4dgnF4S3Sd1N@=(Kd7X4Hm8;ZL)stuCRfV*tOt*xo0XfG*yz)=wUd`|y!Y!!y z_sano+u6x7?~lyfaJazyYt6L%H|&~(w(cA}4)KxKg*g230k2{BPP15FY7*N|C6)Qp zZWcMCIAm;sFnV^cdlxI=Huks6_ul~~{nM5N7s2n5n&@EhC5TyqMIhJ?Nm0QS;JjT+zG~d7%;2w! 
zuC$C>Is0#iwlG9OS>P7jaB;Zflb?R|?qZ2yiF3G_0~s6r zp@W9%GI3txcjAc*DYNB%?~mzTJ@Gn8HIFU^W&Q2kVn>tifrkA(Pin&nnM$Qq7|qJ( zH7Yf#UR7O^J>NcOKdz`z-Q)G2L`j(1ntU~#Zf_7iq%x$;n6WZrY{|oc=nZvCAUuKy_NClAn)c-B)s36b<&32VbkCjqW$^pO7CI!_ zKZXR?CP@i<%ZB-pPIoF2c*s4mTrQvR1H<&w3iwyTYZ7R7aY;Ndh?ef`7+jWB_mv_=@=HVm`Si?UmP=>GH-a-A5Dg zhr~Mmm*D%0jV4iGrdZ&4RluC9{{VCQzJ|P$N_)5PZI!EelF#)Q3&#gJ!3WZ=YEt~nhk!m}JJvkY5>7}9*oyeb-7Q)7(T<6t zrPkc!jt9MB>e{3lUAx>|$+jgS2to_U=kCU%yC2fD@2;Y@f@_)POM6%bWtLNbzmKnP zsTIj+T6Eqr@f3I0aebcB*Y}WHOrtO^crDlWjo;ItHSXYWp59*4e+oRzW+_pRvySZa zFAi8*Pi1YY&1D;1Z2~x?A2dMr2e-K$NUMG!@KboY`5o?Lw6}&S6&@!j0bzzC9dbCq zvq%XkjVZ~Gr&qI$8YvWt#nbXCo7|;*g+%aKOTGx@ZPUGwdR^2 zgfLW=081ZT-MjHzRqyO)pYMvXBD7&7QZ!7t za!y+VsVmfq>=RzJYt89$9v%$vS^ofKU2ZqEn@iVp zyQ^@ePno(Uj1I#be~o!(#gB)cCh$ItWxuz8T1hSi)tSsB_xI<}*VYhTU0Mi5tt>Nbj9sKY~C3Z6p~*jA3Gc-J-|neil-Lx$%YBLgJXP4?ZW z;$(JaV930c0{|2C>MON7?s)FeDbFc66;4S2Bp!#ZD#6ZJ^~a@ElO%=b>s50mLhdlx z=N#bEah4c39RZ|#9P$b7X<{U|9DNN-yC6~XFxn2>dQ-q0@!FGaPDmpjhxDm70m&FO zNCtr22X9J8EW;nfDovj@(aH4{Zo^BPQo2|n0H3_fcnH1Ok67JD?QCb6OA1*DGrkqvff40q@iTrl&0H4;q2jFb+Cyw&%v^BJfH+toGt|I+y78`K@W|*mI zn2Z2>D*F#w-uPzwd{2-z{^kDwn5`6fPJBwe8LuPiUpLmHdxg$74m~+F%IP}kE3)aE z%U%Nx-&)eUzk)YbNsyKq;PkB{8=n(4;#j@73J3>i^%a!bn#YllbBy7Kt$Sy~jZzz* zgPtB!Xg`wHoTh44)YpGL=Hy2T zw2t`Npk$o%z^zRdYmGZqM4C2^Wo@k+a8&!(0^TjVvYjNdxnk+}U6+yR%~ZVccCUKu z^4yT^_nj9EPpLGyl4o>RG6MW?*$V(A8#t1&#R*W|f08p|JG0qQKY&ulZf2&f%1d=@m zy*e9WtO*1Tht{)Ew!(5!+|jqyE~7kLKOHm8bzT{{X(w53TX%3VTzoMd<0Idi>a-Rk zX6ylvyheXYv~g5Q+8(>3i)+L~oz20=_}5o!rd#1a`GM>Q6~g#x<~9LiL$rP3PxCeC zT1-MkTy89XFT%9B(y%=J(L2i|$sbDbBciS6_Kk`z_lCkn zEB)tXIKeg1L*ltg9M)1kIPi=n887uag!5*Rjx*JXImhFg^cegx_8ly07xF6u^U1X1 zbM{5`#&Az+)$lLDol{iTA&TsMn@)ku(@IX!-2NMVeX762zY$yNdYQG;R~l`v?&CPY zlO0>}uD(?i?s^lb=Om(WS2qoHa?&y6o(K8uPIdV+gZR`i#L`GR*D9=p& z+Um4RXygPYPJOtlE8*mh>U3aPV+u#jlapKi60r#>ACP05RK1O&^)%qqj|Z=>9cxM} zAOYh287zWN|_9YKb3U%Hp85Xs&wNz zI0Gc{%{0x@yU_N_8_4dU(e)G~{VLINN2tbYtkYoMt~9#=x44UApGLK<+I8g;TC@K_AMwjVDc>#@bUoyTKumNZc`=Jv-NFs983f;u*whau_hrBL=E? zno}x9tCm$gI+SkXK9%0mrsEZ}K9>iD`qP#owEqBmN18V1*585hXNYv2{{Rwoi-rN7 zNytz-gVMc+!rmG&Sw@P0Ke*_9tB4vy-~33qj3M(LVbT+n)`-0|d~a)iqnF z>}=XS`{W%*sLw&~S;}#t9Z%;R!kqC~%8^|g`>xLhk3+bC!p#$r(*<*r{(GA4wC@!7 zuFpd{UB0C}8hB4H?9AY%JFh(Bn)JKT)0jEi2Q{8c&dr>xkVg&Be22n1PiT?~~IWhOO%U z0Jyx8&KOyyk_8!9A}(>$XyA_htG3quFnE&ZG1{0kyM4PvHdg96_9s8Bd4G&Ned2v1 zMAUVUwOrm@*g+-2z|uMic8rBPJ9p#KtRMYWlFe5`;3gs_bWpijC{AxKg2s+ExE? 
z;ly9?llW%ZE}(s_nB)HWt~=uY0E0XY;oCT8io-*=xVX8BD6KTI@yj7*!8|ePpT@dh z41UgD8Pv5{txk><%ElEZ%nHFqUP3!6lPxiu7yGv@)rS&tKhhk&kkE=b)-rzXpC5 zcs>&oUr(sq%A*lzu`FRY`^4ar`R5hGYo7!BH{#7QX*8`zRPgLJ@UzRQ$`o70&)#sr zWO3>U_pX=iTIY|#y0eR#lxjLI&QHz1$n8JjPR(^HEEmvRBlIHS4o|Lgnk4<6wJF`l znQBlRWX_|Xerx7^4&v$bEjsF3qG7m+i%km<7zZk@InLGtJ-YE$EhV{i@*&o4V=8`3 zN^l2by1HQ+lZuj&g+)#ilqU^Wne07(3yd9z2>Dqit1+*w5jT0Fd{=LbsAn~@X zF1zO2T7~;C9Lh%*!p@j6$)O?JH6T9zZ)f+#QN+Ss8zO0e3t*#+;R!uK894Q=Ins%wskyX|pMEcVGM~e` z>_%Y!0AhbJgsKTBxyy6iy?qM-RPP#35O`BT@GgfN?!T038mx?PtjaMQME?MCDDcGp z0CcbV$mr70%91KI!@o;vL5|^ZY*y&xCZQwVK$+t7>89x_#T4 z-o;yLMW63X!za~8wQmZssFUh>6{yvqX!81d zSHh-#sze6U)2%2tJ!!=|;-YsLJ-zCHQUQF9gR!Qfmw%h0@lQm^>6(;9w*wgNezb-v zL77emO1B;WBh+A1Vg| zE9h5oYX*G>O6K*O-M?v+HwZ$E*JN}^$+UhZTk8HN_)Fr}kNrQx+Rc;fc$+av86Kn1 z{zu-vnD}@5T|?qji%;=@NG`GzYLKoKzw4rp<~?ibkBz?r{2}qLPYbST%D3v6i&MA4 zPapgXxb`*hAH{#!H^mQJQGwlv)dE+SX){1(2f#+T(B} z9N>=Pv~(-cr|OYU<|;!glO!lPUs};X?`z_FnH4APSOdJ zN7I;yihb9|$`8LF{=T)$<*koRM$x-Cn>gAK1er5Q`^j1Q=gfsRVKeuSPL7 zsY#+{62?QOMleS;N8(?H)(uAAShi5Mx9GAujsm$H{<*ALXH+DvaCYcPBn~U+uh^pG zL-9ImbJ6XX{d#|ecpF;%l~kM%<8a5NdiU(Ls0}mX)|UAyCJhhgSV?Hkj)&=8k1*pL zd(ux9C-09+gJ&Pyr{eY*eyIfKN4*eK{yP zeQTm9%3BpK<<3oJPc9Vu)tiUN{3||kHu7>lxUQ3!G%XJA#a<@Uw0kJbw^y?)YCQ&i zVts(gucZF~X=}8x(X>Ae#Nnc}w^HCAMF4*)tNS{9KK}rO7ec!6_P+@iw=wy8eXB3b z6!!zMZ_rm4@doz$#6BRny}KmKI~LkG+6O>Ag$v%5%4?QBr=RNZtlDzyh;IkyJwMKW zrDDr)MWBdqn~CaAYUpP|6@fe+*sgx^KbVC$&*5DcIrT6Ej1l7mmO^%n^PrSWr8&G8p<{-j9QRr&o5?x5{Z$L)HSJK<<8k7j*bbGfrK7bNO=~XTz zG8}aF6+V=<=Hlz^GMHEF(iqu$$+zWS#MY?E>}cKIN#adVx0!EXg-`e9C!Y;YVJWSE0@~ai2=Whu^&-do*}xt@jiuN66u!Kk!hC60B94-^EuAu zJ=pQ>*1GRxaNAu>4-VhS5Y?l1%v0G)j;Ff-7-#55>s{?d@EXS9%@Po`t)3%(ytf_p8crh5YIX z?#BxnfCqYPavbt&o=E9T(L>J_I=itUu}9zmxsel3R0j(iwCEQHoPBGWkM?WvpiD@XP_bO9{h%D5Op5R_%x(&X9yhHN z{ZBtIom#bR2)4Ez;ih;!y&-HdRUPPo^uq=4PaZl6rD|lbxr;4PXY~CZ(r9w0F+#2d1$6D0P#!X`KKzo@DFgf}IT+fAk zJ97rRZ)-j6pf3Ji^^_0~V_vbY_+CE_czQ;;y@44@nB!&xa|6&1eAmK4b*e|(#oiHm z`5$Lb6B<<^ecAa_@h8K-@QM6X))dCbzil#X1FFA2s3yC=h5A7n>0QXkk;%POf-*6{ zJ;$M~kJ__AYcCxv+?2S0uPfB4$Upbb@~Zy;3Fn2irj?RGn2rlzoP`{mdiAg0{0J$_(m^Y4Knl4(D8^0)AFX~zjKox_r1ePqX<=g1P*y!Q(^0oR zQCot?DsmL@-ns~ItkQYWFqW_#=|0&6SI$at zyXKGLcy;i%+5QJraeHBI3N^|M_au{_Ojn%v%UFu*P_Tjs@w)L9-}s#m7{{USTGw|9 zkbqe3JuoYc)qs~#wv4M5HxZNrsOLY28LpfSbgRWiHYH%vr71S{KAwhg_DB^#+vTrs zU^t~&PH~+5YMr#6Wu3kO2Vm>lj`Z;V0H3Eys`ctlvu&RYpq0_31>3{-9IvmX4|^!d zEL+^xQ(p*93k;rsp0zQ!3@~yJrA{Jv%5?6e()BRqgL@Ons6IGsA5d{w7ncR*X%L;q zuKsgYWRP=N*B5FJ-QDe8O)OR_Qsk=cj>>R?)+KqZqPcyO+?hdb!3o$C@~lg}e?+#p zk}Gw2Z{uI}Fh*IvplMA75L*>@e~WD=(id@qBBP<_b339jvKEZLEgPnNAZ32h2^?xmb;@PZ`zlr zB;X8p9`%0aW94l?10U~w+RY+3%9%fY>-R_Siux$#al`1k9Fl^ntkKnv6B~=u1--Mp zNJx8!ARs=&si6Xb9zgU(T>*l!LTzan~FQj$5NW)beKlzXR5x)HMXstgbHQUH(dz zBh>aEo+!g!v}10p`&j!U+}RGXum1pN-2Ij}R9KVBs>yXrNw0$y3M?*9Ah56&r|rXgM!ym*eXIqfRX$qIx%y%_&VinXbZ7rl$ ziDi5i?lH;1^}yn{rmV)bS7s)mtR$Wyyz{M!ptn*X2V=R%Id4pLuPD`~)F8W(+2bp1 z8S^8CDFG4g<#2oTuUxZ+8}AV7vb~Z=ZwvtdLAVqL=3slb9M_Xw%8v7mt{zv(Ip|6G ze;_N?pwp4nhk~1CD@-GbY0G(Y#@=U|(T&Fk2WajOL0%W*8R4JCS5qvaMM<4PvXT5g zhq&ZdvfpcwM=$RXK+69BD`zB-a7R5e#eDmJbZ&3;2AWwg(%cfofNb{qAA0p@#jBZP zZ4{!964c_e{?5{tQ!2pI+{rnOl*x_YF3@-!4)x5;10dKipd9nb=a17B(d&UD(X?pD z;a2E^!}zm)C1q%uDHO! 
zIPcP+kp|;~fsVMUGG%+aqcj$^sn-ur4+h}$XgL9})9CA4O&RK%0f4T|D^{-a&PwZpyU&fYcsK17j!#YzS zZ|x5f+@*|rPjIdC5syJ$kKmt%zXd)FYt~xVioPX(!d0)>%(pOUJ{`6{W|h#85X&IT z0K=Y{?NQF2h)!B)dIyN2@b~P$;d>tkSn0D|YSP-QnxxAhgv}!l`D}S+0Fku;100(2 z$i6yVTCw|mt*(h)#vgF%_V#yfuM7#o5a}IhA=bGr!D(M-dt*P`Th5KiHp7a5vO$D4u zu-!Qh?%4i4pl5fdYP;gU+Hb?YD6~N?lfC3SMawvnLU0RYk&)K`bnV){K$Gng49f2^ z0##W-DsVa5?}~hPb9qs^G=Rp$Z)Myzj+}PK^{I_T)dwov^`95~K+?-ZbbS#-vQFx< zPJeP!vg7;1jGXa0osXFZ(`Naa^~HjCntL6haD@T_-Lf~oc{pV2L58XIBy+MQORh) z#yu;t_*|j9QgA~r+1%uI*N(~I;B%Q2p1_e*qTSAARx#4YCdhhprUw#Y?8aBRb#`Pi%Gi*ROnR*EG$2x9M*b z(8(#d3krE^n}<0bwZr%ZC5vCQIND5saCsR&(z@ezuFh(Ymu72dk1R6At_W4eMmf!U zzk+-{J@%Cy+WzV$J7ZV-(>J1y`0v)3qCGXzk;Lh}=%yq>_33E8P4G@eE#F z=~fyvu(qA1+Ap0e!m0@aEu8Wce>%pqlvaqsjVM%==4xo30KB-{BSvseI2;4?uEsBg z(uEt9fg_wRO3~B)AX{oSvRqi)LwOu0d$qPW2dN^NZSk*41n<34PhbxfC zG#0F)d<8YL1zeDO6{zk!CSs2}nL+gxx;``MOsc4U(B$Br*sAb+Pt$eV^QS{%#^P|> zB{&KQ8R<<*zhO#-8LiGURMJJGj*dF=FhyfLSaXBNYU#AE73rGwwCg-A9MLgH*f?%a zPkQI?N=Lnv9fK?h>x%T~&P`fK`TSJ~)0Z?JhY_z^uC1xGlguKrv{HHC06A>el2~iA zE!5hTnuq&T+mtsyCUJx5#dbdy?_^Jgz9ybIzRptEmXMygKXiMZmEb-E)vhM7ca~`_ zw5v$`%b9b?PwxF#@IM;%D61UNi{<7!PmLZZkHY!^)9t0(b$)@QU~t}Ecpr(+;49|W zCS1Gx#GXm5Pak;Ne;Rm-;_=Stw_+t8gMs(~Sv$Z*1%W4;*}i6P2slRl$skZ0uQk7- zXz!`s00uzo#dGjUxrPo#e=77Z2Rhr^qYyJBn4u#^`Au)^7_A~a0fEBz6_+>=z6ii0 zisrl-j+@e=~-PR6HderqsRm?v( zN{TcH`TZ*O(skmg$faASYD-3GvpR1Nl_A%m42}omUZe2k#4B}f2P*zx`V-B0M~7wJ z%t8MEmSFxyy$iwDt7)##sJT@;6QSGsB>2@~(&g@H*CY zAggHf>W}5z>YT7R>WbU8n)#mdt9%F5^qIWaBO0}yOCvvGbye+%F_|N&Nz!}X&*eJg`4KPgZs{x#1l=f6#oE;d=|zrkMwwU@A~4ipzxX0<2I5-<_VRQ z@r(j7jC$3WziKZJ>LGsH;g+-`ui0b|{OelRmLz*7wWztyVEsW`wdZA zMs!VL)JUqOxe?2bIXyGQIB7o!>@xaHhe>G$F{0~6*&p8-!LP#b+(k|kQO8rM2E3i0 zn$Z2b6@s23pDh_FF4sLLO#PdD3E|B{?NCo0#q16XJ2JDU9m@UF_-3#8lE+H;d#6p} zt1VhP17#eplR5($jkqYvNDsS#jE`FL{SU@I6Yy=kv1t=b=4Rg{*BY>u-M`vl_rDs4 zT=>tU-rD(6S!wq$;44LP$tY&Y=L|^n>s@b&@YJfQQ08mV-|{%&!}6;e)wkdO00hI) z{{U%^h(0ybWtT_r$AvB0A&9Q7(c>o_@Nzz!*N=FY;b+GG0FN4rMdF_k#c!nfnI;Jd zw_Ki^0;pm$z&WpTxA?>1oh}I$d#h~>G0GwcA|HpA2_KDNc%$}}@O}|(?j_X2y04oY zvNVcEJAfb#zx{gFw-;k^Fm!N~|pTx zleDreyna-%AHD}h_8y|RlVX$190dVD4ZQ5mJqK^8^si^}2a5b9@fzrOisw|c(cm$p zNet{v%eSf6fRmqJdh;I~_`|~1;z)FeZf~X8v8SCx2szF|^2hNazi`Ux)OadRzsF^N zQ}DdPo0c-Gr||PKt%U6~wrIsRFU$scTy-S>054kl{{Z%4(~=Jk-|GbuHkJIiq#bsF z^8@(T!a8=jD_yUc`}e6#$a(f3g?;nk{{Ru$cwgXlm46yU2(gWrM%V^M7|Hj~KJ~UL zrPYp%9R{UyOT``}msZtew1M*w`B}&CuTnoArm~*gPc&AL1l~>+o1)|RkFXWuR^Jk2 zxMndqJKTm+aL4?4tJa?oC$~-WTr@pHbIBgn=E_lzQ|Td1y^-qvF1fVR?AFTCZ?v3( z?s~f(=g@i%!m~U(q}^)w@mj23YR((aBjt<(_nXuY=ZfI8zZ=+HT(P;FJ2@QZjC=Z5 zt7u*~(d;xwiqYa(Wd%VTIrkpJwPzZ!mXT(%qbTbndLM?oRc@DSpx!)_+9?r3aVAR_ z1C}S#{{Yodd`IxEmp+w$bA16>Eur%d+1qPM6cHFcxZFn`wc?tWjSPwQYerPOnTo}{ z-~c+D{=E0Dx5wWU;?w>NYf);^J=BruuL4d6=G<_x?)*kcspXQAGQD4Y9kpSS}$RON9djuW_GRE7Cr0(mudZ#XD{nr&^IC z%X?Fdc|O9vA}8skS5wbgsw3Rqxaq}cNT337Rn!)2VPi%z% zan4T_EJ?L9{72T4VD~a(9!A}lNDPy~qVryUG1^UF1 z!@CSlKo!V%i%|1CA#s+@7d70YqB$omPFGOzGPRU(?;G)o&s=Kd zm<`xv2Gu4NhXJ2U5B+b`nH>t0Rp?@F1i{5horThA!q-Q0x* zna_1nKnv5oeBJa-s1?mx7*!Toms0OBKRT2++V zrKgB46Jd!-+dG}6HY27^2q%nJmwX_GZ5QG(u((o_`7=W5$L_GkMt-2tP2WR#FLvjg z_{ZV)so?wHBT}}+1d$!oKQv4|bKB@@JttMU(PD`#r87dS^T8qD`;SkheMREIgDg_S zlUu*aW0VF2eGPc_zwq}>xw%KvB^qgF8>5($67}Q}k_Wdn)S}WkJ))AcC~2N7g8nc_ zF+X*GVbt1hwX}qgSnebqIUPSL`YZO7yVP~R6kOlmLiVk3v2CMe&zEeu@|PWf<&W09 zO5aMJ?)EbYeTOaIj&gEGp&8H8qMFj>Rd&#&W@AW#l@8dzIl)s_)&@Ah^dx~@j9v_u ze=27Jd)H#p%FU46clYm_*0%7JR&Hdv+Pr98bEcDY@kJYBJ3`=^Dkxh)(y2Eq$)7#y+B|l#B4w46 zc1xZ&dJn?3Zu}~BzXbUx8s%++J8WFG!^>meqYt62zZ>{@tnTIW<9L%N<=OXshOPWH z*VfAG>{oBH7CT2jFFwCQe_Ha{?m_8u=N7V>31+sO4e_}LBh&nNHQIbA{{V?+jkJrw 
z^7(fNPt$PCYW!F5)K+@)-rFjy5|X7;I0v`*eifVHm|D_UE)?zgz+4_l70oqcxk~9D zr*;jp-T+2fly$3D0I$l*RFRMvV>R<1i~j&^7fE zqCa?!c{mk!_IsD&uZ#8RyiMZ`cK+i^xy+tj=^2#cj>^ZEH>^Gt_?$-hP@xTVK zF75)7rzMXd0m=UW3eleK=I9(l_DD}dAk2(AaoBx%_N=><$tRM2wX|KgGqYxN z@)qNP>^fF$?1iLmYfeSj`NvK@>l*0g&(^wN?#$+_ij%n&>3_4&!m;re#f^1+9{m$e zA?6t3SwDq8sjo1)Za4CQ&`dE%$M}GKo>TJe4fln6F0yq|-H*(QgvsIH8z=GaT`W5xt6@wN_RuxsO7BI?k4GTN{9?QfQ|g4V`CH zmsiy8Zr0x3-*DaKT1BxF=%b}vwechBlWo=SV}jNggPBSK?X+M8Rr}6*4E{A+Q1KiK z;X8X2xtQDuw>%*fLeb-?VaaUvHO@zRjQA)C?rWt_Ly}bWFKarp>k`Hi5o=>^2)4B! z!U^S=_jBC-6_(Dyj$7sTJk`%W798YfwMMZ<%m^7ixUAfbqYG>{A(iH&LBQuUG$)*A z6$p%u?OC?FnxxB$<8aBvT7~n{v*J&iB-QvpBOuo-&X{OW@q1&!dZ7ORr*{5T^Yn~K z;=X|Kc+Z8kFgw8C%DfSw8RCmv_3*mInfjVF+)-La3rTe{KnJ@p10S7!oOtHlyc6IZ zGS1#dwDLT-)uM8Q7@<|_J?rzg;f9U>00~FNUj*q}cdWXkEbH59HuL%|ef#@WYb#-( zY4GmBcphwLySE>pugti%^>Mg7GVa^_kEzRT3Uz8f>qFWyz$K{jVo-Mx8{1@k@366dQIFnw%#JVTZm5hq%sF6-n`b#{{Xdb#aCRvjrA!3 z&PEV%_$aR!x0`B#nt7ZyMtWDdTQ0$3KZ!;!)i3fsVwNu#hq|X$NqV#0bU)hX;yO&Q z-1wI2NEm>{cB|v-T{gS%^H9`nK)b)xFT-_7?;NT6g}|?ul6|fYaf4fO>W=oS&umxb zdB+cB@~Wjyl8>f`>^wD@;$?#r5?ZnrL%acw=g+;Qbe8E(Yz2lKC| zJ~!zDTKGpTzy+Gzq=1f~SIL@Z?jB93xo@8y-Rtc#II5X%uzfC%R})px#;U0ID{qN2 zTFUmwx1E4zWHvgl$YOzUUGdo>0dcrc-lpdNp2@Qpl6Or8S>a3 zr>Fo{U5CVLTd7?kohNA60kOCl_dU2ZsSmCrvxQ21g+mfdDKRS}E<*bM&wg?OjNABeYB`jOM^j>+W&$iU-j{{VMAsLArB zkE?54pO@yi*x(^4$m={wy%Mctmf1Lgm z;u<~5pc79gnB;Q3&*NKv+eD5y{#E*X@;og{YL!^r_!#FHhdy{aCVO?)#63T5A}k5& zXBfxjSk2=hb3O#fLEtFH57YkuuU<^r?mD({&nJqjajPSrF=S}lI4loS^shbe-VU4` zX~*w93VFSG%JMzl%Tj{if#`SjkE!X_|*F$6R z5W0^rjIZ~;U*%sHisAJMH52M+=3MD>9@5&>NWVO&qrO_R?z~G7ZIQ}{jN~3G%H;8r z%-e>6MQo0Z-+D-l*RA zi7suVA@^~f1~4&RZrZyfsxs9>DuoCE+Whd2SdTz{!rCE3<|_wLbOS5DX{>?;l$F zOs@wjaZ{P>;_)%%RN^wq9orR?_jc?FAYr$Toc9&GG>d&6*$Rc-oDML3y(*Yy^75yz z1+m2p#7XmzGI6+fuc4^!?s#);>|N6B8rtEGR!!ucy$B!F``2Y*q`R=v0W>}=G?lmk95qj zr?~^~ab0xjIUPza)<;wJkL;fT>UR)G;jbgiQK&h=CwJkWO7S^$X$7YD;CW!=jsgB; zA8OyzZY7G*pxTB>AZ5vRJGsd*j*4-?$E|VN!hfS#z^r5uuJ16M7En(?*mH{YXwu|v z=UyH1-1E;C>JnT`@~nthuFR>%{2bx4-|5o4hgsBq!{O`qB~nI>wmgR?mgR@8copgX zD2^yCoy=H*AkDe^dSG`0wRt~@tmHl!xg@DZwvuP*)Dez<9)i6!q)$H=cG>3Iyz4!V znxHw`6Xdr{mhJ6Xvaw~$bASiG(z=fj+PqepKEw^>>9L*=9ByJs82)Ct$t{RL%D*eV z)m_^--CF50PjVYE$#ZUI3I6ED(tXZpCEb$KI-l9=_FwTo#@%w>6Ky7m z;kE*6&1&^FM;uCW#ys-uKp+9%uS)s{#9tGwegemJqkJRq-j}1{mUxz zBS_gf>TsY8SI=$nug2OepsuZdXDMB%vnB``_c07Aj z4&!b}P`nNo2mb)oQFco-{PelG{nq)MmOmivjtE~|^Tkw-?no8VIUPL3B0x9?o|RZQ zMjHV_JCn%w%`W_&x$X6#Jw}Tx>dd)6!ZWw7eZA^7kjVS8bKf-*oN{_oiyot;XpInt zKo1<(ckpGm_`IP52HF1rn63!@FOI@(E z^ttiwHfZ-hb8)F0Z)H8Tbt-2e1Ps0k6vr_BjA_p z;o;lQhq`>1w(Bg-7-0U#wPNb3YV6LXDO75l7N<*lu2|UW5SF{Ngxl%+W8WYOa=0u{ zn}T1jYp$Kfpk%U$r*@1BNByKkKUJZs>#G{{Uy(O$>aR zoy=tNe5N30^P*Mh?wHmZt9EGlpW_TMMXTCG%43>1LP((S8`%2SU+|YqyKez_f@q-< zO)Ogg3P^5xepQA0M|gHG3HZhfTW5KrFoz;Tlb-dTru;?KXVUHMZ0B1wglyevJgoX| z1HZYdnsc*>5nQ)Vq3VAQAzcGaxdv7HRm4pggq&jl=M}`g%3IFJ#Hm)=P;h@*lS#JK zb)b!LsYy0RCp@>**Fhh{C^aTB++9yAvB76zPfT;`S=m9h-sZ{@npzmvdiB2bjyY33 zKJ|3o9q|pmo#HJv@uorL$^pZ6JQMoV^86QVHZ{4^+Y%}qx*Qy1r{h%gzXL;MB26u^ z45SdC@;!;Ee64O*EaL5Bd&9AX)^)v@X#CbO?K}~hmtL7|w8=UaVT@t2d&D;d+KqZG<-yJpPJI49}eyGu*2?CX6nIUKWY9oLL=T&<3g zaWTcU!k2IPXCd;*{7)6u*!X8#)1xw8==Vz#jETf0<3I2v$f~+pn7?{fE<$RDYPj) zNc^a>)8kmdKpWf~{{SkwuoBq8KT3%m6t3AdtL?z==~|FJQ#F|!WpRxAj8({AV~mWF zalot^J&H`U=m^QI`O_{)A4=2%dgONex^)zSW2>{I@*x_$?SBieZf-S z=imG)#CJqIWlrJ>@$H)AA-`DylEtyypVqGpMUA*|g#&?~=Tj%6L*-FvzK0(T{{V;d zUx*jauH4UWr^u5GTBtJb=V0f63F>_-%ysV)S-kQTB1H^#g*^pS_`|4q{x7*;NCDYE zJ-P2(6prc`=PT>$U!7J-`;BdL^$Ilg_OW}UZ_DB$$ACv{lg(SxyhC6LV_HM{TP3sWzmM{EC2tjT;!xbKKT#kBV<&onwmO zhIuMU;QIdnPkQFb`7+0Y&QDrpu`O~JCprAjYSD7v-H8&5ard3ir!);+&xUN~X$+fz 
[... GIT binary patch data for assets/stable-samples/stable-unclip/panda.jpg (175412 bytes) omitted ...]

diff --git
a/configs/karlo/decoder_900M_vit_l.yaml b/configs/karlo/decoder_900M_vit_l.yaml new file mode 100644 index 0000000..02a3530 --- /dev/null +++ b/configs/karlo/decoder_900M_vit_l.yaml @@ -0,0 +1,37 @@ +model: + type: t2i-decoder + diffusion_sampler: uniform + hparams: + image_size: 64 + num_channels: 320 + num_res_blocks: 3 + channel_mult: '' + attention_resolutions: 32,16,8 + num_heads: -1 + num_head_channels: 64 + num_heads_upsample: -1 + use_scale_shift_norm: true + dropout: 0.1 + clip_dim: 768 + clip_emb_mult: 4 + text_ctx: 77 + xf_width: 1536 + xf_layers: 0 + xf_heads: 0 + xf_final_ln: false + resblock_updown: true + learn_sigma: true + text_drop: 0.3 + clip_emb_type: image + clip_emb_drop: 0.1 + use_plm: true + +diffusion: + steps: 1000 + learn_sigma: true + sigma_small: false + noise_schedule: squaredcos_cap_v2 + use_kl: false + predict_xstart: false + rescale_learned_sigmas: true + timestep_respacing: '' diff --git a/configs/karlo/improved_sr_64_256_1.4B.yaml b/configs/karlo/improved_sr_64_256_1.4B.yaml new file mode 100644 index 0000000..282d3cb --- /dev/null +++ b/configs/karlo/improved_sr_64_256_1.4B.yaml @@ -0,0 +1,27 @@ +model: + type: improved_sr_64_256 + diffusion_sampler: uniform + hparams: + channels: 320 + depth: 3 + channels_multiple: + - 1 + - 2 + - 3 + - 4 + dropout: 0.0 + +diffusion: + steps: 1000 + learn_sigma: false + sigma_small: true + noise_schedule: squaredcos_cap_v2 + use_kl: false + predict_xstart: false + rescale_learned_sigmas: true + timestep_respacing: '7' + + +sampling: + timestep_respacing: '7' # fix + clip_denoise: true diff --git a/configs/karlo/prior_1B_vit_l.yaml b/configs/karlo/prior_1B_vit_l.yaml new file mode 100644 index 0000000..159330d --- /dev/null +++ b/configs/karlo/prior_1B_vit_l.yaml @@ -0,0 +1,21 @@ +model: + type: prior + diffusion_sampler: uniform + hparams: + text_ctx: 77 + xf_width: 2048 + xf_layers: 20 + xf_heads: 32 + xf_final_ln: true + text_drop: 0.2 + clip_dim: 768 + +diffusion: + steps: 1000 + learn_sigma: false + sigma_small: true + noise_schedule: squaredcos_cap_v2 + use_kl: false + predict_xstart: true + rescale_learned_sigmas: false + timestep_respacing: '' diff --git a/configs/stable-diffusion/v2-1-stable-karlo-inference.yaml b/configs/stable-diffusion/v2-1-stable-karlo-inference.yaml new file mode 100644 index 0000000..da867b4 --- /dev/null +++ b/configs/stable-diffusion/v2-1-stable-karlo-inference.yaml @@ -0,0 +1,74 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.ImageEmbeddingConditionedLatentDiffusion + params: + embedding_dropout: 0.25 + parameterization: "v" + linear_start: 0.00085 + linear_end: 0.0120 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 96 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn-adm + scale_factor: 0.18215 + monitor: val/loss_simple_ema + + embedder_config: + target: ldm.modules.encoders.modules.ClipImageEmbedder + params: + model: "ViT-L/14" + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + num_classes: "sequential" + adm_in_channels: 768 + use_checkpoint: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + spatial_transformer_attn_type: "softmax-xformers" + legacy: False 
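+        # note: with conditioning_key "crossattn-adm" above, the UNet receives the
+        # OpenCLIP text context (context_dim: 1024) via cross-attention and the
+        # 768-dim CLIP ViT-L/14 image embedding from embedder_config through the
+        # class-embedding ("adm") pathway, hence adm_in_channels: 768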
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          attn_type: "vanilla-xformers"
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: [ ]
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+      params:
+        freeze: True
+        layer: "penultimate"
\ No newline at end of file
diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py
index 6090212..bde253f 100644
--- a/ldm/models/diffusion/ddpm.py
+++ b/ldm/models/diffusion/ddpm.py
@@ -1793,3 +1793,58 @@ class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
         log = super().log_images(*args, **kwargs)
         log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
         return log
+
+
+class ImageEmbeddingConditionedLatentDiffusion(LatentDiffusion):
+    def __init__(self, embedder_config, embedding_key="jpg", embedding_dropout=0.5, freeze_embedder=True, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.embed_key = embedding_key
+        self.embedding_dropout = embedding_dropout
+        self._init_embedder(embedder_config, freeze_embedder)
+
+    def _init_embedder(self, config, freeze=True):
+        self.embedder = instantiate_from_config(config)
+        if freeze:
+            self.embedder = self.embedder.eval()
+            self.embedder.train = disabled_train
+            for param in self.embedder.parameters():
+                param.requires_grad = False
+
+    def get_input(self, batch, k, cond_key=None, bs=None, **kwargs):
+        outputs = LatentDiffusion.get_input(self, batch, k, bs=bs, **kwargs)
+        z, c = outputs[0], outputs[1]
+        img = batch[self.embed_key][:bs]
+        img = rearrange(img, 'b h w c -> b c h w')
+        c_adm = self.embedder(img)
+        if self.training:
+            # randomly zero out the image embedding with probability `embedding_dropout`
+            # so the model also learns the unconditional (image-embedding-free) case
+            c_adm = torch.bernoulli((1. - self.embedding_dropout) * torch.ones(c_adm.shape[0],
+                                                                               device=c_adm.device)[:, None]) * c_adm
+        all_conds = {"c_crossattn": [c], "c_adm": c_adm}
+        noutputs = [z, all_conds]
+        noutputs.extend(outputs[2:])
+        return noutputs
+
+    @torch.no_grad()
+    def log_images(self, batch, N=8, n_row=4, **kwargs):
+        log = dict()
+        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True,
+                                           return_original_cond=True)
+        log["inputs"] = x
+        log["reconstruction"] = xrec
+        assert self.model.conditioning_key is not None
+        assert self.cond_stage_key in ["caption", "txt"]
+        xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
+        log["conditioning"] = xc
+        uc = self.get_unconditional_conditioning(N, kwargs.get('unconditional_guidance_label', ''))
+        unconditional_guidance_scale = kwargs.get('unconditional_guidance_scale', 5.)
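+        # the unconditional conditioning below keeps the image embedding (c_adm)
+        # and only swaps out the text cross-attention input, so classifier-free
+        # guidance here acts on the text prompt alone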
+        uc_ = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
+        ema_scope = self.ema_scope if kwargs.get('use_ema_scope', True) else nullcontext
+        with ema_scope("Sampling"):
+            samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=True,
+                                             ddim_steps=kwargs.get('ddim_steps', 50), eta=kwargs.get('ddim_eta', 0.),
+                                             unconditional_guidance_scale=unconditional_guidance_scale,
+                                             unconditional_conditioning=uc_, )
+            x_samples_cfg = self.decode_first_stage(samples_cfg)
+        log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
+        return log
diff --git a/ldm/modules/karlo/__init__.py b/ldm/modules/karlo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ldm/modules/karlo/diffusers_pipeline.py b/ldm/modules/karlo/diffusers_pipeline.py
new file mode 100644
index 0000000..07f72b3
--- /dev/null
+++ b/ldm/modules/karlo/diffusers_pipeline.py
@@ -0,0 +1,512 @@
+# Copyright 2022 Kakao Brain and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch.nn import functional as F
+
+from transformers import CLIPTextModelWithProjection, CLIPTokenizer
+from transformers.models.clip.modeling_clip import CLIPTextModelOutput
+
+# NOTE: this file is a vendored copy of the diffusers unCLIP pipeline; its
+# original relative imports (`from ...models import ...`) assume it lives
+# inside the diffusers package tree, so we import from the installed
+# `diffusers` package instead (assumes a diffusers version that ships unCLIP)
+from diffusers import (
+    DiffusionPipeline,
+    ImagePipelineOutput,
+    PriorTransformer,
+    UnCLIPScheduler,
+    UNet2DConditionModel,
+    UNet2DModel,
+)
+from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
+from diffusers.utils import is_accelerate_available, logging, randn_tensor
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+class UnCLIPPipeline(DiffusionPipeline):
+    """
+    Pipeline for text-to-image generation using unCLIP.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+    Args:
+        text_encoder ([`CLIPTextModelWithProjection`]):
+            Frozen text-encoder.
+        tokenizer (`CLIPTokenizer`):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        prior ([`PriorTransformer`]):
+            The canonical unCLIP prior to approximate the image embedding from the text embedding.
+        text_proj ([`UnCLIPTextProjModel`]):
+            Utility class to prepare and combine the embeddings before they are passed to the decoder.
+        decoder ([`UNet2DConditionModel`]):
+            The decoder to invert the image embedding into an image.
+        super_res_first ([`UNet2DModel`]):
+            Super resolution unet. Used in all but the last step of the super resolution diffusion process.
+        super_res_last ([`UNet2DModel`]):
+            Super resolution unet. Used in the last step of the super resolution diffusion process.
+        prior_scheduler ([`UnCLIPScheduler`]):
+            Scheduler used in the prior denoising process. Just a modified DDPMScheduler.
+ decoder_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the decoder denoising process. Just a modified DDPMScheduler. + super_res_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler. + """ + + prior: PriorTransformer + decoder: UNet2DConditionModel + text_proj: UnCLIPTextProjModel + text_encoder: CLIPTextModelWithProjection + tokenizer: CLIPTokenizer + super_res_first: UNet2DModel + super_res_last: UNet2DModel + + prior_scheduler: UnCLIPScheduler + decoder_scheduler: UnCLIPScheduler + super_res_scheduler: UnCLIPScheduler + + def __init__( + self, + prior: PriorTransformer, + decoder: UNet2DConditionModel, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_proj: UnCLIPTextProjModel, + super_res_first: UNet2DModel, + super_res_last: UNet2DModel, + prior_scheduler: UnCLIPScheduler, + decoder_scheduler: UnCLIPScheduler, + super_res_scheduler: UnCLIPScheduler, + ): + super().__init__() + + self.register_modules( + prior=prior, + decoder=decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_proj=text_proj, + super_res_first=super_res_first, + super_res_last=super_res_last, + prior_scheduler=prior_scheduler, + decoder_scheduler=decoder_scheduler, + super_res_scheduler=super_res_scheduler, + ) + + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + ): + if text_model_output is None: + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + text_embeddings = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + else: + batch_size = text_model_output[0].shape[0] + text_embeddings, text_encoder_hidden_states = text_model_output[0], text_model_output[1] + text_mask = text_attention_mask + + text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + 
max_length=self.tokenizer.model_max_length,
+                truncation=True,
+                return_tensors="pt",
+            )
+            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
+            uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
+
+            uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds
+            uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state
+
+            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+
+            seq_len = uncond_embeddings.shape[1]
+            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt)
+            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len)
+
+            seq_len = uncond_text_encoder_hidden_states.shape[1]
+            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
+            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
+                batch_size * num_images_per_prompt, seq_len, -1
+            )
+            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
+
+            # done duplicates
+
+            # For classifier free guidance, we need to do two forward passes.
+            # Here we concatenate the unconditional and text embeddings into a single batch
+            # to avoid doing two forward passes
+            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
+
+            text_mask = torch.cat([uncond_text_mask, text_mask])
+
+        return text_embeddings, text_encoder_hidden_states, text_mask
+
+    def enable_sequential_cpu_offload(self, gpu_id=0):
+        r"""
+        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
+        models have their state dicts saved to CPU and then are moved to a `torch.device('meta')` and loaded to GPU
+        only when their specific submodule has its `forward` method called.
+        """
+        if is_accelerate_available():
+            from accelerate import cpu_offload
+        else:
+            raise ImportError("Please install accelerate via `pip install accelerate`")
+
+        device = torch.device(f"cuda:{gpu_id}")
+
+        # TODO: self.prior.post_process_latents is not covered by the offload hooks, so it fails if added to the list
+        models = [
+            self.decoder,
+            self.text_proj,
+            self.text_encoder,
+            self.super_res_first,
+            self.super_res_last,
+        ]
+        for cpu_offloaded_model in models:
+            if cpu_offloaded_model is not None:
+                cpu_offload(cpu_offloaded_model, device)
+
+    @property
+    def _execution_device(self):
+        r"""
+        Returns the device on which the pipeline's models will be executed. After calling
+        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+        hooks.
+        """
+        if self.device != torch.device("meta") or not hasattr(self.decoder, "_hf_hook"):
+            return self.device
+        for module in self.decoder.modules():
+            if (
+                hasattr(module, "_hf_hook")
+                and hasattr(module._hf_hook, "execution_device")
+                and module._hf_hook.execution_device is not None
+            ):
+                return torch.device(module._hf_hook.execution_device)
+        return self.device
+
+    @torch.no_grad()
+    def __call__(
+        self,
+        prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: int = 1,
+        prior_num_inference_steps: int = 25,
+        decoder_num_inference_steps: int = 25,
+        super_res_num_inference_steps: int = 7,
+        generator: Optional[torch.Generator] = None,
+        prior_latents: Optional[torch.FloatTensor] = None,
+        decoder_latents: Optional[torch.FloatTensor] = None,
+        super_res_latents: Optional[torch.FloatTensor] = None,
+        text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
+        text_attention_mask: Optional[torch.Tensor] = None,
+        prior_guidance_scale: float = 4.0,
+        decoder_guidance_scale: float = 8.0,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+    ):
+        """
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`):
+                The prompt or prompts to guide the image generation. This can only be left undefined if
+                `text_model_output` and `text_attention_mask` are passed.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            decoder_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            super_res_num_inference_steps (`int`, *optional*, defaults to 7):
+                The number of denoising steps for super resolution. More denoising steps usually lead to a higher
+                quality image at the expense of slower inference.
+            generator (`torch.Generator`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*):
+                Pre-generated noisy latents to be used as inputs for the prior.
+            decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*):
+                Pre-generated noisy latents to be used as inputs for the decoder.
+            super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*):
+                Pre-generated noisy latents to be used as inputs for the super resolution.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            text_model_output (`CLIPTextModelOutput`, *optional*):
+                Pre-defined CLIPTextModel outputs that can be derived from the text encoder. Pre-defined text outputs
+                can be passed for tasks like text embedding interpolations. Make sure to also pass
+                `text_attention_mask` in this case. `prompt` can then be left as `None`.
+            text_attention_mask (`torch.Tensor`, *optional*):
+                Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention
+                masks are necessary when passing `text_model_output`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+        """
+        if prompt is not None:
+            if isinstance(prompt, str):
+                batch_size = 1
+            elif isinstance(prompt, list):
+                batch_size = len(prompt)
+            else:
+                raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+        else:
+            batch_size = text_model_output[0].shape[0]
+
+        device = self._execution_device
+
+        batch_size = batch_size * num_images_per_prompt
+
+        do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
+
+        text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt(
+            prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask
+        )
+
+        # prior
+
+        self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
+        prior_timesteps_tensor = self.prior_scheduler.timesteps
+
+        embedding_dim = self.prior.config.embedding_dim
+
+        prior_latents = self.prepare_latents(
+            (batch_size, embedding_dim),
+            text_embeddings.dtype,
+            device,
+            generator,
+            prior_latents,
+            self.prior_scheduler,
+        )
+
+        for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
+            # expand the latents if we are doing classifier free guidance
+            latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
+
+            predicted_image_embedding = self.prior(
+                latent_model_input,
+                timestep=t,
+                proj_embedding=text_embeddings,
+                encoder_hidden_states=text_encoder_hidden_states,
+                attention_mask=text_mask,
+            ).predicted_image_embedding
+
+            if do_classifier_free_guidance:
+                predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
+                predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
+                    predicted_image_embedding_text - predicted_image_embedding_uncond
+                )
+
+            if i + 1 == prior_timesteps_tensor.shape[0]:
+                prev_timestep = None
+            else:
+                prev_timestep = prior_timesteps_tensor[i + 1]
+
+            prior_latents = self.prior_scheduler.step(
+                predicted_image_embedding,
+                timestep=t,
+                sample=prior_latents,
+                generator=generator,
+                prev_timestep=prev_timestep,
+            ).prev_sample
+
+        prior_latents = self.prior.post_process_latents(prior_latents)
+
+        image_embeddings = prior_latents
+
+        # done prior
+
+        # decoder
+
+        text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
+            image_embeddings=image_embeddings,
+            text_embeddings=text_embeddings,
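+            # (text_proj folds the prior's image embedding into the decoder inputs:
+            # extra CLIP context tokens prepended to the text sequence, plus an
+            # additive offset for the timestep embedding, passed as class_labels)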
text_encoder_hidden_states=text_encoder_hidden_states, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) + + self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) + decoder_timesteps_tensor = self.decoder_scheduler.timesteps + + num_channels_latents = self.decoder.in_channels + height = self.decoder.sample_size + width = self.decoder.sample_size + + decoder_latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + decoder_latents, + self.decoder_scheduler, + ) + + for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents + + noise_pred = self.decoder( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + class_labels=additive_clip_time_embeddings, + attention_mask=decoder_text_mask, + ).sample + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if i + 1 == decoder_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = decoder_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + decoder_latents = self.decoder_scheduler.step( + noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + decoder_latents = decoder_latents.clamp(-1, 1) + + image_small = decoder_latents + + # done decoder + + # super res + + self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) + super_res_timesteps_tensor = self.super_res_scheduler.timesteps + + channels = self.super_res_first.in_channels // 2 + height = self.super_res_first.sample_size + width = self.super_res_first.sample_size + + super_res_latents = self.prepare_latents( + (batch_size, channels, height, width), + image_small.dtype, + device, + generator, + super_res_latents, + self.super_res_scheduler, + ) + + interpolate_antialias = {} + if "antialias" in inspect.signature(F.interpolate).parameters: + interpolate_antialias["antialias"] = True + + image_upscaled = F.interpolate( + image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias + ) + + for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): + # no classifier free guidance + + if i == super_res_timesteps_tensor.shape[0] - 1: + unet = self.super_res_last + else: + unet = self.super_res_first + + latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) + + noise_pred = unet( + sample=latent_model_input, + timestep=t, + ).sample + + if i + 1 == super_res_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = super_res_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + super_res_latents = self.super_res_scheduler.step( + noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + image = 
super_res_latents + # done super res + + # post processing + + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) \ No newline at end of file diff --git a/ldm/modules/karlo/kakao/__init__.py b/ldm/modules/karlo/kakao/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ldm/modules/karlo/kakao/models/__init__.py b/ldm/modules/karlo/kakao/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ldm/modules/karlo/kakao/models/clip.py b/ldm/modules/karlo/kakao/models/clip.py new file mode 100644 index 0000000..961d815 --- /dev/null +++ b/ldm/modules/karlo/kakao/models/clip.py @@ -0,0 +1,182 @@ +# ------------------------------------------------------------------------------------ +# Karlo-v1.0.alpha +# Copyright (c) 2022 KakaoBrain. All Rights Reserved. +# ------------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------------ +# Adapted from OpenAI's CLIP (https://github.com/openai/CLIP/) +# ------------------------------------------------------------------------------------ + + +import torch +import torch.nn as nn +import torch.nn.functional as F +import clip + +from clip.model import CLIP, convert_weights +from clip.simple_tokenizer import SimpleTokenizer, default_bpe + + +"""===== Monkey-Patching original CLIP for JIT compile =====""" + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = F.layer_norm( + x.type(torch.float32), + self.normalized_shape, + self.weight, + self.bias, + self.eps, + ) + return ret.type(orig_type) + + +clip.model.LayerNorm = LayerNorm +delattr(clip.model.CLIP, "forward") + +"""===== End of Monkey-Patching =====""" + + +class CustomizedCLIP(CLIP): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @torch.jit.export + def encode_image(self, image): + return self.visual(image) + + @torch.jit.export + def encode_text(self, text): + # re-define this function to return unpooled text features + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + x_seq = x + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x_out = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + return x_out, x_seq + + @torch.jit.ignore + def forward(self, image, text): + super().forward(image, text) + + @classmethod + def load_from_checkpoint(cls, ckpt_path: str): + state_dict = torch.load(ckpt_path, map_location="cpu").state_dict() + + vit = "visual.proj" in state_dict + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [ + k + for k in state_dict.keys() + if k.startswith("visual.") and k.endswith(".attn.in_proj_weight") + ] + ) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round( + (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5 + ) + image_resolution = vision_patch_size * grid_size + else: + 
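+            # ResNet-based CLIP checkpoint: recover the per-stage block counts from
+            # the "visual.layer{b}" keys and the input resolution from the attention
+            # pooling positional embedding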
counts: list = [ + len( + set( + k.split(".")[2] + for k in state_dict + if k.startswith(f"visual.layer{b}") + ) + ) + for b in [1, 2, 3, 4] + ] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round( + (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5 + ) + vision_patch_size = None + assert ( + output_width**2 + 1 + == state_dict["visual.attnpool.positional_embedding"].shape[0] + ) + image_resolution = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len( + set( + k.split(".")[2] + for k in state_dict + if k.startswith("transformer.resblocks") + ) + ) + + model = cls( + embed_dim, + image_resolution, + vision_layers, + vision_width, + vision_patch_size, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + convert_weights(model) + model.load_state_dict(state_dict) + model.eval() + model.float() + return model + + +class CustomizedTokenizer(SimpleTokenizer): + def __init__(self): + super().__init__(bpe_path=default_bpe()) + + self.sot_token = self.encoder["<|startoftext|>"] + self.eot_token = self.encoder["<|endoftext|>"] + + def padded_tokens_and_mask(self, texts, text_ctx): + assert isinstance(texts, list) and all( + isinstance(elem, str) for elem in texts + ), "texts should be a list of strings" + + all_tokens = [ + [self.sot_token] + self.encode(text) + [self.eot_token] for text in texts + ] + + mask = [ + [True] * min(text_ctx, len(tokens)) + + [False] * max(text_ctx - len(tokens), 0) + for tokens in all_tokens + ] + mask = torch.tensor(mask, dtype=torch.bool) + result = torch.zeros(len(all_tokens), text_ctx, dtype=torch.int) + for i, tokens in enumerate(all_tokens): + if len(tokens) > text_ctx: + tokens = tokens[:text_ctx] + tokens[-1] = self.eot_token + result[i, : len(tokens)] = torch.tensor(tokens) + + return result, mask diff --git a/ldm/modules/karlo/kakao/models/decoder_model.py b/ldm/modules/karlo/kakao/models/decoder_model.py new file mode 100644 index 0000000..84e96c9 --- /dev/null +++ b/ldm/modules/karlo/kakao/models/decoder_model.py @@ -0,0 +1,193 @@ +# ------------------------------------------------------------------------------------ +# Karlo-v1.0.alpha +# Copyright (c) 2022 KakaoBrain. All Rights Reserved. +# ------------------------------------------------------------------------------------ + +import copy +import torch + +from ldm.modules.karlo.kakao.modules import create_gaussian_diffusion +from ldm.modules.karlo.kakao.modules.unet import PLMImUNet + + +class Text2ImProgressiveModel(torch.nn.Module): + """ + A decoder that generates 64x64px images based on the text prompt. + + :param config: yaml config to define the decoder. + :param tokenizer: tokenizer used in clip. 
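+
+    `forward` is a generator that yields the current sample after every diffusion
+    step; the `Text2ImModel` subclass below simply returns the final sample.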
+ """ + + def __init__( + self, + config, + tokenizer, + ): + super().__init__() + + self._conf = config + self._model_conf = config.model.hparams + self._diffusion_kwargs = dict( + steps=config.diffusion.steps, + learn_sigma=config.diffusion.learn_sigma, + sigma_small=config.diffusion.sigma_small, + noise_schedule=config.diffusion.noise_schedule, + use_kl=config.diffusion.use_kl, + predict_xstart=config.diffusion.predict_xstart, + rescale_learned_sigmas=config.diffusion.rescale_learned_sigmas, + timestep_respacing=config.diffusion.timestep_respacing, + ) + self._tokenizer = tokenizer + + self.model = self.create_plm_dec_model() + + cf_token, cf_mask = self.set_cf_text_tensor() + self.register_buffer("cf_token", cf_token, persistent=False) + self.register_buffer("cf_mask", cf_mask, persistent=False) + + @classmethod + def load_from_checkpoint(cls, config, tokenizer, ckpt_path, strict: bool = True): + ckpt = torch.load(ckpt_path, map_location="cpu")["state_dict"] + + model = cls(config, tokenizer) + model.load_state_dict(ckpt, strict=strict) + return model + + def create_plm_dec_model(self): + image_size = self._model_conf.image_size + if self._model_conf.channel_mult == "": + if image_size == 256: + channel_mult = (1, 1, 2, 2, 4, 4) + elif image_size == 128: + channel_mult = (1, 1, 2, 3, 4) + elif image_size == 64: + channel_mult = (1, 2, 3, 4) + else: + raise ValueError(f"unsupported image size: {image_size}") + else: + channel_mult = tuple( + int(ch_mult) for ch_mult in self._model_conf.channel_mult.split(",") + ) + assert 2 ** (len(channel_mult) + 2) == image_size + + attention_ds = [] + for res in self._model_conf.attention_resolutions.split(","): + attention_ds.append(image_size // int(res)) + + return PLMImUNet( + text_ctx=self._model_conf.text_ctx, + xf_width=self._model_conf.xf_width, + in_channels=3, + model_channels=self._model_conf.num_channels, + out_channels=6 if self._model_conf.learn_sigma else 3, + num_res_blocks=self._model_conf.num_res_blocks, + attention_resolutions=tuple(attention_ds), + dropout=self._model_conf.dropout, + channel_mult=channel_mult, + num_heads=self._model_conf.num_heads, + num_head_channels=self._model_conf.num_head_channels, + num_heads_upsample=self._model_conf.num_heads_upsample, + use_scale_shift_norm=self._model_conf.use_scale_shift_norm, + resblock_updown=self._model_conf.resblock_updown, + clip_dim=self._model_conf.clip_dim, + clip_emb_mult=self._model_conf.clip_emb_mult, + clip_emb_type=self._model_conf.clip_emb_type, + clip_emb_drop=self._model_conf.clip_emb_drop, + ) + + def set_cf_text_tensor(self): + return self._tokenizer.padded_tokens_and_mask([""], self.model.text_ctx) + + def get_sample_fn(self, timestep_respacing): + use_ddim = timestep_respacing.startswith(("ddim", "fast")) + + diffusion_kwargs = copy.deepcopy(self._diffusion_kwargs) + diffusion_kwargs.update(timestep_respacing=timestep_respacing) + diffusion = create_gaussian_diffusion(**diffusion_kwargs) + sample_fn = ( + diffusion.ddim_sample_loop_progressive + if use_ddim + else diffusion.p_sample_loop_progressive + ) + + return sample_fn + + def forward( + self, + txt_feat, + txt_feat_seq, + tok, + mask, + img_feat=None, + cf_guidance_scales=None, + timestep_respacing=None, + ): + # cfg should be enabled in inference + assert cf_guidance_scales is not None and all(cf_guidance_scales > 0.0) + assert img_feat is not None + + bsz = txt_feat.shape[0] + img_sz = self._model_conf.image_size + + def guided_model_fn(x_t, ts, **kwargs): + half = x_t[: len(x_t) // 2] + combined = 
torch.cat([half, half], dim=0)
+            model_out = self.model(combined, ts, **kwargs)
+            eps, rest = model_out[:, :3], model_out[:, 3:]
+            cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
+            half_eps = uncond_eps + cf_guidance_scales.view(-1, 1, 1, 1) * (
+                cond_eps - uncond_eps
+            )
+            eps = torch.cat([half_eps, half_eps], dim=0)
+            return torch.cat([eps, rest], dim=1)
+
+        cf_feat = self.model.cf_param.unsqueeze(0)
+        cf_feat = cf_feat.expand(bsz // 2, -1)
+        feat = torch.cat([img_feat, cf_feat.to(txt_feat.device)], dim=0)
+
+        cond = {
+            "y": feat,
+            "txt_feat": txt_feat,
+            "txt_feat_seq": txt_feat_seq,
+            "mask": mask,
+        }
+        sample_fn = self.get_sample_fn(timestep_respacing)
+        sample_outputs = sample_fn(
+            guided_model_fn,
+            (bsz, 3, img_sz, img_sz),
+            noise=None,
+            device=txt_feat.device,
+            clip_denoised=True,
+            model_kwargs=cond,
+        )
+
+        for out in sample_outputs:
+            sample = out["sample"]
+            yield sample if cf_guidance_scales is None else sample[
+                : sample.shape[0] // 2
+            ]
+
+
+class Text2ImModel(Text2ImProgressiveModel):
+    def forward(
+        self,
+        txt_feat,
+        txt_feat_seq,
+        tok,
+        mask,
+        img_feat=None,
+        cf_guidance_scales=None,
+        timestep_respacing=None,
+    ):
+        last_out = None
+        for out in super().forward(
+            txt_feat,
+            txt_feat_seq,
+            tok,
+            mask,
+            img_feat,
+            cf_guidance_scales,
+            timestep_respacing,
+        ):
+            last_out = out
+        return last_out
diff --git a/ldm/modules/karlo/kakao/models/prior_model.py b/ldm/modules/karlo/kakao/models/prior_model.py
new file mode 100644
index 0000000..03ef230
--- /dev/null
+++ b/ldm/modules/karlo/kakao/models/prior_model.py
@@ -0,0 +1,138 @@
+# ------------------------------------------------------------------------------------
+# Karlo-v1.0.alpha
+# Copyright (c) 2022 KakaoBrain. All Rights Reserved.
+# ------------------------------------------------------------------------------------
+
+import copy
+import torch
+
+from ldm.modules.karlo.kakao.modules import create_gaussian_diffusion
+from ldm.modules.karlo.kakao.modules.xf import PriorTransformer
+
+
+class PriorDiffusionModel(torch.nn.Module):
+    """
+    A prior that generates a clip image feature based on the text prompt.
+
+    :param config: yaml config to define the prior.
+    :param tokenizer: tokenizer used in clip.
+    :param clip_mean: mean to normalize the clip image feature (zero-mean, unit variance).
+    :param clip_std: std to normalize the clip image feature (zero-mean, unit variance).
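+
+    Both this prior and the decoder above implement classifier-free guidance
+    by duplicating the batch into [conditional; unconditional] halves. A
+    minimal sketch of the epsilon mixing done in guided_model_fn (names are
+    illustrative):
+
+        import torch
+
+        def mix_eps(eps, guidance_scale):
+            cond_eps, uncond_eps = torch.chunk(eps, 2, dim=0)
+            half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
+            return torch.cat([half_eps, half_eps], dim=0)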
+ """ + + def __init__(self, config, tokenizer, clip_mean, clip_std): + super().__init__() + + self._conf = config + self._model_conf = config.model.hparams + self._diffusion_kwargs = dict( + steps=config.diffusion.steps, + learn_sigma=config.diffusion.learn_sigma, + sigma_small=config.diffusion.sigma_small, + noise_schedule=config.diffusion.noise_schedule, + use_kl=config.diffusion.use_kl, + predict_xstart=config.diffusion.predict_xstart, + rescale_learned_sigmas=config.diffusion.rescale_learned_sigmas, + timestep_respacing=config.diffusion.timestep_respacing, + ) + self._tokenizer = tokenizer + + self.register_buffer("clip_mean", clip_mean[None, :], persistent=False) + self.register_buffer("clip_std", clip_std[None, :], persistent=False) + + causal_mask = self.get_causal_mask() + self.register_buffer("causal_mask", causal_mask, persistent=False) + + self.model = PriorTransformer( + text_ctx=self._model_conf.text_ctx, + xf_width=self._model_conf.xf_width, + xf_layers=self._model_conf.xf_layers, + xf_heads=self._model_conf.xf_heads, + xf_final_ln=self._model_conf.xf_final_ln, + clip_dim=self._model_conf.clip_dim, + ) + + cf_token, cf_mask = self.set_cf_text_tensor() + self.register_buffer("cf_token", cf_token, persistent=False) + self.register_buffer("cf_mask", cf_mask, persistent=False) + + @classmethod + def load_from_checkpoint( + cls, config, tokenizer, clip_mean, clip_std, ckpt_path, strict: bool = True + ): + ckpt = torch.load(ckpt_path, map_location="cpu")["state_dict"] + + model = cls(config, tokenizer, clip_mean, clip_std) + model.load_state_dict(ckpt, strict=strict) + return model + + def set_cf_text_tensor(self): + return self._tokenizer.padded_tokens_and_mask([""], self.model.text_ctx) + + def get_sample_fn(self, timestep_respacing): + use_ddim = timestep_respacing.startswith(("ddim", "fast")) + + diffusion_kwargs = copy.deepcopy(self._diffusion_kwargs) + diffusion_kwargs.update(timestep_respacing=timestep_respacing) + diffusion = create_gaussian_diffusion(**diffusion_kwargs) + sample_fn = diffusion.ddim_sample_loop if use_ddim else diffusion.p_sample_loop + + return sample_fn + + def get_causal_mask(self): + seq_len = self._model_conf.text_ctx + 4 + mask = torch.empty(seq_len, seq_len) + mask.fill_(float("-inf")) + mask.triu_(1) + mask = mask[None, ...] 
+        return mask
+
+    def forward(
+        self,
+        txt_feat,
+        txt_feat_seq,
+        mask,
+        cf_guidance_scales=None,
+        timestep_respacing=None,
+        denoised_fn=True,
+    ):
+        # cfg should be enabled in inference
+        assert cf_guidance_scales is not None and all(cf_guidance_scales > 0.0)
+
+        bsz_ = txt_feat.shape[0]
+        bsz = bsz_ // 2
+
+        def guided_model_fn(x_t, ts, **kwargs):
+            half = x_t[: len(x_t) // 2]
+            combined = torch.cat([half, half], dim=0)
+            model_out = self.model(combined, ts, **kwargs)
+            eps, rest = (
+                model_out[:, : int(x_t.shape[1])],
+                model_out[:, int(x_t.shape[1]) :],
+            )
+            cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
+            half_eps = uncond_eps + cf_guidance_scales.view(-1, 1) * (
+                cond_eps - uncond_eps
+            )
+            eps = torch.cat([half_eps, half_eps], dim=0)
+            return torch.cat([eps, rest], dim=1)
+
+        cond = {
+            "text_emb": txt_feat,
+            "text_enc": txt_feat_seq,
+            "mask": mask,
+            "causal_mask": self.causal_mask,
+        }
+        sample_fn = self.get_sample_fn(timestep_respacing)
+        sample = sample_fn(
+            guided_model_fn,
+            (bsz_, self.model.clip_dim),
+            noise=None,
+            device=txt_feat.device,
+            clip_denoised=False,
+            denoised_fn=lambda x: torch.clamp(x, -10, 10),
+            model_kwargs=cond,
+        )
+        sample = (sample * self.clip_std) + self.clip_mean
+
+        return sample[:bsz]
diff --git a/ldm/modules/karlo/kakao/models/sr_256_1k.py b/ldm/modules/karlo/kakao/models/sr_256_1k.py
new file mode 100644
index 0000000..1e874f6
--- /dev/null
+++ b/ldm/modules/karlo/kakao/models/sr_256_1k.py
@@ -0,0 +1,10 @@
+# ------------------------------------------------------------------------------------
+# Karlo-v1.0.alpha
+# Copyright (c) 2022 KakaoBrain. All Rights Reserved.
+# ------------------------------------------------------------------------------------
+
+from ldm.modules.karlo.kakao.models.sr_64_256 import SupRes64to256Progressive
+
+
+class SupRes256to1kProgressive(SupRes64to256Progressive):
+    pass  # no difference currently
diff --git a/ldm/modules/karlo/kakao/models/sr_64_256.py b/ldm/modules/karlo/kakao/models/sr_64_256.py
new file mode 100644
index 0000000..32687af
--- /dev/null
+++ b/ldm/modules/karlo/kakao/models/sr_64_256.py
@@ -0,0 +1,88 @@
+# ------------------------------------------------------------------------------------
+# Karlo-v1.0.alpha
+# Copyright (c) 2022 KakaoBrain. All Rights Reserved.
+# ------------------------------------------------------------------------------------
+
+import copy
+import torch
+
+from ldm.modules.karlo.kakao.modules.unet import SuperResUNetModel
+from ldm.modules.karlo.kakao.modules import create_gaussian_diffusion
+
+
+class ImprovedSupRes64to256ProgressiveModel(torch.nn.Module):
+    """
+    ImprovedSR model fine-tunes the pretrained DDPM-based SR model by using adversarial and perceptual losses.
+    Specifically, the low-resolution sample is iteratively recovered over 6 steps with the frozen pretrained SR model.
+    In one additional final step, a separate fine-tuned model recovers the high-frequency details.
+    This approach greatly improves the fidelity of 256x256px images, even with a small number of reverse steps.
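+
+    Note that forward() samples with shape=low_res.shape, so the conditioning
+    image must already be at the 256x256 target resolution. A sketch of that
+    resize (the interpolation mode here is an assumption, not prescribed by
+    this module):
+
+        import torch
+        import torch.nn.functional as F
+
+        decoded_64 = torch.randn(1, 3, 64, 64)  # stand-in for a decoder sample
+        low_res = F.interpolate(decoded_64, size=(256, 256), mode="bicubic")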
+ """ + + def __init__(self, config): + super().__init__() + + self._config = config + self._diffusion_kwargs = dict( + steps=config.diffusion.steps, + learn_sigma=config.diffusion.learn_sigma, + sigma_small=config.diffusion.sigma_small, + noise_schedule=config.diffusion.noise_schedule, + use_kl=config.diffusion.use_kl, + predict_xstart=config.diffusion.predict_xstart, + rescale_learned_sigmas=config.diffusion.rescale_learned_sigmas, + ) + + self.model_first_steps = SuperResUNetModel( + in_channels=3, # auto-changed to 6 inside the model + model_channels=config.model.hparams.channels, + out_channels=3, + num_res_blocks=config.model.hparams.depth, + attention_resolutions=(), # no attention + dropout=config.model.hparams.dropout, + channel_mult=config.model.hparams.channels_multiple, + resblock_updown=True, + use_middle_attention=False, + ) + self.model_last_step = SuperResUNetModel( + in_channels=3, # auto-changed to 6 inside the model + model_channels=config.model.hparams.channels, + out_channels=3, + num_res_blocks=config.model.hparams.depth, + attention_resolutions=(), # no attention + dropout=config.model.hparams.dropout, + channel_mult=config.model.hparams.channels_multiple, + resblock_updown=True, + use_middle_attention=False, + ) + + @classmethod + def load_from_checkpoint(cls, config, ckpt_path, strict: bool = True): + ckpt = torch.load(ckpt_path, map_location="cpu")["state_dict"] + + model = cls(config) + model.load_state_dict(ckpt, strict=strict) + return model + + def get_sample_fn(self, timestep_respacing): + diffusion_kwargs = copy.deepcopy(self._diffusion_kwargs) + diffusion_kwargs.update(timestep_respacing=timestep_respacing) + diffusion = create_gaussian_diffusion(**diffusion_kwargs) + return diffusion.p_sample_loop_progressive_for_improved_sr + + def forward(self, low_res, timestep_respacing="7", **kwargs): + assert ( + timestep_respacing == "7" + ), "different respacing method may work, but no guaranteed" + + sample_fn = self.get_sample_fn(timestep_respacing) + sample_outputs = sample_fn( + self.model_first_steps, + self.model_last_step, + shape=low_res.shape, + clip_denoised=True, + model_kwargs=dict(low_res=low_res), + **kwargs, + ) + for x in sample_outputs: + sample = x["sample"] + yield sample diff --git a/ldm/modules/karlo/kakao/modules/__init__.py b/ldm/modules/karlo/kakao/modules/__init__.py new file mode 100644 index 0000000..11d4358 --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/__init__.py @@ -0,0 +1,49 @@ +# ------------------------------------------------------------------------------------ +# Adapted from Guided-Diffusion repo (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + + +from .diffusion import gaussian_diffusion as gd +from .diffusion.respace import ( + SpacedDiffusion, + space_timesteps, +) + + +def create_gaussian_diffusion( + steps, + learn_sigma, + sigma_small, + noise_schedule, + use_kl, + predict_xstart, + rescale_learned_sigmas, + timestep_respacing, +): + betas = gd.get_named_beta_schedule(noise_schedule, steps) + if use_kl: + loss_type = gd.LossType.RESCALED_KL + elif rescale_learned_sigmas: + loss_type = gd.LossType.RESCALED_MSE + else: + loss_type = gd.LossType.MSE + if not timestep_respacing: + timestep_respacing = [steps] + + return SpacedDiffusion( + use_timesteps=space_timesteps(steps, timestep_respacing), + betas=betas, + model_mean_type=( + gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X + ), + model_var_type=( + ( + 
gd.ModelVarType.FIXED_LARGE + if not sigma_small + else gd.ModelVarType.FIXED_SMALL + ) + if not learn_sigma + else gd.ModelVarType.LEARNED_RANGE + ), + loss_type=loss_type, + ) diff --git a/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py b/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py new file mode 100644 index 0000000..6a111aa --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/diffusion/gaussian_diffusion.py @@ -0,0 +1,828 @@ +# ------------------------------------------------------------------------------------ +# Adapted from Guided-Diffusion repo (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + +import enum +import math + +import numpy as np +import torch as th + + +def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac): + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + warmup_time = int(num_diffusion_timesteps * warmup_frac) + betas[:warmup_time] = np.linspace( + beta_start, beta_end, warmup_time, dtype=np.float64 + ) + return betas + + +def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps): + """ + This is the deprecated API for creating beta schedules. + See get_named_beta_schedule() for the new library of schedules. + """ + if beta_schedule == "quad": + betas = ( + np.linspace( + beta_start**0.5, + beta_end**0.5, + num_diffusion_timesteps, + dtype=np.float64, + ) + ** 2 + ) + elif beta_schedule == "linear": + betas = np.linspace( + beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 + ) + elif beta_schedule == "warmup10": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1) + elif beta_schedule == "warmup50": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5) + elif beta_schedule == "const": + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1 + betas = 1.0 / np.linspace( + num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64 + ) + else: + raise NotImplementedError(beta_schedule) + assert betas.shape == (num_diffusion_timesteps,) + return betas + + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): + """ + Get a pre-defined beta schedule for the given name. + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. + scale = 1000 / num_diffusion_timesteps + return get_beta_schedule( + "linear", + beta_start=scale * 0.0001, + beta_end=scale * 0.02, + num_diffusion_timesteps=num_diffusion_timesteps, + ) + elif schedule_name == "squaredcos_cap_v2": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. 
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. + """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +class ModelMeanType(enum.Enum): + """ + Which type of output the model predicts. + """ + + PREVIOUS_X = enum.auto() # the model predicts x_{t-1} + START_X = enum.auto() # the model predicts x_0 + EPSILON = enum.auto() # the model predicts epsilon + + +class ModelVarType(enum.Enum): + """ + What is used as the model's output variance. + The LEARNED_RANGE option has been added to allow the model to predict + values between FIXED_SMALL and FIXED_LARGE, making its job easier. + """ + + LEARNED = enum.auto() + FIXED_SMALL = enum.auto() + FIXED_LARGE = enum.auto() + LEARNED_RANGE = enum.auto() + + +class LossType(enum.Enum): + MSE = enum.auto() # use raw MSE loss (and KL when learning variances) + RESCALED_MSE = ( + enum.auto() + ) # use raw MSE loss (with RESCALED_KL when learning variances) + KL = enum.auto() # use the variational lower-bound + RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB + + def is_vb(self): + return self == LossType.KL or self == LossType.RESCALED_KL + + +class GaussianDiffusion(th.nn.Module): + """ + Utilities for training and sampling diffusion models. + Original ported from this codebase: + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 + :param betas: a 1-D numpy array of betas for each diffusion timestep, + starting at T and going to 1. + """ + + def __init__( + self, + *, + betas, + model_mean_type, + model_var_type, + loss_type, + ): + super(GaussianDiffusion, self).__init__() + self.model_mean_type = model_mean_type + self.model_var_type = model_var_type + self.loss_type = loss_type + + # Use float64 for accuracy. 
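+        # The schedule-derived arrays below are precomputed once from `betas`
+        # and registered as non-persistent buffers, so they follow the module
+        # across devices without being written into checkpoints.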
+ betas = np.array(betas, dtype=np.float64) + assert len(betas.shape) == 1, "betas must be 1-D" + assert (betas > 0).all() and (betas <= 1).all() + + self.num_timesteps = int(betas.shape[0]) + + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + alphas_cumprod_next = np.append(alphas_cumprod[1:], 0.0) + assert alphas_cumprod_prev.shape == (self.num_timesteps,) + + # calculations for diffusion q(x_t | x_{t-1}) and others + sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) + sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) + log_one_minus_alphas_cumprod = np.log(1.0 - alphas_cumprod) + sqrt_recip_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod) + sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod - 1) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = ( + betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) + ) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + posterior_log_variance_clipped = np.log( + np.append(posterior_variance[1], posterior_variance[1:]) + ) + posterior_mean_coef1 = ( + betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) + ) + posterior_mean_coef2 = ( + (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) + ) + + self.register_buffer("betas", th.from_numpy(betas), persistent=False) + self.register_buffer( + "alphas_cumprod", th.from_numpy(alphas_cumprod), persistent=False + ) + self.register_buffer( + "alphas_cumprod_prev", th.from_numpy(alphas_cumprod_prev), persistent=False + ) + self.register_buffer( + "alphas_cumprod_next", th.from_numpy(alphas_cumprod_next), persistent=False + ) + + self.register_buffer( + "sqrt_alphas_cumprod", th.from_numpy(sqrt_alphas_cumprod), persistent=False + ) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", + th.from_numpy(sqrt_one_minus_alphas_cumprod), + persistent=False, + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", + th.from_numpy(log_one_minus_alphas_cumprod), + persistent=False, + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", + th.from_numpy(sqrt_recip_alphas_cumprod), + persistent=False, + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", + th.from_numpy(sqrt_recipm1_alphas_cumprod), + persistent=False, + ) + + self.register_buffer( + "posterior_variance", th.from_numpy(posterior_variance), persistent=False + ) + self.register_buffer( + "posterior_log_variance_clipped", + th.from_numpy(posterior_log_variance_clipped), + persistent=False, + ) + self.register_buffer( + "posterior_mean_coef1", + th.from_numpy(posterior_mean_coef1), + persistent=False, + ) + self.register_buffer( + "posterior_mean_coef2", + th.from_numpy(posterior_mean_coef2), + persistent=False, + ) + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
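+
+        A small self-check of the closed form these buffers encode
+        (illustrative toy schedule):
+
+            import numpy as np
+
+            betas = np.linspace(1e-4, 0.02, 10)
+            alphas_cumprod = np.cumprod(1.0 - betas)
+            t = 5
+            coef = np.sqrt(alphas_cumprod[t])  # multiplies x_0 in the mean
+            var = 1.0 - alphas_cumprod[t]      # the marginal variance
+            assert np.isclose(coef**2 + var, 1.0)  # variance-preserving process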
+ """ + mean = ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + ) + variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = _extract_into_tensor( + self.log_one_minus_alphas_cumprod, t, x_start.shape + ) + return mean, variance, log_variance + + def q_sample(self, x_start, t, noise=None): + """ + Diffuse the data for a given number of diffusion steps. + In other words, sample from q(x_t | x_0). + :param x_start: the initial data batch. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :param noise: if specified, the split-out normal noise. + :return: A noisy version of x_start. + """ + if noise is None: + noise = th.randn_like(x_start) + assert noise.shape == x_start.shape + return ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def q_posterior_mean_variance(self, x_start, x_t, t): + """ + Compute the mean and variance of the diffusion posterior: + q(x_{t-1} | x_t, x_0) + """ + assert x_start.shape == x_t.shape + posterior_mean = ( + _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = _extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + assert ( + posterior_mean.shape[0] + == posterior_variance.shape[0] + == posterior_log_variance_clipped.shape[0] + == x_start.shape[0] + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + model_kwargs=None, + **ignore_kwargs, + ): + """ + Apply the model to get p(x_{t-1} | x_t), as well as a prediction of + the initial x, x_0. + :param model: the model, which takes a signal and a batch of timesteps + as input. + :param x: the [N x C x ...] tensor at time t. + :param t: a 1-D Tensor of timesteps. + :param clip_denoised: if True, clip the denoised signal into [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. Applies before + clip_denoised. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict with the following keys: + - 'mean': the model mean output. + - 'variance': the model variance output. + - 'log_variance': the log of 'variance'. + - 'pred_xstart': the prediction for x_0. + """ + if model_kwargs is None: + model_kwargs = {} + + B, C = x.shape[:2] + assert t.shape == (B,) + model_output = model(x, t, **model_kwargs) + if isinstance(model_output, tuple): + model_output, extra = model_output + else: + extra = None + + if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: + assert model_output.shape == (B, C * 2, *x.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + if self.model_var_type == ModelVarType.LEARNED: + model_log_variance = model_var_values + model_variance = th.exp(model_log_variance) + else: + min_log = _extract_into_tensor( + self.posterior_log_variance_clipped, t, x.shape + ) + max_log = _extract_into_tensor(th.log(self.betas), t, x.shape) + # The model_var_values is [-1, 1] for [min_var, max_var]. 
+ frac = (model_var_values + 1) / 2 + model_log_variance = frac * max_log + (1 - frac) * min_log + model_variance = th.exp(model_log_variance) + else: + model_variance, model_log_variance = { + # for fixedlarge, we set the initial (log-)variance like so + # to get a better decoder log likelihood. + ModelVarType.FIXED_LARGE: ( + th.cat([self.posterior_variance[1][None], self.betas[1:]]), + th.log(th.cat([self.posterior_variance[1][None], self.betas[1:]])), + ), + ModelVarType.FIXED_SMALL: ( + self.posterior_variance, + self.posterior_log_variance_clipped, + ), + }[self.model_var_type] + model_variance = _extract_into_tensor(model_variance, t, x.shape) + model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) + + def process_xstart(x): + if denoised_fn is not None: + x = denoised_fn(x) + if clip_denoised: + return x.clamp(-1, 1) + return x + + if self.model_mean_type == ModelMeanType.PREVIOUS_X: + pred_xstart = process_xstart( + self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output) + ) + model_mean = model_output + elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: + if self.model_mean_type == ModelMeanType.START_X: + pred_xstart = process_xstart(model_output) + else: + pred_xstart = process_xstart( + self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) + ) + model_mean, _, _ = self.q_posterior_mean_variance( + x_start=pred_xstart, x_t=x, t=t + ) + else: + raise NotImplementedError(self.model_mean_type) + + assert ( + model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape + ) + return { + "mean": model_mean, + "variance": model_variance, + "log_variance": model_log_variance, + "pred_xstart": pred_xstart, + } + + def _predict_xstart_from_eps(self, x_t, t, eps): + assert x_t.shape == eps.shape + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + ) + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute the mean for the previous step, given a function cond_fn that + computes the gradient of a conditional log probability with respect to + x. In particular, cond_fn computes grad(log(p(y|x))), and we want to + condition on y. + This uses the conditioning strategy from Sohl-Dickstein et al. (2015). + """ + gradient = cond_fn(x, t, **model_kwargs) + new_mean = ( + p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() + ) + return new_mean + + def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute what the p_mean_variance output would have been, should the + model's score function be conditioned by cond_fn. + See condition_mean() for details on cond_fn. + Unlike condition_mean(), this instead uses the conditioning strategy + from Song et al (2020). 
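+
+        A sketch of the update applied below, where cond_fn returns
+        grad_x log p(y | x_t) (names are illustrative):
+
+            def conditioned_eps(eps, alpha_bar_t, grad_log_p):
+                # eps_hat = eps - sqrt(1 - alpha_bar_t) * grad_x log p(y | x_t)
+                return eps - (1 - alpha_bar_t).sqrt() * grad_log_p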
+ """ + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + + eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) + eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs) + + out = p_mean_var.copy() + out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) + out["mean"], _, _ = self.q_posterior_mean_variance( + x_start=out["pred_xstart"], x_t=x, t=t + ) + return out + + def p_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + ): + """ + Sample x_{t-1} from the model at the given timestep. + :param model: the model to sample from. + :param x: the current tensor at x_{t-1}. + :param t: the value of t, starting at 0 for the first diffusion step. + :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict containing the following keys: + - 'sample': a random sample from the model. + - 'pred_xstart': a prediction of x_0. + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = th.randn_like(x) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + if cond_fn is not None: + out["mean"] = self.condition_mean( + cond_fn, out, x, t, model_kwargs=model_kwargs + ) + sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def p_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + ): + """ + Generate samples from the model. + :param model: the model module. + :param shape: the shape of the samples, (N, C, H, W). + :param noise: if specified, the noise from the encoder to sample. + Should be of the same shape as `shape`. + :param clip_denoised: if True, clip x_start predictions to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param device: if specified, the device to create the samples on. + If not specified, use a model parameter's device. + :param progress: if True, show a tqdm progress bar. + :return: a non-differentiable batch of samples. + """ + final = None + for sample in self.p_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + ): + final = sample + return final["sample"] + + def p_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + ): + """ + Generate samples from the model and yield intermediate samples from + each timestep of diffusion. + Arguments are the same as p_sample_loop(). 
+ Returns a generator over dicts, where each dict is the return value of + p_sample(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for idx, i in enumerate(indices): + t = th.tensor([i] * shape[0], device=device) + with th.no_grad(): + out = self.p_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + ) + yield out + img = out["sample"] + + def p_sample_loop_progressive_for_improved_sr( + self, + model, + model_aux, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + ): + """ + Modified version of p_sample_loop_progressive for sampling from the improved sr model + """ + + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for idx, i in enumerate(indices): + t = th.tensor([i] * shape[0], device=device) + with th.no_grad(): + out = self.p_sample( + model_aux if len(indices) - 1 == idx else model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + ) + yield out + img = out["sample"] + + def ddim_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t-1} from the model using DDIM. + Same usage as p_sample(). + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + sigma = ( + eta + * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * th.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + # Equation 12. + noise = th.randn_like(x) + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_prev) + + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps + ) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def ddim_reverse_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t+1} from the model using DDIM reverse ODE. 
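+
+        For reference, the forward direction (ddim_sample above) implements
+        Eq. (12) of the DDIM paper; a minimal sketch of that update with
+        illustrative names:
+
+            import torch as th
+
+            def ddim_update(x0_pred, eps, alpha_bar_prev, sigma, noise):
+                # deterministic when sigma == 0 (i.e. eta == 0)
+                return (
+                    x0_pred * th.sqrt(alpha_bar_prev)
+                    + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
+                    + sigma * noise
+                )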
+ """ + assert eta == 0.0, "Reverse ODE only for deterministic path" + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x + - out["pred_xstart"] + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) + + # Equation 12. reversed + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_next) + + th.sqrt(1 - alpha_bar_next) * eps + ) + + return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} + + def ddim_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Generate samples from the model using DDIM. + Same usage as p_sample_loop(). + """ + final = None + for sample in self.ddim_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + eta=eta, + ): + final = sample + return final["sample"] + + def ddim_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Use DDIM to sample from the model and yield intermediate samples from + each timestep of DDIM. + Same usage as p_sample_loop_progressive(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + with th.no_grad(): + out = self.ddim_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + eta=eta, + ) + yield out + img = out["sample"] + + +def _extract_into_tensor(arr, timesteps, broadcast_shape): + """ + Extract values from a 1-D numpy array for a batch of indices. + :param arr: the 1-D numpy array. + :param timesteps: a tensor of indices into the array to extract. + :param broadcast_shape: a larger shape of K dimensions with the batch + dimension equal to the length of timesteps. + :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. 
+ """ + res = arr.to(device=timesteps.device)[timesteps].float() + while len(res.shape) < len(broadcast_shape): + res = res[..., None] + return res + th.zeros(broadcast_shape, device=timesteps.device) diff --git a/ldm/modules/karlo/kakao/modules/diffusion/respace.py b/ldm/modules/karlo/kakao/modules/diffusion/respace.py new file mode 100644 index 0000000..70c808f --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/diffusion/respace.py @@ -0,0 +1,112 @@ +# ------------------------------------------------------------------------------------ +# Adapted from Guided-Diffusion repo (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + + +import torch as th + +from .gaussian_diffusion import GaussianDiffusion + + +def space_timesteps(num_timesteps, section_counts): + """ + Create a list of timesteps to use from an original diffusion process, + given the number of timesteps we want to take from equally-sized portions + of the original process. + + For example, if there's 300 timesteps and the section counts are [10,15,20] + then the first 100 timesteps are strided to be 10 timesteps, the second 100 + are strided to be 15 timesteps, and the final 100 are strided to be 20. + + :param num_timesteps: the number of diffusion steps in the original + process to divide up. + :param section_counts: either a list of numbers, or a string containing + comma-separated numbers, indicating the step count + per section. As a special case, use "ddimN" where N + is a number of steps to use the striding from the + DDIM paper. + :return: a set of diffusion steps from the original process to use. + """ + if isinstance(section_counts, str): + if section_counts.startswith("ddim"): + desired_count = int(section_counts[len("ddim") :]) + for i in range(1, num_timesteps): + if len(range(0, num_timesteps, i)) == desired_count: + return set(range(0, num_timesteps, i)) + raise ValueError( + f"cannot create exactly {num_timesteps} steps with an integer stride" + ) + elif section_counts == "fast27": + steps = space_timesteps(num_timesteps, "10,10,3,2,2") + # Help reduce DDIM artifacts from noisiest timesteps. + steps.remove(num_timesteps - 1) + steps.add(num_timesteps - 3) + return steps + section_counts = [int(x) for x in section_counts.split(",")] + size_per = num_timesteps // len(section_counts) + extra = num_timesteps % len(section_counts) + start_idx = 0 + all_steps = [] + for i, section_count in enumerate(section_counts): + size = size_per + (1 if i < extra else 0) + if size < section_count: + raise ValueError( + f"cannot divide section of {size} steps into {section_count}" + ) + if section_count <= 1: + frac_stride = 1 + else: + frac_stride = (size - 1) / (section_count - 1) + cur_idx = 0.0 + taken_steps = [] + for _ in range(section_count): + taken_steps.append(start_idx + round(cur_idx)) + cur_idx += frac_stride + all_steps += taken_steps + start_idx += size + return set(all_steps) + + +class SpacedDiffusion(GaussianDiffusion): + """ + A diffusion process which can skip steps in a base diffusion process. + + :param use_timesteps: a collection (sequence or set) of timesteps from the + original diffusion process to retain. + :param kwargs: the kwargs to create the base diffusion process. 
+ """ + + def __init__(self, use_timesteps, **kwargs): + self.use_timesteps = set(use_timesteps) + self.original_num_steps = len(kwargs["betas"]) + + base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa + last_alpha_cumprod = 1.0 + new_betas = [] + timestep_map = [] + for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): + if i in self.use_timesteps: + new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) + last_alpha_cumprod = alpha_cumprod + timestep_map.append(i) + kwargs["betas"] = th.tensor(new_betas).numpy() + super().__init__(**kwargs) + self.register_buffer("timestep_map", th.tensor(timestep_map), persistent=False) + + def p_mean_variance(self, model, *args, **kwargs): + return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) + + def condition_mean(self, cond_fn, *args, **kwargs): + return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) + + def condition_score(self, cond_fn, *args, **kwargs): + return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) + + def _wrap_model(self, model): + def wrapped(x, ts, **kwargs): + ts_cpu = ts.detach().to("cpu") + return model( + x, self.timestep_map[ts_cpu].to(device=ts.device, dtype=ts.dtype), **kwargs + ) + + return wrapped diff --git a/ldm/modules/karlo/kakao/modules/nn.py b/ldm/modules/karlo/kakao/modules/nn.py new file mode 100644 index 0000000..2eef3f5 --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/nn.py @@ -0,0 +1,114 @@ +# ------------------------------------------------------------------------------------ +# Adapted from Guided-Diffusion repo (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + +import math + +import torch as th +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm32(nn.GroupNorm): + def __init__(self, num_groups, num_channels, swish, eps=1e-5): + super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) + self.swish = swish + + def forward(self, x): + y = super().forward(x.float()).to(x.dtype) + if self.swish == 1.0: + y = F.silu(y) + elif self.swish: + y = y * F.sigmoid(y * float(self.swish)) + return y + + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def normalization(channels, swish=0.0): + """ + Make a standard normalization layer, with an optional swish activation. + + :param channels: number of input channels. + :return: an nn.Module for normalization. 
+ """ + return GroupNorm32(num_channels=channels, num_groups=32, swish=swish) + + +def timestep_embedding(timesteps, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + half = dim // 2 + freqs = th.exp( + -math.log(max_period) + * th.arange(start=0, end=half, dtype=th.float32, device=timesteps.device) + / half + ) + args = timesteps[:, None].float() * freqs[None] + embedding = th.cat([th.cos(args), th.sin(args)], dim=-1) + if dim % 2: + embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) diff --git a/ldm/modules/karlo/kakao/modules/resample.py b/ldm/modules/karlo/kakao/modules/resample.py new file mode 100644 index 0000000..485421a --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/resample.py @@ -0,0 +1,68 @@ +# ------------------------------------------------------------------------------------ +# Modified from Guided-Diffusion (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + +from abc import abstractmethod + +import torch as th + + +def create_named_schedule_sampler(name, diffusion): + """ + Create a ScheduleSampler from a library of pre-defined samplers. + + :param name: the name of the sampler. + :param diffusion: the diffusion object to sample for. + """ + if name == "uniform": + return UniformSampler(diffusion) + else: + raise NotImplementedError(f"unknown schedule sampler: {name}") + + +class ScheduleSampler(th.nn.Module): + """ + A distribution over timesteps in the diffusion process, intended to reduce + variance of the objective. + + By default, samplers perform unbiased importance sampling, in which the + objective's mean is unchanged. + However, subclasses may override sample() to change how the resampled + terms are reweighted, allowing for actual changes in the objective. + """ + + @abstractmethod + def weights(self): + """ + Get a numpy array of weights, one per diffusion step. + + The weights needn't be normalized, but must be positive. + """ + + def sample(self, batch_size, device): + """ + Importance-sample timesteps for a batch. + + :param batch_size: the number of timesteps. + :param device: the torch device to save to. + :return: a tuple (timesteps, weights): + - timesteps: a tensor of timestep indices. + - weights: a tensor of weights to scale the resulting losses. 
+ """ + w = self.weights() + p = w / th.sum(w) + indices = p.multinomial(batch_size, replacement=True) + weights = 1 / (len(p) * p[indices]) + return indices, weights + + +class UniformSampler(ScheduleSampler): + def __init__(self, diffusion): + super(UniformSampler, self).__init__() + self.diffusion = diffusion + self.register_buffer( + "_weights", th.ones([diffusion.num_timesteps]), persistent=False + ) + + def weights(self): + return self._weights diff --git a/ldm/modules/karlo/kakao/modules/unet.py b/ldm/modules/karlo/kakao/modules/unet.py new file mode 100644 index 0000000..c99d0b7 --- /dev/null +++ b/ldm/modules/karlo/kakao/modules/unet.py @@ -0,0 +1,792 @@ +# ------------------------------------------------------------------------------------ +# Modified from Guided-Diffusion (https://github.com/openai/guided-diffusion) +# ------------------------------------------------------------------------------------ + +import math +from abc import abstractmethod + +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from .nn import ( + avg_pool_nd, + conv_nd, + linear, + normalization, + timestep_embedding, + zero_module, +) +from .xf import LayerNorm + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, encoder_out=None, mask=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, AttentionBlock): + x = layer(x, encoder_out, mask=mask) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. 
+ """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=1 + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels, swish=1.0), + nn.Identity(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization( + self.out_channels, swish=0.0 if use_scale_shift_norm else 1.0 + ), + nn.SiLU() if use_scale_shift_norm else nn.Identity(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. 
+ """ + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class ResBlockNoTimeEmbedding(nn.Module): + """ + A residual block without time embedding + + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + **kwargs, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + + self.in_layers = nn.Sequential( + normalization(channels, swish=1.0), + nn.Identity(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.out_layers = nn.Sequential( + normalization(self.out_channels, swish=1.0), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb=None): + """ + Apply the block to a Tensor, NOT conditioned on a timestep embedding. + + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + assert emb is None + + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + encoder_channels=None, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels, swish=0.0) + self.qkv = conv_nd(1, channels, channels * 3, 1) + self.attention = QKVAttention(self.num_heads) + + if encoder_channels is not None: + self.encoder_kv = conv_nd(1, encoder_channels, channels * 2, 1) + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x, encoder_out=None, mask=None): + b, c, *spatial = x.shape + qkv = self.qkv(self.norm(x).view(b, c, -1)) + if encoder_out is not None: + encoder_out = self.encoder_kv(encoder_out) + h = self.attention(qkv, encoder_out, mask=mask) + else: + h = self.attention(qkv) + h = self.proj_out(h) + return x + h.reshape(b, c, *spatial) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv, encoder_kv=None, mask=None): + """ + Apply QKV attention. + + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + if encoder_kv is not None: + assert encoder_kv.shape[1] == self.n_heads * ch * 2 + ek, ev = encoder_kv.reshape(bs * self.n_heads, ch * 2, -1).split(ch, dim=1) + k = th.cat([ek, k], dim=-1) + v = th.cat([ev, v], dim=-1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum("bct,bcs->bts", q * scale, k * scale) + if mask is not None: + mask = F.pad(mask, (0, length), value=0.0) + mask = ( + mask.unsqueeze(1) + .expand(-1, self.n_heads, -1) + .reshape(bs * self.n_heads, 1, -1) + ) + weight = weight + mask + weight = th.softmax(weight, dim=-1) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param clip_dim: dimension of clip feature. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. 
+
+
+class UNetModel(nn.Module):
+    """
+    The full UNet model with attention and timestep embedding.
+
+    :param in_channels: channels in the input Tensor.
+    :param model_channels: base channel count for the model.
+    :param out_channels: channels in the output Tensor.
+    :param num_res_blocks: number of residual blocks per downsample.
+    :param attention_resolutions: a collection of downsample rates at which
+        attention will take place. May be a set, list, or tuple.
+        For example, if this contains 4, then at 4x downsampling, attention
+        will be used.
+    :param dropout: the dropout probability.
+    :param channel_mult: channel multiplier for each level of the UNet.
+    :param conv_resample: if True, use learned convolutions for upsampling and
+        downsampling.
+    :param dims: determines if the signal is 1D, 2D, or 3D.
+    :param clip_dim: dimension of the CLIP feature; if specified, the model is
+        conditioned on a CLIP representation.
+    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
+    :param num_heads: the number of attention heads in each attention layer.
+    :param num_head_channels: if specified, ignore num_heads and instead use
+        a fixed channel width per attention head.
+    :param num_heads_upsample: works with num_heads to set a different number
+        of heads for upsampling. Deprecated.
+    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
+    :param use_middle_attention: if True (default), place an attention block in
+        the middle of the UNet.
+    :param resblock_updown: use residual blocks for up/downsampling.
+    :param encoder_channels: channel width of an extra encoder sequence (e.g.
+        text features) attended to as additional keys/values in AttentionBlock.
+    :param use_time_embedding: if False, build the UNet without timestep
+        conditioning, using ResBlockNoTimeEmbedding.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        model_channels,
+        out_channels,
+        num_res_blocks,
+        attention_resolutions,
+        dropout=0,
+        channel_mult=(1, 2, 4, 8),
+        conv_resample=True,
+        dims=2,
+        clip_dim=None,
+        use_checkpoint=False,
+        num_heads=1,
+        num_head_channels=-1,
+        num_heads_upsample=-1,
+        use_scale_shift_norm=False,
+        use_middle_attention=True,
+        resblock_updown=False,
+        encoder_channels=None,
+        use_time_embedding=True,
+    ):
+        super().__init__()
+
+        if num_heads_upsample == -1:
+            num_heads_upsample = num_heads
+
+        self.in_channels = in_channels
+        self.model_channels = model_channels
+        self.out_channels = out_channels
+        self.num_res_blocks = num_res_blocks
+        self.attention_resolutions = attention_resolutions
+        self.dropout = dropout
+        self.channel_mult = channel_mult
+        self.conv_resample = conv_resample
+        self.clip_dim = clip_dim
+        self.use_checkpoint = use_checkpoint
+        self.num_heads = num_heads
+        self.num_head_channels = num_head_channels
+        self.num_heads_upsample = num_heads_upsample
+        self.use_middle_attention = use_middle_attention
+        self.use_time_embedding = use_time_embedding
+
+        if self.use_time_embedding:
+            time_embed_dim = model_channels * 4
+            self.time_embed = nn.Sequential(
+                linear(model_channels, time_embed_dim),
+                nn.SiLU(),
+                linear(time_embed_dim, time_embed_dim),
+            )
+
+            if self.clip_dim is not None:
+                self.clip_emb = nn.Linear(clip_dim, time_embed_dim)
+        else:
+            time_embed_dim = None
+
+        CustomResidualBlock = (
+            ResBlock if self.use_time_embedding else ResBlockNoTimeEmbedding
+        )
+        ch = input_ch = int(channel_mult[0] * model_channels)
+        self.input_blocks = nn.ModuleList(
+            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
+        )
+        self._feature_size = ch
+        input_block_chans = [ch]
+        ds = 1
+        for level, mult in enumerate(channel_mult):
+            for _ in range(num_res_blocks):
+                layers = [
+                    CustomResidualBlock(
+                        ch,
+                        time_embed_dim,
+                        dropout,
+                        out_channels=int(mult * model_channels),
+                        dims=dims,
+                        use_checkpoint=use_checkpoint,
+                        use_scale_shift_norm=use_scale_shift_norm,
+                    )
+                ]
+                ch = int(mult * model_channels)
+                if ds in attention_resolutions:
+                    layers.append(
+                        AttentionBlock(
+                            ch,
+                            use_checkpoint=use_checkpoint,
+                            num_heads=num_heads,
+                            num_head_channels=num_head_channels,
+                            encoder_channels=encoder_channels,
+                        )
+                    )
+                self.input_blocks.append(TimestepEmbedSequential(*layers))
+                self._feature_size += ch
+                input_block_chans.append(ch)
+            if level != len(channel_mult) - 1:
+                out_ch = ch
+                self.input_blocks.append(
+                    TimestepEmbedSequential(
+                        CustomResidualBlock(
+                            ch,
+                            time_embed_dim,
+                            dropout,
+                            out_channels=out_ch,
+                            dims=dims,
+                            use_checkpoint=use_checkpoint,
+                            use_scale_shift_norm=use_scale_shift_norm,
+                            down=True,
+                        )
+                        if resblock_updown
+                        else Downsample(
+                            ch, conv_resample, dims=dims, out_channels=out_ch
+                        )
+                    )
+                )
+                ch = out_ch
+                input_block_chans.append(ch)
+                ds *= 2
+                self._feature_size += ch
+
+        self.middle_block = TimestepEmbedSequential(
+            CustomResidualBlock(
+                ch,
+                time_embed_dim,
+                dropout,
+                dims=dims,
+                use_checkpoint=use_checkpoint,
+                use_scale_shift_norm=use_scale_shift_norm,
+            ),
+            *(
+                AttentionBlock(
+                    ch,
+                    use_checkpoint=use_checkpoint,
+                    num_heads=num_heads,
+                    num_head_channels=num_head_channels,
+                    encoder_channels=encoder_channels,
+                ),
+            )
+            if self.use_middle_attention
+            else tuple(),  # add AttentionBlock or not
+            CustomResidualBlock(
+                ch,
+                time_embed_dim,
+                dropout,
+                dims=dims,
+                use_checkpoint=use_checkpoint,
+                use_scale_shift_norm=use_scale_shift_norm,
+            ),
+        )
+        self._feature_size += ch
+
+        self.output_blocks = nn.ModuleList([])
+        for level, mult in list(enumerate(channel_mult))[::-1]:
+            for i in range(num_res_blocks + 1):
+                ich = input_block_chans.pop()
+                layers = [
+                    CustomResidualBlock(
+                        ch + ich,
+                        time_embed_dim,
+                        dropout,
+                        out_channels=int(model_channels * mult),
+                        dims=dims,
+                        use_checkpoint=use_checkpoint,
+                        use_scale_shift_norm=use_scale_shift_norm,
+                    )
+                ]
+                ch = int(model_channels * mult)
+                if ds in attention_resolutions:
+                    layers.append(
+                        AttentionBlock(
+                            ch,
+                            use_checkpoint=use_checkpoint,
+                            num_heads=num_heads_upsample,
+                            num_head_channels=num_head_channels,
+                            encoder_channels=encoder_channels,
+                        )
+                    )
+                if level and i == num_res_blocks:
+                    out_ch = ch
+                    layers.append(
+                        CustomResidualBlock(
+                            ch,
+                            time_embed_dim,
+                            dropout,
+                            out_channels=out_ch,
+                            dims=dims,
+                            use_checkpoint=use_checkpoint,
+                            use_scale_shift_norm=use_scale_shift_norm,
+                            up=True,
+                        )
+                        if resblock_updown
+                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
+                    )
+                    ds //= 2
+                self.output_blocks.append(TimestepEmbedSequential(*layers))
+                self._feature_size += ch
+
+        self.out = nn.Sequential(
+            normalization(ch, swish=1.0),
+            nn.Identity(),
+            zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
+        )
+
+    def forward(self, x, timesteps, y=None):
+        """
+        Apply the model to an input batch.
+
+        :param x: an [N x C x ...] Tensor of inputs.
+        :param timesteps: a 1-D batch of timesteps.
+        :param y: an [N x clip_dim] Tensor of CLIP representations, required if
+            and only if the model is clip-rep-conditional.
+        :return: an [N x C x ...] Tensor of outputs.
+        """
+        assert (y is not None) == (
+            self.clip_dim is not None
+        ), "must specify y if and only if the model is clip-rep-conditional"
+
+        hs = []
+        if self.use_time_embedding:
+            emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+            if self.clip_dim is not None:
+                emb = emb + self.clip_emb(y)
+        else:
+            emb = None
+
+        h = x
+        for module in self.input_blocks:
+            h = module(h, emb)
+            hs.append(h)
+        h = self.middle_block(h, emb)
+        for module in self.output_blocks:
+            h = th.cat([h, hs.pop()], dim=1)
+            h = module(h, emb)
+
+        return self.out(h)
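+
+
+# Illustrative instantiation (editorial comment; these sizes are made up and
+# much smaller than the Karlo configs in configs/karlo/):
+#
+#   unet = UNetModel(
+#       in_channels=3, model_channels=64, out_channels=3, num_res_blocks=2,
+#       attention_resolutions=(4,), channel_mult=(1, 2, 4), clip_dim=768,
+#   )
+#   x = th.randn(2, 3, 64, 64)          # [N x C x H x W] inputs
+#   t = th.tensor([10, 20])             # one timestep per batch element
+#   y = th.randn(2, 768)                # CLIP image embeddings
+#   eps = unet(x, t, y=y)               # -> [2, 3, 64, 64]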
+
+
+class SuperResUNetModel(UNetModel):
+    """
+    A UNetModel that performs super-resolution.
+
+    Expects an extra kwarg `low_res` to condition on a low-resolution image.
+    Assumes the low-resolution conditioning image has the same spatial shape
+    as the input.
+    """
+
+    def __init__(self, *args, **kwargs):
+        if "in_channels" in kwargs:
+            kwargs = dict(kwargs)
+            kwargs["in_channels"] = kwargs["in_channels"] * 2
+        else:
+            # Curse you, Python. Or really, just curse positional arguments :|.
+            args = list(args)
+            args[1] = args[1] * 2
+        super().__init__(*args, **kwargs)
+
+    def forward(self, x, timesteps, low_res=None, **kwargs):
+        assert low_res is not None, "SuperResUNetModel requires a `low_res` conditioning image"
+        _, _, new_height, new_width = x.shape
+        assert new_height == low_res.shape[2] and new_width == low_res.shape[3]
+
+        x = th.cat([x, low_res], dim=1)
+        return super().forward(x, timesteps, **kwargs)
+
+
+class PLMImUNet(UNetModel):
+    """
+    A UNetModel that conditions on text with a pretrained text encoder in CLIP.
+
+    :param text_ctx: number of text tokens to expect.
+    :param xf_width: width of the transformer.
+    :param clip_emb_mult: number of extra tokens obtained by projecting the
+        CLIP image feature.
+    :param clip_emb_type: type of conditioning embedding (fixed to the CLIP
+        image feature here).
+    :param clip_emb_drop: dropout ratio of the CLIP image feature for
+        classifier-free guidance.
+    """
+
+    def __init__(
+        self,
+        text_ctx,
+        xf_width,
+        *args,
+        clip_emb_mult=None,
+        clip_emb_type="image",
+        clip_emb_drop=0.0,
+        **kwargs,
+    ):
+        self.text_ctx = text_ctx
+        self.xf_width = xf_width
+        self.clip_emb_mult = clip_emb_mult
+        self.clip_emb_type = clip_emb_type
+        self.clip_emb_drop = clip_emb_drop
+
+        if not xf_width:
+            super().__init__(*args, **kwargs, encoder_channels=None)
+        else:
+            super().__init__(*args, **kwargs, encoder_channels=xf_width)
+
+        # Project text encoded feat seq from pre-trained text encoder in CLIP
+        self.text_seq_proj = nn.Sequential(
+            nn.Linear(self.clip_dim, xf_width),
+            LayerNorm(xf_width),
+        )
+        # Project CLIP text feat
+        self.text_feat_proj = nn.Linear(self.clip_dim, self.model_channels * 4)
+
+        assert clip_emb_mult is not None
+        assert clip_emb_type == "image"
+        assert self.clip_dim is not None, "CLIP representation dim should be specified"
+
+        self.clip_tok_proj = nn.Linear(
+            self.clip_dim, self.xf_width * self.clip_emb_mult
+        )
+        if self.clip_emb_drop > 0:
+            self.cf_param = nn.Parameter(th.empty(self.clip_dim, dtype=th.float32))
+
+    def proc_clip_emb_drop(self, feat):
+        if self.clip_emb_drop > 0:
+            bsz, feat_dim = feat.shape
+            assert (
+                feat_dim == self.clip_dim
+            ), f"CLIP input dim: {feat_dim}, model CLIP dim: {self.clip_dim}"
+            drop_idx = th.rand((bsz,), device=feat.device) < self.clip_emb_drop
+            feat = th.where(
+                drop_idx[..., None], self.cf_param[None].type_as(feat), feat
+            )
+        return feat
+
+    def forward(
+        self, x, timesteps, txt_feat=None, txt_feat_seq=None, mask=None, y=None
+    ):
+        bsz = x.shape[0]
+        hs = []
+        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
+        emb = emb + self.clip_emb(y)
+
+        xf_out = self.text_seq_proj(txt_feat_seq)
+        xf_out = xf_out.permute(0, 2, 1)
+        emb = emb + self.text_feat_proj(txt_feat)
+        xf_out = th.cat(
+            [
+                self.clip_tok_proj(y).reshape(bsz, -1, self.clip_emb_mult),
+                xf_out,
+            ],
+            dim=2,
+        )
+        mask = F.pad(mask, (self.clip_emb_mult, 0), value=True)
+        mask = th.where(mask, 0.0, float("-inf"))
+
+        h = x
+        for module in self.input_blocks:
+            h = module(h, emb, xf_out, mask=mask)
+            hs.append(h)
+        h = self.middle_block(h, emb, xf_out, mask=mask)
+        for module in self.output_blocks:
+            h = th.cat([h, hs.pop()], dim=1)
+            h = module(h, emb, xf_out, mask=mask)
+        h = self.out(h)
+
+        return h
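+
+
+# Editorial note (comment only): proc_clip_emb_drop implements the conditioning
+# dropout used to enable classifier-free guidance. With clip_emb_drop=0.1,
+# roughly 10% of the CLIP image embeddings in a batch are replaced by the
+# learned null embedding cf_param, so the decoder also models the
+# unconditional distribution.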
diff --git a/ldm/modules/karlo/kakao/modules/xf.py b/ldm/modules/karlo/kakao/modules/xf.py
new file mode 100644
index 0000000..66d7d4a
--- /dev/null
+++ b/ldm/modules/karlo/kakao/modules/xf.py
@@ -0,0 +1,231 @@
+# ------------------------------------------------------------------------------------
+# Adapted from the repos below:
+# (a) Guided-Diffusion (https://github.com/openai/guided-diffusion)
+# (b) CLIP ViT (https://github.com/openai/CLIP/)
+# ------------------------------------------------------------------------------------
+
+import math
+
+import torch as th
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .nn import timestep_embedding
+
+
+def convert_module_to_f16(param):
+    """
+    Convert primitive modules to float16.
+    """
+    if isinstance(param, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
+        param.weight.data = param.weight.data.half()
+        if param.bias is not None:
+            param.bias.data = param.bias.data.half()
+
+
+class LayerNorm(nn.LayerNorm):
+    """
+    Implementation that supports fp16 inputs but fp32 gains/biases.
+    """
+
+    def forward(self, x: th.Tensor):
+        return super().forward(x.float()).to(x.dtype)
+
+
+class MultiheadAttention(nn.Module):
+    def __init__(self, n_ctx, width, heads):
+        super().__init__()
+        self.n_ctx = n_ctx
+        self.width = width
+        self.heads = heads
+        self.c_qkv = nn.Linear(width, width * 3)
+        self.c_proj = nn.Linear(width, width)
+        self.attention = QKVMultiheadAttention(heads, n_ctx)
+
+    def forward(self, x, mask=None):
+        x = self.c_qkv(x)
+        x = self.attention(x, mask=mask)
+        x = self.c_proj(x)
+        return x
+
+
+class MLP(nn.Module):
+    def __init__(self, width):
+        super().__init__()
+        self.width = width
+        self.c_fc = nn.Linear(width, width * 4)
+        self.c_proj = nn.Linear(width * 4, width)
+        self.gelu = nn.GELU()
+
+    def forward(self, x):
+        return self.c_proj(self.gelu(self.c_fc(x)))
+
+
+class QKVMultiheadAttention(nn.Module):
+    def __init__(self, n_heads: int, n_ctx: int):
+        super().__init__()
+        self.n_heads = n_heads
+        self.n_ctx = n_ctx
+
+    def forward(self, qkv, mask=None):
+        bs, n_ctx, width = qkv.shape
+        attn_ch = width // self.n_heads // 3
+        scale = 1 / math.sqrt(math.sqrt(attn_ch))
+        qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
+        q, k, v = th.split(qkv, attn_ch, dim=-1)
+        weight = th.einsum("bthc,bshc->bhts", q * scale, k * scale)
+        wdtype = weight.dtype
+        if mask is not None:
+            weight = weight + mask[:, None, ...]
+        weight = th.softmax(weight, dim=-1).type(wdtype)
+        return th.einsum("bhts,bshc->bthc", weight, v).reshape(bs, n_ctx, -1)
+
+
+class ResidualAttentionBlock(nn.Module):
+    def __init__(
+        self,
+        n_ctx: int,
+        width: int,
+        heads: int,
+    ):
+        super().__init__()
+
+        self.attn = MultiheadAttention(
+            n_ctx,
+            width,
+            heads,
+        )
+        self.ln_1 = LayerNorm(width)
+        self.mlp = MLP(width)
+        self.ln_2 = LayerNorm(width)
+
+    def forward(self, x, mask=None):
+        x = x + self.attn(self.ln_1(x), mask=mask)
+        x = x + self.mlp(self.ln_2(x))
+        return x
+
+
+class Transformer(nn.Module):
+    def __init__(
+        self,
+        n_ctx: int,
+        width: int,
+        layers: int,
+        heads: int,
+    ):
+        super().__init__()
+        self.n_ctx = n_ctx
+        self.width = width
+        self.layers = layers
+        self.resblocks = nn.ModuleList(
+            [
+                ResidualAttentionBlock(
+                    n_ctx,
+                    width,
+                    heads,
+                )
+                for _ in range(layers)
+            ]
+        )
+
+    def forward(self, x, mask=None):
+        for block in self.resblocks:
+            x = block(x, mask=mask)
+        return x
+
+
+class PriorTransformer(nn.Module):
+    """
+    A causal Transformer that predicts a CLIP image embedding, conditioned on
+    the CLIP text embedding and the encoded text token sequence.
+
+    :param text_ctx: number of text tokens to expect.
+    :param xf_width: width of the transformer.
+    :param xf_layers: depth of the transformer.
+    :param xf_heads: heads in the transformer.
+    :param xf_final_ln: if True, apply a LayerNorm to the transformer output.
+    :param clip_dim: dimension of the CLIP feature.
+ """ + + def __init__( + self, + text_ctx, + xf_width, + xf_layers, + xf_heads, + xf_final_ln, + clip_dim, + ): + super().__init__() + + self.text_ctx = text_ctx + self.xf_width = xf_width + self.xf_layers = xf_layers + self.xf_heads = xf_heads + self.clip_dim = clip_dim + self.ext_len = 4 + + self.time_embed = nn.Sequential( + nn.Linear(xf_width, xf_width), + nn.SiLU(), + nn.Linear(xf_width, xf_width), + ) + self.text_enc_proj = nn.Linear(clip_dim, xf_width) + self.text_emb_proj = nn.Linear(clip_dim, xf_width) + self.clip_img_proj = nn.Linear(clip_dim, xf_width) + self.out_proj = nn.Linear(xf_width, clip_dim) + self.transformer = Transformer( + text_ctx + self.ext_len, + xf_width, + xf_layers, + xf_heads, + ) + if xf_final_ln: + self.final_ln = LayerNorm(xf_width) + else: + self.final_ln = None + + self.positional_embedding = nn.Parameter( + th.empty(1, text_ctx + self.ext_len, xf_width) + ) + self.prd_emb = nn.Parameter(th.randn((1, 1, xf_width))) + + nn.init.normal_(self.prd_emb, std=0.01) + nn.init.normal_(self.positional_embedding, std=0.01) + + def forward( + self, + x, + timesteps, + text_emb=None, + text_enc=None, + mask=None, + causal_mask=None, + ): + bsz = x.shape[0] + mask = F.pad(mask, (0, self.ext_len), value=True) + + t_emb = self.time_embed(timestep_embedding(timesteps, self.xf_width)) + text_enc = self.text_enc_proj(text_enc) + text_emb = self.text_emb_proj(text_emb) + x = self.clip_img_proj(x) + + input_seq = [ + text_enc, + text_emb[:, None, :], + t_emb[:, None, :], + x[:, None, :], + self.prd_emb.to(x.dtype).expand(bsz, -1, -1), + ] + input = th.cat(input_seq, dim=1) + input = input + self.positional_embedding.to(input.dtype) + + mask = th.where(mask, 0.0, float("-inf")) + mask = (mask[:, None, :] + causal_mask).to(input.dtype) + + out = self.transformer(input, mask=mask) + if self.final_ln is not None: + out = self.final_ln(out) + + out = self.out_proj(out[:, -1]) + + return out diff --git a/ldm/modules/karlo/kakao/sampler.py b/ldm/modules/karlo/kakao/sampler.py new file mode 100644 index 0000000..b56bf2f --- /dev/null +++ b/ldm/modules/karlo/kakao/sampler.py @@ -0,0 +1,272 @@ +# ------------------------------------------------------------------------------------ +# Karlo-v1.0.alpha +# Copyright (c) 2022 KakaoBrain. All Rights Reserved. + +# source: https://github.com/kakaobrain/karlo/blob/3c68a50a16d76b48a15c181d1c5a5e0879a90f85/karlo/sampler/t2i.py#L15 +# ------------------------------------------------------------------------------------ + +from typing import Iterator + +import torch +import torchvision.transforms.functional as TVF +from torchvision.transforms import InterpolationMode + +from .template import BaseSampler, CKPT_PATH + + +class T2ISampler(BaseSampler): + """ + A sampler for text-to-image generation. + :param root_dir: directory for model checkpoints. 
+    :param sampling_type: ["default", "fast"]
+    """
+
+    def __init__(
+        self,
+        root_dir: str,
+        sampling_type: str = "default",
+    ):
+        super().__init__(root_dir, sampling_type)
+
+    @classmethod
+    def from_pretrained(
+        cls,
+        root_dir: str,
+        clip_model_path: str,
+        clip_stat_path: str,
+        sampling_type: str = "default",
+    ):
+        model = cls(
+            root_dir=root_dir,
+            sampling_type=sampling_type,
+        )
+        model.load_clip(clip_model_path)
+        model.load_prior(
+            CKPT_PATH["prior"],
+            clip_stat_path=clip_stat_path,
+            prior_config="configs/karlo/prior_1B_vit_l.yaml",
+        )
+        model.load_decoder(CKPT_PATH["decoder"], decoder_config="configs/karlo/decoder_900M_vit_l.yaml")
+        model.load_sr_64_256(CKPT_PATH["sr_256"], sr_config="configs/karlo/improved_sr_64_256_1.4B.yaml")
+        return model
+
+    def preprocess(
+        self,
+        prompt: str,
+        bsz: int,
+    ):
+        """Setup prompts & cfg scales"""
+        prompts_batch = [prompt for _ in range(bsz)]
+
+        prior_cf_scales_batch = [self._prior_cf_scale] * len(prompts_batch)
+        prior_cf_scales_batch = torch.tensor(prior_cf_scales_batch, device="cuda")
+
+        decoder_cf_scales_batch = [self._decoder_cf_scale] * len(prompts_batch)
+        decoder_cf_scales_batch = torch.tensor(decoder_cf_scales_batch, device="cuda")
+
+        """ Get CLIP text feature """
+        clip_model = self._clip
+        tokenizer = self._tokenizer
+        max_txt_length = self._prior.model.text_ctx
+
+        tok, mask = tokenizer.padded_tokens_and_mask(prompts_batch, max_txt_length)
+        cf_token, cf_mask = tokenizer.padded_tokens_and_mask([""], max_txt_length)
+        if cf_token.shape != tok.shape:
+            cf_token = cf_token.expand(tok.shape[0], -1)
+            cf_mask = cf_mask.expand(tok.shape[0], -1)
+
+        tok = torch.cat([tok, cf_token], dim=0)
+        mask = torch.cat([mask, cf_mask], dim=0)
+
+        tok, mask = tok.to(device="cuda"), mask.to(device="cuda")
+        txt_feat, txt_feat_seq = clip_model.encode_text(tok)
+
+        return (
+            prompts_batch,
+            prior_cf_scales_batch,
+            decoder_cf_scales_batch,
+            txt_feat,
+            txt_feat_seq,
+            tok,
+            mask,
+        )
+
+    def __call__(
+        self,
+        prompt: str,
+        bsz: int,
+        progressive_mode=None,
+    ) -> Iterator[torch.Tensor]:
+        assert progressive_mode in ("loop", "stage", "final")
+        with torch.no_grad(), torch.cuda.amp.autocast():
+            (
+                prompts_batch,
+                prior_cf_scales_batch,
+                decoder_cf_scales_batch,
+                txt_feat,
+                txt_feat_seq,
+                tok,
+                mask,
+            ) = self.preprocess(
+                prompt,
+                bsz,
+            )
+
+            """ Transform CLIP text feature into image feature """
+            img_feat = self._prior(
+                txt_feat,
+                txt_feat_seq,
+                mask,
+                prior_cf_scales_batch,
+                timestep_respacing=self._prior_sm,
+            )
+
+            """ Generate 64x64px images """
+            images_64_outputs = self._decoder(
+                txt_feat,
+                txt_feat_seq,
+                tok,
+                mask,
+                img_feat,
+                cf_guidance_scales=decoder_cf_scales_batch,
+                timestep_respacing=self._decoder_sm,
+            )
+
+            images_64 = None
+            for out in images_64_outputs:
+                images_64 = out
+                if progressive_mode == "loop":
+                    yield torch.clamp(out * 0.5 + 0.5, 0.0, 1.0)
+            if progressive_mode == "stage":
+                yield torch.clamp(images_64 * 0.5 + 0.5, 0.0, 1.0)
+
+            images_64 = torch.clamp(images_64, -1, 1)
+
+            """ Upsample 64x64 to 256x256 """
+            images_256 = TVF.resize(
+                images_64,
+                [256, 256],
+                interpolation=InterpolationMode.BICUBIC,
+                antialias=True,
+            )
+            images_256_outputs = self._sr_64_256(
+                images_256, timestep_respacing=self._sr_sm
+            )
+
+            for out in images_256_outputs:
+                images_256 = out
+                if progressive_mode == "loop":
+                    yield torch.clamp(out * 0.5 + 0.5, 0.0, 1.0)
+            if progressive_mode == "stage":
+                yield torch.clamp(images_256 * 0.5 + 0.5, 0.0, 1.0)
+
+            yield torch.clamp(images_256 * 0.5 + 0.5, 0.0, 1.0)
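+
+
+# Illustrative usage (editorial comment; paths match those used in
+# scripts/streamlit/stablekarlo.py):
+#
+#   sampler = T2ISampler.from_pretrained(
+#       root_dir="checkpoints/karlo_models",
+#       clip_model_path="ViT-L-14.pt",
+#       clip_stat_path="ViT-L-14_stats.th",
+#   )
+#   # with progressive_mode="final" the generator yields once, returning the
+#   # finished batch as a float tensor in [0, 1] of shape [bsz, 3, 256, 256]
+#   images = next(iter(sampler("a photo of a corgi", bsz=2, progressive_mode="final")))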
+
+
+class PriorSampler(BaseSampler):
+    """
+    A sampler for text-to-image generation that runs only the prior.
+
+    :param root_dir: directory for model checkpoints.
+    :param sampling_type: ["default", "fast"]
+    """
+
+    def __init__(
+        self,
+        root_dir: str,
+        sampling_type: str = "default",
+    ):
+        super().__init__(root_dir, sampling_type)
+
+    @classmethod
+    def from_pretrained(
+        cls,
+        root_dir: str,
+        clip_model_path: str,
+        clip_stat_path: str,
+        sampling_type: str = "default",
+    ):
+        model = cls(
+            root_dir=root_dir,
+            sampling_type=sampling_type,
+        )
+        model.load_clip(clip_model_path)
+        model.load_prior(
+            CKPT_PATH["prior"],
+            clip_stat_path=clip_stat_path,
+            prior_config="configs/karlo/prior_1B_vit_l.yaml",
+        )
+        return model
+
+    def preprocess(
+        self,
+        prompt: str,
+        bsz: int,
+    ):
+        """Setup prompts & cfg scales"""
+        prompts_batch = [prompt for _ in range(bsz)]
+
+        prior_cf_scales_batch = [self._prior_cf_scale] * len(prompts_batch)
+        prior_cf_scales_batch = torch.tensor(prior_cf_scales_batch, device="cuda")
+
+        # kept for interface parity with T2ISampler; no decoder is loaded here
+        decoder_cf_scales_batch = [self._decoder_cf_scale] * len(prompts_batch)
+        decoder_cf_scales_batch = torch.tensor(decoder_cf_scales_batch, device="cuda")
+
+        """ Get CLIP text feature """
+        clip_model = self._clip
+        tokenizer = self._tokenizer
+        max_txt_length = self._prior.model.text_ctx
+
+        tok, mask = tokenizer.padded_tokens_and_mask(prompts_batch, max_txt_length)
+        cf_token, cf_mask = tokenizer.padded_tokens_and_mask([""], max_txt_length)
+        if cf_token.shape != tok.shape:
+            cf_token = cf_token.expand(tok.shape[0], -1)
+            cf_mask = cf_mask.expand(tok.shape[0], -1)
+
+        tok = torch.cat([tok, cf_token], dim=0)
+        mask = torch.cat([mask, cf_mask], dim=0)
+
+        tok, mask = tok.to(device="cuda"), mask.to(device="cuda")
+        txt_feat, txt_feat_seq = clip_model.encode_text(tok)
+
+        return (
+            prompts_batch,
+            prior_cf_scales_batch,
+            decoder_cf_scales_batch,
+            txt_feat,
+            txt_feat_seq,
+            tok,
+            mask,
+        )
+
+    def __call__(
+        self,
+        prompt: str,
+        bsz: int,
+        progressive_mode=None,
+    ) -> Iterator[torch.Tensor]:
+        assert progressive_mode in ("loop", "stage", "final")
+        with torch.no_grad(), torch.cuda.amp.autocast():
+            (
+                prompts_batch,
+                prior_cf_scales_batch,
+                decoder_cf_scales_batch,
+                txt_feat,
+                txt_feat_seq,
+                tok,
+                mask,
+            ) = self.preprocess(
+                prompt,
+                bsz,
+            )
+
+            """ Transform CLIP text feature into image feature """
+            img_feat = self._prior(
+                txt_feat,
+                txt_feat_seq,
+                mask,
+                prior_cf_scales_batch,
+                timestep_respacing=self._prior_sm,
+            )
+
+            yield img_feat
diff --git a/ldm/modules/karlo/kakao/template.py b/ldm/modules/karlo/kakao/template.py
new file mode 100644
index 0000000..949e80e
--- /dev/null
+++ b/ldm/modules/karlo/kakao/template.py
@@ -0,0 +1,141 @@
+# ------------------------------------------------------------------------------------
+# Karlo-v1.0.alpha
+# Copyright (c) 2022 KakaoBrain. All Rights Reserved.
+# ------------------------------------------------------------------------------------
+
+import os
+import logging
+import torch
+
+from omegaconf import OmegaConf
+
+from ldm.modules.karlo.kakao.models.clip import CustomizedCLIP, CustomizedTokenizer
+from ldm.modules.karlo.kakao.models.prior_model import PriorDiffusionModel
+from ldm.modules.karlo.kakao.models.decoder_model import Text2ImProgressiveModel
+from ldm.modules.karlo.kakao.models.sr_64_256 import ImprovedSupRes64to256ProgressiveModel
+
+
+SAMPLING_CONF = {
+    "default": {
+        "prior_sm": "25",
+        "prior_n_samples": 1,
+        "prior_cf_scale": 4.0,
+        "decoder_sm": "50",
+        "decoder_cf_scale": 8.0,
+        "sr_sm": "7",
+    },
+    "fast": {
+        "prior_sm": "25",
+        "prior_n_samples": 1,
+        "prior_cf_scale": 4.0,
+        "decoder_sm": "25",
+        "decoder_cf_scale": 8.0,
+        "sr_sm": "7",
+    },
+}
+
+CKPT_PATH = {
+    "prior": "prior-ckpt-step=01000000-of-01000000.ckpt",
+    "decoder": "decoder-ckpt-step=01000000-of-01000000.ckpt",
+    "sr_256": "improved-sr-ckpt-step=1.2M.ckpt",
+}
+
+
+class BaseSampler:
+    _PRIOR_CLASS = PriorDiffusionModel
+    _DECODER_CLASS = Text2ImProgressiveModel
+    _SR256_CLASS = ImprovedSupRes64to256ProgressiveModel
+
+    def __init__(
+        self,
+        root_dir: str,
+        sampling_type: str = "fast",
+    ):
+        self._root_dir = root_dir
+
+        sampling_conf = SAMPLING_CONF[sampling_type]
+        self._prior_sm = sampling_conf["prior_sm"]
+        self._prior_n_samples = sampling_conf["prior_n_samples"]
+        self._prior_cf_scale = sampling_conf["prior_cf_scale"]
+
+        assert self._prior_n_samples == 1
+
+        self._decoder_sm = sampling_conf["decoder_sm"]
+        self._decoder_cf_scale = sampling_conf["decoder_cf_scale"]
+
+        self._sr_sm = sampling_conf["sr_sm"]
+
+    def __repr__(self):
+        line = ""
+        line += f"Prior, sampling method: {self._prior_sm}, cf_scale: {self._prior_cf_scale}\n"
+        line += f"Decoder, sampling method: {self._decoder_sm}, cf_scale: {self._decoder_cf_scale}\n"
+        line += f"SR(64->256), sampling method: {self._sr_sm}"
+
+        return line
+
+    def load_clip(self, clip_path: str):
+        clip = CustomizedCLIP.load_from_checkpoint(
+            os.path.join(self._root_dir, clip_path)
+        )
+        clip = torch.jit.script(clip)
+        clip.cuda()
+        clip.eval()
+
+        self._clip = clip
+        self._tokenizer = CustomizedTokenizer()
+
+    def load_prior(
+        self,
+        ckpt_path: str,
+        clip_stat_path: str,
+        prior_config: str = "configs/karlo/prior_1B_vit_l.yaml",
+    ):
+        logging.info(f"Loading prior: {ckpt_path}")
+
+        config = OmegaConf.load(prior_config)
+        clip_mean, clip_std = torch.load(
+            os.path.join(self._root_dir, clip_stat_path), map_location="cpu"
+        )
+
+        prior = self._PRIOR_CLASS.load_from_checkpoint(
+            config,
+            self._tokenizer,
+            clip_mean,
+            clip_std,
+            os.path.join(self._root_dir, ckpt_path),
+            strict=True,
+        )
+        prior.cuda()
+        prior.eval()
+        logging.info("done.")
+
+        self._prior = prior
+
+    def load_decoder(self, ckpt_path: str, decoder_config: str = "configs/karlo/decoder_900M_vit_l.yaml"):
+        logging.info(f"Loading decoder: {ckpt_path}")
+
+        config = OmegaConf.load(decoder_config)
+        decoder = self._DECODER_CLASS.load_from_checkpoint(
+            config,
+            self._tokenizer,
+            os.path.join(self._root_dir, ckpt_path),
+            strict=True,
+        )
+        decoder.cuda()
+        decoder.eval()
+        logging.info("done.")
+
+        self._decoder = decoder
+
+    def load_sr_64_256(self, ckpt_path: str, sr_config: str = "configs/karlo/improved_sr_64_256_1.4B.yaml"):
+        logging.info(f"Loading SR(64->256): {ckpt_path}")
+
+        config = OmegaConf.load(sr_config)
+        sr = self._SR256_CLASS.load_from_checkpoint(
+            config, os.path.join(self._root_dir, ckpt_path), strict=True
+        )
+        sr.cuda()
+        sr.eval()
+        logging.info("done.")
+
+        self._sr_64_256 = sr
\ No newline at end of file
diff --git a/scripts/streamlit/stablekarlo.py b/scripts/streamlit/stablekarlo.py
new file mode 100644
index 0000000..e57500c
--- /dev/null
+++ b/scripts/streamlit/stablekarlo.py
@@ -0,0 +1,381 @@
+import importlib
+import os
+
+import streamlit as st
+import torch
+import numpy as np
+import PIL
+from omegaconf import OmegaConf
+from PIL import Image
+from tqdm import trange
+from torch import autocast
+from einops import rearrange, repeat
+from pytorch_lightning import seed_everything
+from contextlib import nullcontext
+
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.models.diffusion.plms import PLMSSampler
+from ldm.models.diffusion.dpm_solver import DPMSolverSampler
+
+
+torch.set_grad_enabled(False)
+
+PROMPTS_ROOT = "scripts/prompts/"
+SAVE_PATH = "outputs/demo/stable-karlo/"
+
+VERSION2SPECS = {
+    "Stable Karlo": {"H": 768, "W": 768, "C": 4, "f": 8},
+    "Full Karlo": {},
+}
+
+
+def get_obj_from_str(string, reload=False):
+    module, cls = string.rsplit(".", 1)
+    importlib.invalidate_caches()
+    if reload:
+        module_imp = importlib.import_module(module)
+        importlib.reload(module_imp)
+    return getattr(importlib.import_module(module, package=None), cls)
+
+
+def instantiate_from_config(config):
+    if "target" not in config:
+        raise KeyError("Expected key `target` to instantiate.")
+    return get_obj_from_str(config["target"])(**config.get("params", dict()))
+
+
+def get_interactive_image():
+    image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
+    if image is not None:
+        image = Image.open(image)
+        if image.mode != "RGB":
+            image = image.convert("RGB")
+    return image
+
+
+def load_img(display=True):
+    image = get_interactive_image()
+    # fail with a clear message instead of an AttributeError further down
+    if image is None:
+        raise ValueError("No input image uploaded, please provide one above.")
+    if display:
+        st.image(image)
+    w, h = image.size
+    print(f"loaded input image of size ({w}, {h})")
+    w, h = map(lambda x: x - x % 64, (w, h))
+    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
+    image = np.array(image).astype(np.float32) / 255.0
+    image = image[None].transpose(0, 3, 1, 2)
+    image = torch.from_numpy(image)
+    return 2. * image - 1.
+
+
+def get_init_img(batch_size=1):
+    init_image = load_img().cuda()
+    init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
+    return init_image
+
+
+def sample(
+    model,
+    prompt,
+    n_runs=3,
+    n_samples=2,
+    H=512,
+    W=512,
+    C=4,
+    f=8,
+    scale=10.0,
+    ddim_steps=50,
+    ddim_eta=0.0,
+    callback=None,
+    skip_single_save=False,
+    save_grid=True,
+    ucg_schedule=None,
+    negative_prompt="",
+    adm_cond=None,
+    adm_uc=None,
+    use_full_precision=False,
+    only_adm_cond=False
+):
+    batch_size = n_samples
+    precision_scope = autocast if not use_full_precision else nullcontext
+    if use_full_precision:
+        st.warning(f"Sampling {model.__class__.__name__} at full precision.")
+    if isinstance(prompt, str):
+        prompt = [prompt]
+    prompts = batch_size * prompt
+
+    outputs = st.empty()
+
+    with precision_scope("cuda"):
+        with model.ema_scope():
+            all_samples = list()
+            for n in trange(n_runs, desc="Sampling"):
+                shape = [C, H // f, W // f]
+                if not only_adm_cond:
+                    uc = None
+                    if scale != 1.0:
+                        uc = model.get_learned_conditioning(batch_size * [negative_prompt])
+                    if isinstance(prompts, tuple):
+                        prompts = list(prompts)
+                    c = model.get_learned_conditioning(prompts)
+                    # Compared by class name because the class is not imported in
+                    # this script; models of this type appear to return tuples from
+                    # get_learned_conditioning, so take the conditioning tensor.
+                    if model.__class__.__name__ == "Txt2ImgDiffusionWithPooledInput":
+                        c, uc = c[0], uc[0]
+
+                if adm_cond is not None:
+                    if adm_cond.shape[0] == 1:
+                        adm_cond = repeat(adm_cond, '1 ... -> b ...', b=batch_size)
+                    if adm_uc is None:
+                        st.warning("Not guiding via c_adm")
+                        adm_uc = adm_cond
+                    elif adm_uc.shape[0] == 1:
+                        adm_uc = repeat(adm_uc, '1 ... -> b ...', b=batch_size)
+                    if not only_adm_cond:
+                        c = {"c_crossattn": [c], "c_adm": adm_cond}
+                        uc = {"c_crossattn": [uc], "c_adm": adm_uc}
+                    else:
+                        c = adm_cond
+                        uc = adm_uc
+                # NOTE: `sampler` is the module-level sampler instance selected in
+                # the __main__ block below.
+                samples_ddim, _ = sampler.sample(
+                    S=ddim_steps,
+                    conditioning=c,
+                    batch_size=batch_size,
+                    shape=shape,
+                    verbose=False,
+                    unconditional_guidance_scale=scale,
+                    unconditional_conditioning=uc,
+                    eta=ddim_eta,
+                    x_T=None,
+                    callback=callback,
+                    ucg_schedule=ucg_schedule,
+                )
+
+                x_samples = model.decode_first_stage(samples_ddim)
+                x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
+
+                if not skip_single_save:
+                    base_count = len(os.listdir(os.path.join(SAVE_PATH, "samples")))
+                    for x_sample in x_samples:
+                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+                        Image.fromarray(x_sample.astype(np.uint8)).save(
+                            os.path.join(SAVE_PATH, "samples", f"{base_count:09}.png"))
+                        base_count += 1
+
+                all_samples.append(x_samples)
+
+            # get grid of all samples
+            grid = torch.stack(all_samples, 0)
+            grid = rearrange(grid, 'n b c h w -> (n h) (b w) c')
+            outputs.image(grid.cpu().numpy())
+
+            # additionally, save grid
+            grid = Image.fromarray((255. * grid.cpu().numpy()).astype(np.uint8))
+            if save_grid:
+                grid_count = len(os.listdir(SAVE_PATH)) - 1
+                grid.save(os.path.join(SAVE_PATH, f'grid-{grid_count:06}.png'))
+
+    return x_samples
+
+
+def make_oscillating_guidance_schedule(num_steps, max_weight=15., min_weight=1.):
+    schedule = list()
+    for i in range(num_steps):
+        if float(i / num_steps) < 0.1:
+            schedule.append(max_weight)
+        elif i % 2 == 0:
+            schedule.append(min_weight)
+        else:
+            schedule.append(max_weight)
+    print(f"OSCILLATING GUIDANCE SCHEDULE: \n {schedule}")
+    return schedule
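+
+
+# Worked example (editorial comment): make_oscillating_guidance_schedule(10, 15., 1.)
+# keeps full guidance for the first 10% of steps and then alternates between the
+# max and min weights:
+#   [15.0, 15.0, 1.0, 15.0, 1.0, 15.0, 1.0, 15.0, 1.0, 15.0]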
"global_step" in pl_sd: + msg = f"This is global step {pl_sd['global_step']}. " + if "model_ema.num_updates" in pl_sd["state_dict"]: + msg += f"And we got {pl_sd['state_dict']['model_ema.num_updates']} EMA updates." + global_step = pl_sd.get("global_step", "?") + sd = pl_sd["state_dict"] + if vae_sd is not None: + for k in sd.keys(): + if "first_stage" in k: + sd[k] = vae_sd[k[len("first_stage_model."):]] + + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + + model.cuda() + model.eval() + print(f"Loaded global step {global_step}") + return model, msg + + +if __name__ == "__main__": + st.title("Stable Karlo") + mode = "txt2img" + version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0) + use_karlo = st.checkbox("Use KARLO prior", False) + state = init(version=version, vae_version=vae_version, load_karlo_prior=use_karlo) + st.info(state["msg"]) + prompt = st.text_input("Prompt", "a professional photograph of an astronaut riding a horse") + negative_prompt = st.text_input("Negative Prompt", "") + scale = st.number_input("cfg-scale", value=10., min_value=-100., max_value=100.) + number_rows = st.number_input("num rows", value=2, min_value=1, max_value=10) + number_cols = st.number_input("num cols", value=2, min_value=1, max_value=10) + default_steps = 25 + steps = st.sidebar.number_input("steps", value=default_steps, min_value=1, max_value=1000) + eta = st.sidebar.number_input("eta (DDIM)", value=0., min_value=0., max_value=1.) + force_full_precision = st.sidebar.checkbox("Force FP32", False) + if version != "Full Karlo": + H = st.sidebar.number_input("H", value=VERSION2SPECS[version]["H"], min_value=64, max_value=2048) + W = st.sidebar.number_input("W", value=VERSION2SPECS[version]["W"], min_value=64, max_value=2048) + C = VERSION2SPECS[version]["C"] + f = VERSION2SPECS[version]["f"] + + SAVE_PATH = os.path.join(SAVE_PATH, version + "_" + vae_version + "-decoder") + os.makedirs(os.path.join(SAVE_PATH, "samples"), exist_ok=True) + + seed = st.sidebar.number_input("seed", value=42, min_value=0, max_value=int(1e9)) + seed_everything(seed) + + ucg_schedule = None + sampler = st.sidebar.selectbox("Sampler", ["DDIM", "PLMS", "DPM"], 2) + if version == "Full Karlo": + pass + else: + if sampler == "PLMS": + st.warning("NOTE: Some models (such as v-pred) currently only support DDIM/DPM sampling here") + sampler = PLMSSampler(state["model"]) + elif sampler == "DPM": + st.warning("NOTE: Using DPM sampler with default sampling parameters (DPM-2)") + sampler = DPMSolverSampler(state["model"]) + elif sampler == "DDIM": + sampler = DDIMSampler(state["model"]) + if st.checkbox("Try oscillating guidance?", False): + ucg_schedule = make_oscillating_guidance_schedule(num_steps=steps, max_weight=scale, min_weight=1.) 
+
+
+if __name__ == "__main__":
+    st.title("Stable Karlo")
+    mode = "txt2img"
+    version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0)
+    use_karlo = st.checkbox("Use KARLO prior", False)
+    state = init(version=version, load_karlo_prior=use_karlo)
+    st.info(state["msg"])
+    prompt = st.text_input("Prompt", "a professional photograph of an astronaut riding a horse")
+    negative_prompt = st.text_input("Negative Prompt", "")
+    scale = st.number_input("cfg-scale", value=10., min_value=-100., max_value=100.)
+    number_rows = st.number_input("num rows", value=2, min_value=1, max_value=10)
+    number_cols = st.number_input("num cols", value=2, min_value=1, max_value=10)
+    default_steps = 25
+    steps = st.sidebar.number_input("steps", value=default_steps, min_value=1, max_value=1000)
+    eta = st.sidebar.number_input("eta (DDIM)", value=0., min_value=0., max_value=1.)
+    force_full_precision = st.sidebar.checkbox("Force FP32", False)
+    if version != "Full Karlo":
+        H = st.sidebar.number_input("H", value=VERSION2SPECS[version]["H"], min_value=64, max_value=2048)
+        W = st.sidebar.number_input("W", value=VERSION2SPECS[version]["W"], min_value=64, max_value=2048)
+        C = VERSION2SPECS[version]["C"]
+        f = VERSION2SPECS[version]["f"]
+
+    SAVE_PATH = os.path.join(SAVE_PATH, version)
+    os.makedirs(os.path.join(SAVE_PATH, "samples"), exist_ok=True)
+
+    seed = st.sidebar.number_input("seed", value=42, min_value=0, max_value=int(1e9))
+    seed_everything(seed)
+
+    ucg_schedule = None
+    sampler = st.sidebar.selectbox("Sampler", ["DDIM", "PLMS", "DPM"], 2)
+    if version == "Full Karlo":
+        pass
+    else:
+        if sampler == "PLMS":
+            st.warning("NOTE: Some models (such as v-pred) currently only support DDIM/DPM sampling here")
+            sampler = PLMSSampler(state["model"])
+        elif sampler == "DPM":
+            st.warning("NOTE: Using DPM sampler with default sampling parameters (DPM-2)")
+            sampler = DPMSolverSampler(state["model"])
+        elif sampler == "DDIM":
+            sampler = DDIMSampler(state["model"])
+            if st.checkbox("Try oscillating guidance?", False):
+                ucg_schedule = make_oscillating_guidance_schedule(num_steps=steps, max_weight=scale, min_weight=1.)
+        else:
+            raise ValueError(f"unknown sampler {sampler}!")
+
+    adm_cond, adm_uc = None, None
+    if use_karlo:
+        # uses the prior
+        karlo_sampler = state["karlo_prior"]
+        with torch.no_grad():
+            karlo_prediction = iter(
+                karlo_sampler(
+                    prompt=prompt,
+                    bsz=number_cols,
+                    progressive_mode="final",
+                )
+            ).__next__()
+        adm_cond = karlo_prediction
+        adm_uc = torch.zeros_like(karlo_prediction)
+    elif version != "Full Karlo":
+        # image-variation mode: condition on the CLIP embedding of an uploaded image
+        init_img = get_init_img(batch_size=number_cols)
+        with torch.no_grad():
+            adm_cond = state["model"].embedder(init_img)
+            adm_uc = torch.zeros_like(adm_cond)
+
+    if st.button("Sample"):
+        print("running prompt:", prompt)
+        st.text("Sampling")
+        t_progress = st.progress(0)
+        result = st.empty()
+
+        def t_callback(t):
+            t_progress.progress(min((t + 1) / steps, 1.))
+
+        if version == "Full Karlo":
+            outputs = st.empty()
+            karlo_sampler = state["karlo_prior"]
+            all_samples = list()
+            with torch.no_grad():
+                for _ in range(number_rows):
+                    karlo_prediction = iter(
+                        karlo_sampler(
+                            prompt=prompt,
+                            bsz=number_cols,
+                            progressive_mode="final",
+                        )
+                    ).__next__()
+                    all_samples.append(karlo_prediction)
+                grid = torch.stack(all_samples, 0)
+                grid = rearrange(grid, 'n b c h w -> (n h) (b w) c')
+                outputs.image(grid.cpu().numpy())
+        else:
+            samples = sample(
+                state["model"],
+                prompt,
+                n_runs=number_rows,
+                n_samples=number_cols,
+                H=H, W=W, C=C, f=f,
+                scale=scale,
+                ddim_steps=steps,
+                ddim_eta=eta,
+                callback=t_callback,
+                ucg_schedule=ucg_schedule,
+                negative_prompt=negative_prompt,
+                adm_cond=adm_cond, adm_uc=adm_uc,
+                use_full_precision=force_full_precision,
+                only_adm_cond=False
+            )
\ No newline at end of file