From ab34c16610e0d4f05ad98dfee5ad0cddec6230c8 Mon Sep 17 00:00:00 2001
From: fxmarty <9808326+fxmarty@users.noreply.github.com>
Date: Mon, 4 Dec 2023 14:09:51 +0100
Subject: [PATCH 001/153] Fix AMD documentation (#1307)

As per title
---
 assets/architecture.jpg         | Bin 342347 -> 0 bytes
 assets/architecture.png         | Bin 0 -> 952555 bytes
 docs/source/quicktour.md        |  10 +++++++---
 docs/source/supported_models.md |   6 +++++-
 4 files changed, 12 insertions(+), 4 deletions(-)
 delete mode 100644 assets/architecture.jpg
 create mode 100644 assets/architecture.png

diff --git a/assets/architecture.jpg b/assets/architecture.jpg
deleted file mode 100644
index c4a511c9db7a690b991e95e36aab4e1a5b5fef57..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 342347
[base85-encoded binary data for the deleted assets/architecture.jpg omitted; section truncated]
z-+4EuQa+vSns?K_chz#Zc|;^cS!NLxzzHZfVfR+bXP|~5#o%Yv$7$LM)uFrbgQdN- zUo>72Wo5WFOa?!SJ~UPNrf3@gyYfIr#v@w*sd%OEQRkOl<)^F&5spSKhm%=Zs)nE+ z;zz&BGVm##944Hos@IjdmZ_s-*xanW?DEsMhY@ zJP3u}J9FefV_N^qvk4`IZHYnqi=z8Wx~b`#UKCEE%fJjV;_W#2;CX@J=)I;x_S4F|P?X*&(9V%c1A?6F_182u+6?BA#F0Rr@1)as*c)#bS< zfD+by@IHGjPe5=zS*-Gym5UPT^z%0dap0CUbo=oE)$p^6btZ-1{P??~i!|Q?#U`q@ zCC!r}tOVwS3t+h@+!LKE@7-hNoGxPvhEFYjGMc>WBgqY;XZeT3-!=wU6eAWNvpN(&ZMe$BC-iT{>>hr?qhmkOgfS zLQ^CmO9H&10+w+L=ilnum!j{Uj(@ZV19QC$L&m#(Ec<2$n7C#5z}jX5#fZFf#G^vG z;~nCCn$6dhDW>7cC>L>7)rA^DzOwl5&KixfBxI zp9+)_nStlTqW19Y7>C?B#;RU|!f9w$rt4V%HN(uDg?Rj4@fVhcF_Liw;w=&OL~a1) z2YFUm>~hi3UKbK4KXn#jjv{O+S~8lxgV;|bv?$Cn48^ILG9f9X1Aw$>^iPBPFEbKA z(O(aDgJMRA!-Xg-Tuj8+XwqwiE*NQB;x8&OZOa1{!Cum664`Mxmh9ay#)T;!p55G1 zdsPS%r|KC-*Pt6<#i;0}&=?smzu0asQ<(8W8xSr3uHfMRr?2m-PEEhD!0*``l{whx zZs*53G<^POvmR%rsi{!y?x2k%b%y|2IXysbLq1@v)VYjgwKLUL{oWJK-t>`HpqXz! z{)QS|dJMgQtMYS=(YPM*e)$8c(uC~~f2&4HJ*;pUGa>Yldh2aHubbxjiu&>T_mbA} z%yE<}-n+gw-gUTZb2I()NKSd8)0GlsvFWL@1ONqUNdnl4Z9wl0%8ce1!#FkyVW3-< zFQ9GCZ@)nk6lT%uidNr2W6=OJ5;h@3{0^F-ReysF7&2@jq}Fl>ovjbQHHB`#P=XQY z?e8G@GM3Hjuvnn=RW2BySt_s4x8dZJUw&4L9u1zp^B=AXFp!^l10Tk&QeXV@Ue=-$ zZD%Q<+r8weO1p?yaa3sLPBPqY9PgQ%M`I2q^ggjMH;7Izm|ecE@VV~tts&tGILi0V zulo0YQbq_a1E9_H z1oU_^;}i)t;4e!~R>yMPcYHS4V;Gcajx&gVxu*`Z`3$8yJS_WzAi^@C@(3zH@r7_B zSn6N{PKPV0}XL{L}CTZIb{IO$p}r@4%<%8R&nTuM?XC&|Cz80Xx8ql)pV- zhov4?s?#v`%nQ!xF9Zmo@rEeNN}HB(GgFR}8wa z7!!Q$dS0H23O{b}i|L&M&38E-)PmF=d3%0t#F`%DF z41i|Qp8)y?oeTkVza&7ZNj3*<`TjN2_mh0{R|)>4cZN&O{91gs4hAoNbrdj9Dpw_b*gX#UbB8P=Hhu z&_ataz!70Hi)hjbhBcb#K$D*X>I@=vV-(dNpPgki0riFU=Kw_pC@>t|VQ?}~zze(J z(;i>j*jpvtT%`24vV??=C=?`W?RLHB+AJsk`f}^MzOE(s{mFj--RwWPaOTy*dUEMg z{CJL+w1cZHIpSrbi=d@>4Ny+eF81yB_PsP4T!-nKAI|%KzT1D+&V43Q42^4RoZMXr zOY(ov>EwR;1wp50z%0s1gmZWBSD7bK!>!e?n6}#iXXS}!hf34VQnV&S$vTAg!z1>* z*?|(_r59a;C?~?E?!j$YSR#Xh&O%b~g{3qu#Tv-r>NX-KB-D`zJ`5GnOM9p^1RM-o z?K6Rxt$XT7!Ke_>X8Rx#2p?>5AXR*_+i>{fe` zu#2xPuX4Yzvu=7LlkDR0{6^5izIWhkv;%&S2R1NCoN(+teQ0c9Ft@RPJ==cm+X}OL zi&*FW3syCc^dvs-e!;X>`l#|46-{#_jg;1(B0p^>oc2wo=Bp~md)S1h;m}WAS7M4&v|*gpfqlo)!6aan?SZlCV`MJ$L;chXUCsNR90|+%aAdtj7_>*8cL878x7cCh zGKuD)e1ILM`0{lvRV1oYINd;^pCWC;Z@geW#;nTsZ5PuftOgdY25H0w^FTE^#(ARD zi%XJj+Zy@dKx2$jp#!k$lJ63Y>*7%3BO^SfYc z$tJ<|;{ggZKB_UJ0|tW1nfsrnr;4jRjq#Mx(%+Z$uZgqLv zH#p8hk{&dNSoTY0G>MKgorUZ|ENo^{=;W;lPyhr^Gcy+15)#=F__G z_uPIQ&pi};M)2hjXb+edFzB^s(RQ-38r~DIBRUhZ2L`YEO)2PPoIfH8D%^W9`cPDO z$BVr>tqKyr4D*4>$=9J|pg`JF8tySAtZ_4@rC&Bo^+CP~UwPjW|7Qfr-9V}tg4-w9c1I_9>;oXG z-h?oYEYg7AmkZ!L11m;dM|+ir{47QQp&O%oS+TR!8mSh%qa;(H5aJ!U4fAvdyu^W| zUUBpl52D?4aUn~M&KLpODc3&6@}ChtEq!LVBk$-WCMr#pi)qVjkxYaQE@+Wcmu4E< zh3wBr4%5aya7ScCb(li#XMi(aShNv6)bp4+8kyiy~Fz z*Y6_vmNhU%$|+ae*iN1Q>@y}+bMok3cyy@$J7HHS1YN7UxL!hzLu{H=aQMtzrXnk& z)`zd3wLa;*R0O$mRydt1%;+5~ZBQR-e~RXmNy;ZDMzLNhltCE`^`0J*xc_X|nHS>) z5&MSEWn7bBp8rz_MOk4Sa0l+sj{wXP} z{}br$9(L<`esFLuqYt;>#eWa7*1I}&%%JTv7t?g&ae6M=7S8Izk8gzST`{NuJtqK)w4>1B{*{u^G7xyJMV}LM)|mKQtHxT0|(r)}=HtFX^!>x zz)*!h-U{u%z6croaij2U$4T2px?+CR4*!|SVa@QXz`>~lN>qUOR=Yo(siNOo7;!Tqy?r-rHU~MJnAJOAU=3v^>YT*&+ zDBhWLos9B3XI#!J8lwf6ty*I=pfLPnzEuhl`FwOqYz8sz%nzo)5PM`5(F5m zthi6b{^v>32IY*kn~v=+lmL?Mwr!x$G7#A9@ue7@fm1FqxDX|cFuz6PA$ zvqB6Y%h;n$k5=|F%5Xr5bufFX>?o?jaRYe?X-XH45MXhIX( zNl0g_1j4QXH~y6qT8$kicRDKW7X>*7b{Uv=cZOWr}$!=JtPjk*P(l768pT z=^tUUZ|&g}Ju5W1nfbM45JOm;>Wc0--H9i?22{8vU$Mwku+TINHpc;qj#Da{DC+}2 zs1gTndVwhj>=N)J7ltkaXUBXE&@?d=ELen)$grUx#`6*&sCryH1GJ&MJ7d!iBTk@8 zS?HWj(m+S5V>{!_*@rE8XaaEJoWVO{gJz7weOTJVfSgo#X zXds2L2`tQAB#d#OmU`~KN+=|}ng^kM9|OJRRr*>{1=qUz8Acx5)Wetm3MsFg-s%)z>zk4X#^tv;ORpn;fl>dbRJ`zR#k9!jD6~*RE6o$IE^B|-N!*%Jn4#PJzk{t 
zVbrmi`XTWx5~csPh2unUy}u;{Ej~kc!FO?!?K=NKCBlDyS(YhG{mQl;GzJj7_kN0B zkN8q`3r33B=>9`O9XBiU|BS`|uAx*ukR=tW)=fqm7VK1sMiQSF&n|o3@W^-K{#q(^ zFT!l>W45bCjottaSeU zo7$1M6t}6VfBUBYLagnP^8y@!Vrpw)Us-?^B=mZKca9GRGY)qBvMV>g!e&DqAQbg~ z+eo{!z-r58pvRZ~xY!#B%#3FxNx&8r{o5A(?%!Z)_dgHBb&&{>34_3mcBX}~J<811 zANpJDq}d%5tcUS3p;T;T?o(kPwkAry;qRTcusd2L*YQ&Ao}l4wk3SSy+s46zQ470m6`o6Y%^2YoJpOC_6~MgqV}Obd9D9%h@Dar8T= zZyKoMywg7mdH74omE{Y%6kESSF~fs4WTn1LwscooN_5-?(du~jd=L;$yjFm`XAZJk zL$i)-GSH#^MVRGXHsdw*jdh3-Rjz*+pQNiFB9f*Rv^-n!YP4b}QtBaz%i zy)PU2v$I9=)E^!^;{V=5i3tc;8!?=7@FKINgz*;e<1KNM;45lg@jKz~Mg)9Rt{EuJ z5>|BhFQJ@+TU0+Eeeol(eE>VuM#K(eH~NMhu6(GMo*E$G>gKNG3h6Y)KKRN~hb<^w zf~TR{g*keP?pJf&TG-ktN!pCvIO0;jyFNdwalf#hpyb*S_wOKCFc;HYH4NCZCpRfu zHG{xEa46<^R-Ur7;Tyb23q)8GpCN?`JE6;_S@VwFfO2AKG?fi)$GX(4%Q<=U ziR+>q_s-4}gXRELmq9e4H&K>eRRm$1=V@}Y%N(2K=`(_jAdWPM8ZR1qf>2RXSs5lE z&`Hqj9*{p^Joe}o1uDaXRThqL>bqo}TN=(%&Nrg)x_k+MzNIJm8+gBCvK5#yfBj-Uc=pb%mZ#3CkTwudBcwn&)qu(37R9)UoF^1c<7x z#>XpO8bM=HLB2DgfSpM$LCKTS3W*jg0p3e4IX7J`&&F+I=eRqk)KE3gJ!#Md>ddO=nMgU19BvTgM>-&kRrkA@JG>$xsAzlU$kn6 zlnL5CO9VrfSw?J{{f^vQ`;J`|g`n zi;s?v!4z|zFS6Kbp;8&Jj*8*0KGwm(YW3c4`MTQ(LA#mO`j#B21`c>34VL%!y}yI@Dq{{1Y_eq+)uNXO<$Bau!@75M zLE^3tmSOHm^B^dh{^pwx%nldqB*G&?DA9|WJq`kYJ^-R@ZKA(}UOh()G3rpqfG7nA zpib*d+u|uSr(%-IlHG($7(f2;G}+=}t^>}_aT6<X8)sXNKXaMivn+rga0F z#t3>g$N^0`^BuJ83j%cHACE#~8sH_HPsl1dq_7<>enzEbPAT7R?TNi1wsIU=!IqY5mLtwNNbk|Lkm{!sCXo|Fat-(QEWh?==Z-H?#QDgCAjB|pW zcpCrNE|l8=5Q*KGalnjfK{M(W?L-|$aO&rIeFvHGV3OOitmJ&}Rd2XZ*Cnc+M)`4J ziCZ+Tl}VR2+=8%D*Ww3QFYCx%O@21WiiLJZzVPVHPvMt{uuMoU>lV%-CN`mAYSx8XBh?o7*s?>9*tdb|P7|!BZhIrdNuC z)5?Vy;nZV{R$rd6JQ@e-eT6Twpxz3FLeV;BVjlJ!kqdT8rO8mNllSn;6ELiEMAXWF zhaVz;eeh9?qNKL;J&(Lv_M*E2h-(sk$;nTcNn9b2UqbtTA0jfHWdc3gw_A9h64R4R z3Tw^aD7wU-HwnJHu$imI2UujJWb)!w$>j*eoAel%ovfc*H)LL9?#rD>Tlb&8xjVA^yry`qB8Q+=yHsKFCFTJ|IOybzacWRQbB*atK{3 z$zXvyqYqMs8q}_b$IBF24Au+KB}ZZ*FQOu^@vw9Q%}`ZO^15LxJA(?xvDKBHrZwtI%CCqVXWUl4r2prle|&j zKornx#M?D4m*6emzoLis&^l@6FRbtm;|tn|`^A#{t$-U0I0HurMPe%m#eQoP0VZDH zLCKIG3nZ77*|XXY-V1v4p;;?YYvfQs`qSH5mqFWR1V|xl$%Sg?0N{Uzs!vhM*DiNB zgJYe%bDw^G4ghNB^YoJrk#sn+4{AAhsYw!NjLnjv1wR^-c9ZDwD99(loyOp0f9s5l zJs*_IO6$SMlDQHMKUMdYtBto#c28EPf3*@O*#*%=b`4UD>*AoHyV$dZvDVxLfwQ|E z_Da6ZNG&*h|HyBvzaFx>G2n1|$v)#|ttT^mHZ4pIlA&QJ+kT7^2mrueAcv2?rMeTX zFm|4&4ThB7RM)Cn(`=Fk1py}b5#JC~#b|!_5r~NYHDFh45ev26EtBAZ_Qq0`7#{hV zp#iJmP)%(fAvRv4C2g0CP~o)bh?_+IUN$^QfN?;DtU}~{W=1qyf+sn6yWPAjo7!Vr za%%py@KXc{`ZeJ8GU$@Fg~Gi9IwDu)TIu4X(vV7(Ni=u>kzJ=Y0<=XsoZ6r3B75TS zMJX!B@F6Xh7h1b_?R%%4`xmzCUw0}QxqV20p%0%(r-$?bu(`xnmc}vCCH%HH3W&n; zdT@+q$}^KFQEM~zmADCoKNW8&BK2xhxL~NJgBpGH^5zaabgDA;E~rA(JGEazbeC&7 z-%-C1dJI_mMtmICoyts6+zcxu-ZUd)`dYT^t0C%pq&yiuX-5E4Ov+pODWwM8X}umz zIZLv#VmHO*HDt16~s!PfYoaCN>&}+^m3> zLSl)~Oz6$DZ*jUVS@Y#SV{2b2mv3q=Y($NRJZy^FYyA3hWQsd6deYoqpA_BpegXcf^(BDpT_&t+KPE{I@19Cw#`udQDQLYY})*ZnX*OKzw zFf-SjcZUQ;Lz<85y?s#eDYOVs`X;C{+TrV33?ZO&Cl8bb8Zv-P{HYr~5kDJ5QwMNU zD*%EDHw1!iwn?fcx`XA%aMH(B7=X|AqcgH`(-%k(!K4)cHT8r1VQ{A+W^n_Ej3C4@ z#^FsE%>hWs?H~gSjAt+co_1mpP3ZxW_Aju)KKdbaFq)#*{~bim0y6t}v>k2-$j1%> z9hEWpKz8|K;^+UVyMvrS*Iq?&(qu^B#estrEy_eXz{szD&T~-SPtd)I289N%g0ywMQXv z8Yjh=#7une-bi}Xe%`Mv&?5A+{Oir7zM!J!n{VVnOS|Q?-Qqy`)GL(3)CqVEnvbG> zIWWDtqKihoWW1uMaae}jpgE9y05BA*?KIBNN$;l~ND)(DkC0b|)Qzy$)FNIu-`iIz zcHzRy5%&N=r8|#iWUKPG?lZ4Qibfn6ylH84h{x7C05q@rj%|eb6O@m9OGQPPmwfPT z_9q+H-k8HTMno6b=99u67``~N_uBPUfJ!h94z!lh0{LV&6G@)Nt;m4> z;F%ac-NE&hZIJ`kPB_WHHuS`%$u zaS+%X)bh2`goxon=1?5#&24jJ#Hq?1`4tgvyFap6FN!EJP+Cwxq>D7Ek=~{E8mjb!5&|in_nX->`t1U;;JtToD;o)i^C}>J$PRpIFv5}p(7v$Swy*?dx_Y|s z(Zp(#7!9+XJ0Ck}aN!rZGLoI7Exw-%QgjC@s}zDMy0hF31?<%N+lA+`xt|vm3yTD< 
z@|PA<%}9TGC9AI7{qUIF3F`BhavWJnNyzAG8^kjKoX#WT9B^#ll`Stz0D=vP@kzhr zwJc|~tKe)>8=UuoH!0xYL7l?XQ?Tw2Z=fqo2NAl_6U~TkX}fKu8Y@(JwL`}=`0vlY z-$1>_Paaw$3i`jmnx;rA!QCC8z|>8@vJYiW6KBn_U(Bqg<~zA->uOS-Md>Rrn`qF2 zzvNtWSze?Oe#?o@@JZHWnH#Pe-b6ab?`pBcR8mzUL015p)5O0`Gr@5H=28GXP-_$c zAaOqUroa)|QSFX5N{7a05G##y`-E$2Gc{}|$dc}Lk>Ho!`zTaz@jhv;aZI0s9I$h(PPL(jZP(@;kt!pz|Q#v{>X)7NpWU>P^zG8N+gvb3_j~ ze~Wf_?^xs~7sy{|@x^1xKC8<}Tb&sxtbJo0E*kCMG3nQgc>38y=*Y<58TXNr=VuK? z_>eNSQ^c+C_Cx2CYEi0tI9+P3a; zKMwNw4fiW;Fv|Q;&~rQI2%EIQcrty*_z=_akGEnTW;%ck-oZ?l^js&O{E*; zhYrhD1O`g%JL0EC9iAYl{r;sbOUT!H?wj$CA_m9TCV zJv#ZD5J4JPrdF_rnEX*6#L8s5Qr;Xe^>KFw=$tCsdk6UXo zVq5SCtFLN1UFhxJEP0A%uBK?nrsd7F-TdaS2|!)Z?U_+{+l}RNyz;_PDMWp!Bw1_d zs6jz?M7Yk)>0BENk@4QST#si-Q?-BHga5~l{7?Uz=8SWk;mtKtJ8xqA?5=_mdk4su zvR!x@jCIGQe}hIUGCB^;Yb3decgKcx?(Z`Nl^v2us9pU6?y;Zx9|eaGx`1&9j{Cv~ zE-@<@J1)_kVnvW6YniI8vunuwe$JPpeyu(KiKto+YHOouhqO&7JJRJhXbNsn1wdu( z{F+3)wuj5oE)q3THDWhNtj-4^5i#3LP zg)z3dcIDi)-;W`(q?F8QVol z8Mw!>`_H`7|M;!~FCbe%)JPH1ksUgC#&RH?JjnsaBbO542igPx1GX-?Ejn zmTfFb{GC=!b`T$nw1d1|;KGDAEYq+%wss~2}hm)UK)?hOZu;QY29kCoY0RR!OEIx@g{{sFW3vkFjvv)cVnMvkzO~-t4}79seC!-bU^7}TJ=WmI7wCf#PML$ zan!)!s9pJrU8_}){XZQda2{CT z!b5@phXy}fmoIu$3C$5!G-En#I!}|)^Vx(sU#s1k>N?}38f{VAhf4Lf59WBCwwdH@ z*$W>^G_Q7^OuU@?#@ns?>w56fz$*y{n@bld{?-J;7@&Y|MFCp1Pq>4?8P!u_-3UAg zbYwuoV8-A9yh^ceeMQcD_p~C$zCp35388RiJ~>@%j-UA6|3|A&4SJRd$SHYO5VLC7 zr#R1m+iA->(Pzq*57uRFj3&7#C3YFORdNNZ?_H1K;5QyB6x8Lt|7fGC9CycPC$i{! zq@qA+aq^G)5#djeli>)rZGH5(ki*v(Bc5gwzjUJzT!Wo(2JNT(}M`9M) zT*P#c#Ji6`{7g-s+v0lwV?skn7x`BPqv4R(I&Vx1+IG)%wiz_nSNRwFu8ZC3U?`)a z^-6m5ltV6kr!eesXv%+>#1dvL^+a^+7dJj?z4 z@GOu&_y79C0Ci6UfiZ)mWeLW!ynr4Vbbx>q*&>Bp{n-e#aTcCdL>coT%2Dw@WeUKD z1pr!G9yQ+uP!25&{Ue-O-n{Eqbmsl-#}^m*7yEO<6ynpam~(U*0jl5SEs%ks7gdzW z`z4U5Mh>_P>EMRrLBuz62V?;0o6&T;m4g*nCM7}7jUq|?FGS8E_SI=#w_f)6gY17v zkpTw9DWRqa!sx>`!+MpZrCwW$kvVCS%rhA$*7?XEOHZFZrc}TK&Sk!F%u z9x*{#q5b39JBaL~B&j=_K?9TGWJfL_m@?jqV2!D57mfmkF5kdy8CnuXMv^r%P2)S% zyiVK(9}K)hIMr8pZ+ddrqYifY4?$1>-3qbt5t5UhG{xnY4tiJl#yZuIf0DzB<-~oU~!Ne?Lu>0phVj3}J z!}mAn8DgL-YeQ&$UJ=!0>Gzm&=!mSH@D!)K1R#P7)OBd7Siv=CjVy2TXNJ8A$W^BW~kDOHIZj#gU#VqkQ`71 zg}4*sLQyF|L;Df)=)foU$@dU%jNo`k?-13jAe-c}%2W&uIli?$LF2~x9B{tkbssL6 z6wVj5NOnmFuZAbr4sY8H)B(Ql^9#g`4A%fDHoqatFwzL*>q$6C#wV9VKTp{q>SNn3 z`iXm+m|(8tUE!6RLK~9=VsN7fVYpPc9>hb;gxk4sz}PVD3u2h~PPuXh-ch^JPhWl3 z?_{jT*6t@6cFH$C6~=h3B+%hkCU>UM+t|>NoEnT>Bi1iJDy3q)q_?ykboh1kGA6ml zkmEa_wj=o(F!c{{!cA(XJG+pt)Xz@!Pd^pkw6=%r@;y?LKFI-F5Ho*+Yy|0-)S^eb zPMQ^vlgHne(kZB)r%tpyyKd8KGM~YI0*w9xr2eg@g8(RPi#B{U-p7{IsSm^l6CjqB zPg#+)gi*D`5X5iLL+%`~_rG=0fAU@;{00%5q0jIxx2U0x;5TPaM^`Hobe37=Zm#Xh zeG~pfb7CxYHqbm69Kl7tRvr)KkZ_AT0|$EwGl(MJ%0sQ)^S0>e6vwN_bHCqsEl?W1 zV{$!+3kI~XM?JZ1k0wz@EdVbJalTO){HB@R=hducWvp5j-xufT0?5ILlgU=?W-P&a-J?=Zh6x?Qk&cC62que6yatGw)K=X~~F_cP}&Pr^dRv z1ta=;=9ky7m}{E_iZ0A8mT8sVW{`r#bBeq(uoplkaEmfbEq*S6zal=dg->M-vyXFZ z*}JCiNpTqb^OB=S*4jVFg=82t0s{hDn1PZC(OcRNksxP^B3L}{#4hu+r+d9`&%F!} z*HC|^H^$fPBXK36(Tz=Ai^3myL2s=I5|E_ZQ%OUCEVjO}3#*R5b5%+`TtzP8zUofLtyJ!Ek9`q(1 z`uU}iv>msOsj4S$QDUXyMPj0&5Uvd?K+LHY1{%o~3dA$?L>(-3@~Rx7a%Nug+T!EL zc#cQW)#jLk6#JI9r5bD6bl?<3$-Aq6D=C40{{L|%EY+T?%g*jvLqoRojbIiHyTp)B zw_;KO&v%feMwZvd!hU?q%E^}0P2%CP zO_ASyDaE@dO2=%XdW;@_IlvbU2kc@SbLD?*BxIuXkerof64SH(8qX1Zs_7Q&9@*dj zQCazyYyW>oHTHGu(uwIoS8CQ&kJwh#PrCj5W&I)2*qZW#g?@+<=ku7DtS%mF%3nDD zTcpV^Yld5y-p<84h~<;bg^l=(b>#HWld#Oir3U0Ei422`f;lZCnC1h@+s}x5d0YEj zHonnZD&2x>1@kd%bPQxaR)VbxtzR0yp!L0req#Efw${7h6hUo8Z^kI}e8g??X!k&- z&hDfvlmVC=PJ~1yYtD+(<_o+F*H6{17JQee>QuRqs5;nYKab&TMf~SJF6M829OskY zp!gmrc$FVl9t~Y}HzUmh0o5k(Bn4jAwsHFHm7_r==22^Z3t;eH+ZMmji|c 
z=;ka$S~&{G!gxaY_FtARL=@hl4qqLHnf(S?nOc$3lr64XEq_8`AhGC{qRk-_T=<-h z8*|<3>U!0g2k5u+HHqDJhn$MDVSecfbfG5*pgSvY!xSuay&-Y6|8f|wI>Nu=QQzn6 z_okwJ%B|K_1NOrdwM`GSuP0Sj{7Rt+0|f$QT+1Ux9xIp*@uT>D7zdlMuN#WWcJxz- z(4(2GVZ(RAHtwrxas9E|mZ{g+64nE>HjDW)r3m!#B<+(H#Bm#ta7F_pe!Ub@d&|Dt2`fOUt=7fIgf2(3^D89sd}I(8gRO4RTP8 zFSPauEZ1YWup^A=-tVr(4JGz=t-bStbgkc!KgWusR*~3cu9iM_Aj8W7mnD%F=)uk=?TM$zJILXcER!Z`jmyC}Hvb7tma|U*g$TSjyhV=02fIbgJ%TXfmhmcM z@)9VFJN*1!hSzlZ(oJ<+$9aRXwUf+q$OcYtPNA~fVxLFmUZp!LAK8C@q|;$F)j-m* zZCS)Jk3o;pmzw}r+ZKlhBzkO7WAguTd;fm3!QDDIJ{>~l0e}M5S_9{^4OnK=&6|9ydlaK zWK{%#VNz?Pc^oA?y|tilf5)CzUc;BpWm$o`CM;rKC%60D>dP{K$g}~kIN2@7%v$2M za{wDiaMU_RF?Km4^;;@`v}3BKlZl3gjpO*kpdYS!DKln4FYi$T+R?v<{eOHZf9mic zsc^Ibm^IdIMJ=N7ikq|#Ozac2-Mz)FJ=Z*eHg=>ZdmTXq6fldw3Gev^lLS)bdE^1W z4Lg3*0D@%1KvoMZrG?+1WLdNn(Q_Prv<7s9kB0tq z0X9A>9oI-yXrs6CPTsDEEjdo!et8u)+td^8k1%qq3f&<`7j#&1j}si0U5q;Fk6RGB z6K=Pg>yz;#-D?@-Uwf9kt}~F$5$K6oio68aIH_ex>wIjC(A9mz`hG*hCPl(~xeN;S zx6BPIY(tcr@j!z>597yOtvTBrqgjSu>m+A4J-hM^2!ES191pDauMSSGgO-C7BY-+m zz6<*i`0g)o#XzZQU-=u9_X_x#;r|7B`i~2PY2b|m2N6t1gE3fWRl$#4g=&lYkX!zJ z(ws)w%u6wHYGZ?X=H%Q`>Uqi6=)mR=NFppwLIs7{!~KWAzwk_ zcn1lI`?T<>Uc{NE!XIrV4Y1$x34g2|J>dR5sQR1j*MD3gj1mF-+LNz~=fOtRn#Js@ z3z}wM*zD}0RQP@cL?r<-KY}=}Wr@1^8tgF!+BTiDGN^PYeB9H?1 zQp>DU$a;vNRL~sh?Ttw7ZmdvG{<`<^)5V3TwYpAP{dm!#nKxrRg?JuYxNVV%QlV2? zg2jx5YNe7pMTq)ZY`DRA+Rb+%RGcCF?~ZDD{^BeAJxu-i-u(#z{~LA+m4+`BAgiQ8 z8?~(S3DoHPBbKc7lp>+hl6kXk4t^8cIOC*A&QP9xlT0|>IJ+iU)1B&KX^aH@$(3U! z0=*R!Am7uiB95~30GT;L;e|Uw=g*t!@1f#?RII$kCmdy~>|`uK&3)C$@6wyIu-Q2E zC@!HEHLn{_=~jamf}nTcb_i-`XpPjaSvkxV7fWvLWU4T;ZS+`1m?_ijnxwLVtxCMK zf*~!WNsImZbJ$_SG^Ef#?epJ1=ucbvpLh`F4m3o!an#^=(siDWCb~f{Is+Lezjb9sP@4%9>~8TzA*o^6^9>YT5{%1P6Umm&swZcd|8F% zZ+f`MC}=o#wrceFWM)_MWCkexThj>OTmYWb!iij6Z*&57aqI>uQ{Wf*aq~aJpZb5& zRhcu}NAPLV)|MkGoIzBBD~9pOy&fAK5ZOJN(j>2)NGoQ2k zX=Q`IqRX<$2gO1@?*?DDAH3nfdik8jjl_oM<;aj$0aUiy%ebmzpf3a5@bo=-Kq)14#LNUgwg zFkruj6)v%A8R?H5^$^rvd{THm(>lKtH{r!ql;_43V>@&Ao7+unMJxMo!JzF+kCw5u zq~RSrNo|u&qJ2t;r?pY_n&pv2K9k*@<}{5amg&!t8jVIUPxPZv8E!QI_< zLoD{!NS{F26;2gqdwY2hh%0&I4_lg7eEMP6Wm8z5Jlt)%s_C`(Af9IkaGNZ@Kqg-%Iz{W{Cm}lXZqp;6toV%7;7ib^iF@hO=>O4K z*H1NI(W_qNXsb4*>@>iuadhwgKU^W?kPZ$JbVP_a?%0jRr&w)B!0x?Smq(G6iZgw$ z!TTG&*Uxnr#EE6{D#YsB@;0cLM~;O__%;ZOWmV!ZXt~D?A+2pIQo*jSWzUSQ!y#8p z8k4%aE*5=yRC)Ct=#@s%>*s>zR&)2;hh2M}_tiX2GX5o=QpN>N0mrieXe)Hb5D8>g z)gVP+AQYkk#9%oLfN+GD!jF{7p{y8AmIefD^2#5eJLCcZh7n)j!j&#luZjs6N(mMy zvn4TIDS2p$XM|;&) zQL??H2&lrp#A5IV=u#f^z_D5IH>kW+_%}%NV;RsTJ$Z?XAKVW^UWB6T1KSZ-kBeF1 zVg_iQ$QzSOH(;H!J1s(K3Tb^r)Jd3gjmK(9nup;oSHO9o+If#_cuz1vE9=$E=*lbS z^Gn&*@V{dQ=BonN?;IzbO?7Re$rKte$mX+PE87AV8??S-^>rzCTBtxqUB)BGF$&P~(*?pZxT3 z*j2-u;%Kl>(FFSKyUi8LT3XCiO?1=`+(bh|1npyJ~k&4Eo^VnKeK$jP}c=_w}1-*C^?KJ!}u{fIf%LAX=;{ z!Eda`tG}}vOjc`c<~a^)t~K=uNy#;X)2^=CebrV6(T|#nTNv3@Jc&*`UG&8J2hLhC zNt~88v&c`hpKH@ZcZH7}CyKS)R$ot>l>wa@d5`~U@n4KA=Q3R13&RHnP9raPM%&?L zQJST?tq!ca@=B<~JabvPPt3MacUk9?bn1HckUY!lK zdWv!)f9GPpyy;+k?z?Fph7vd zSo&q~Il0?_{TDJ*S_JuvmjXic2y5aGPxYHX8$247r>f*a*RBM#}* zo8lXx&)jCAtyWdoEcZO~&CML2!AH?Q8%JRKqB1B7;TgfXAQRDhc2Lo4p0Op>Z1&z zEdAB?AE&fm7*S6Zw^e`S3ftrveb5yWb$T!1VDBYy3)uhU=t-;_W+S}$df=EJG@fAX z=khuL;Na>t)F&EwTox=l_o>{=p07wQm2rDKI!jUib&sW_TEic5D=oYROlxxQ^I4i-7NYT5D&8oo%TAXa6(vRm zN#lYgbfzHqJtBCIFL@-ua`E=0S-*mvwj!qm z`XLgVWOG{Rtjjw-!*svZ zOuP{)^(@%-6WyG&=bSd>iuu^3`pkOA(Z^KJ=&iqWQ~XGIkQEz&6R!-D`i6@}kqL-s za+)qoKT=BV-2@5KB(|VZg4qq;VKTvr`BD$fOL{d^rqdD31id001Q@%-!JIydYZf|k zu#Q#BiSfB9$zK~$4@>(#m&@67hnttHJ!L*V2B2Hu?m*tktK|?>Fjl+ZGN+2oY=(+& z4VBlf@iTmDOi~Z$dJOm3TX_8B2Ot~lVfXynBmQx03!YKA}P565Rv(PR* znCoP@@TcKtUu#Em8%CBY6S>^Qv=BmEf~_4Vm6O#T%hpt836Xg<3TfDMVgU?{7r}`_ 
zFXK`mPsXdSVjR!JMJZCTDxJ&5p=+HPX&wpg-$B$j*a~$$EfPhdlcBN_kXgVLVHj7v z%q-B%>{h|!aXST74(2`zkw+W%u#H@$G-OpeV&|{3E>!xL@SDM+w!4XUCNPf+KEn;5 zq6ai}((jczFGK0r`Air*uV}J8vHl7cD5LzChdz1^qx~arprYkfF{HwZ5^$7E^5GuqtiiQlBlZE_vDmd zKhM4dB`P(*`PUb-k1fE};+3C2B8g56ltr^;?25}>9iCQ-dsF7Y4{WT}Y;DQ7JLKJW z$oBw=ZdXh4vM|IM!hGzkHKBOz?dF-SD0=d+VE;PA7{vv|D6KJU4tTWpf6_36a%iEf zxY0HQ9f7+Dw+JL^FInx_7biw~GHiE#6+t84qYTuz-;?#^fLqHWvxPA{9r4UQ8OuLw z>C?umtwg=%>z;~6-2FklQ_*e^kjp@Z=l=f~sxcZ9#|xUaZfK5`+SXMyhVAa%O!Vfe zs86x8p1Z8h2@Hg_N03V&0BVu|%$Db^u&2)D#b3*Avg~x4VaECnie|mJ1FzY?_D*#g zAE;A~n_dh45$g8Zpp%Mz{VCOMdq|K%(qHuy;9e^nuTgmEYvCCv-f-&D z!p)%<)y!xSe20?@@`x1}kdGk%8pnSlQ@!921zj~6{|(x1T>%i1YS16^ZA7JIGXBxv ziW|+Gv#t|MSz3OJf4*hd;#IBnU0%LtuBwz2p()aw(tI|r*2ZRHC3Ju79wAr)0f(v! zRm~ie;+t-bH#bD$7S>F+)#_;HsvB1>P|9cJ6!x4STD5m!3FFnz*}*E7<1~P7UKmHe z(h=3w^<)7@uOCR&bYZBry~;0{J~uCZtXfU}FU9$P{u%x;z;d&x`P--qH*(DFMs;**e`}(8E|bDvtrnCKXyD$_OV!hwcpP2 z$`;HGLynyyenviLwqU!0k}eivMBS(Y2eFFh zzkWa1Yji2Pk!b(GQ_csd>3kcn!e{a8*2JxkV3}&a zhbyHdN$Hm4c~e}4vQUg4L8ai_oM#z1wa7M*AO*$&w$S;se^TpC~Y!CxgGG&z~L|pU|j-LZW1M` z9M8H7@HV3kf)w1afRrNn3C{mF$WjV&W+Vaful|ScM%jg|h7cH7+Lol+d2kLWH}ZnF z$G}O9dE*Y>nMmgP%m#bm|%>FulBPn+XC={n!T*6({kdP4vUjeK-n&d1Bu4 z@ww=i#Gf*f2QMq*n(i*0dq_3_@?ZPckLBP75|yPuxLkl-&jAc7&769B5Fx$sq7nAJ z)4-3-wTF{>Yi;=RY}yQ|Mv*3Fdxewn;l!u2vhu11v)A?e#fI_gUAs~xKFcc|k@?#- z>yygyqE5~+|ZVBhQpFq|1O6i zlgHJ2j~h;SAGcN{WIeoU9B06Bwz;`+y5rL*7S)56$>&`iyuvpyt-|M(G+!vlB$Vd5 zojNzEWf4#f_z(afhduxe@*5mj?JO=3z`x3|`+I*lFZ}yo^x>6oe>C`yhk4$ShIa8T z4-iTlA`=YB?CxgH-@aU3rkHRBN|S!>lf z>#0c7PAL%_fsLrrmucF!?Q`Dj+m)$aYo0jqErt6h;P5|Q;Wpaf*Ab7bcE-0S^$rCR+hebt zU{1n`hgG(b?G}12vx6k+UvPB475Xvc^bli2x>kcEq?rWRURpbW|BzKIj61|np1y-= zErD@&pl!3P9)%V#BQxESE3e}(&poHU`lUxe@OE+^$8NB;SnI&1TLYmdOCW%|T&idB zz(Eh~K)R@bfwp=lIxRB2_=JgzI{=-$>|ig;eetG~S~u9{JzvQi&Z0`O!#m`Rj4$t| zejQGKJn&p;rmPr4LKY+P0vRzn5d=5XfLzA{wfiOH zycCzIZZ#IFv>fz#{IZrFi^{YZm3aOTN)J0<3Jb(#E%!mlagfezzOm{i%>le%If3h{ z^183Pc==%;UrsA0hx*UEGC#_5((|?DM;>DTLhkV;eTIkY8EAY`TyKJ2(n;9oqRkR9*Z6@?W=d0oA zLCzaL@cp2l7hb7I=+Q`x0xuP{Q-f#(lqVxs0gHh}^kcxKNb$n>PSt+oBpm(0#|0Mc zA1r?5bFRxpUR}(i2H3QWp&QiqRgl~L{$q)k`s?MFn2lj0rIIN+zJbum!ZW+2#p^h? zWy-LlMP{6`VW{r#vE%uU(te(s@6h^jMO#0LdE5B;c|jrkJ@3pR0=0@!S?c3ane3yI z&ou!#N)&)R5*X1|Ek@09nDqjzmg3}w{usex~HU>L7@H)1C#J~o4qA< zHg={}chMdhLxg2q7v7uThU+PldK1V1%#3I@nQHIP>QoPpXBang=N`26fACDGXX9@E z$?Z7RE2CH{z%Hwq?uYzwdUl%f{9rZz)4}4|5#gaFBajK;fuo2ksP7oHx49!#tQlCd zPPePiby*u~-n%)mJxG>q*7?ajgOBX~`BIZr6;WS(#30(QW#wn;hs43#e*suSh!x@0 zV}w1XJop(GY+19ReI}sdg~u5%U~tsi8Q{<{%e9I><R^~G`5#cuKi>QzkuC`yMApg~x81vqjp*-b#xP9c7QJMNYVqt(r#PI4S9c ze0$jA%Xtv_(12@$$Lm;(JL!vxj9r)C1<}qny$QEZqv8RI>m=+_O4P#4?Nk#w>eiwV z9Z)vq^Pi>0PolF_lPt`>iHFJPrwK65?)U+Iwhfr=$5r82aKxd2_mz?_l~yxtHw>A{ z64z*B2l;+n_PN1=;c2fk=UU#{oz@`nnh@f(WrSokH1r5Yr-^-dp1`5llR(IC(3&B@ z$Bfs9mz)|AhyZSBM*`q&n-%27;xe+om}e&*|3FG70(oj!0Z^vKkQ1nPS%-(qTL2Q<%nJ=dM+h z224uqNXjNc5)IO3>&X5yk0qv^`CQl)BaCpQ>x*N=^@l8Xp1rwNJG#tCq4NEL&=11M zWxCOdd#rsVt*&Oykz5TFdiE&xxcMY2JJD2vb;0bkYuCpkY&C!PIguzhBQ@V#Ua+zC z?V)k1Jy&v)#nx)k z=TL}>bBtG#ox$8G&x5)#KwJo(W!Bx(2rCaI+?zPL6k@uf@&0KAd7-n3axihn&q%Y{ z&+;>ZO=nilcd0%^uGlHb*j|b2)y>}e!~`XFCC1gvu2z6yFc^)Tg3=vHWTlYtF3PMfgICk@&NHy-0+FrWeEfDhy~WD1de*xe4t+d^rw1T6DMYDMyJt1OV)N z>W@v{f@qWcjox`6-f{?!5!CR{0w>DdE)m{iyqfF1vRBj01Vz}271V4(oASHP? 
zUOXA4&+Es|KOcz~L=U3kLliIL?sxqlkqwm|H3@az{b>87*`bs68`6a9#j}c4qI5Z* zhlh9K&so23i*YbY6gV>QdK20$?lp~;t=br;$bieMj0kOJ9eww43w1mBkV|)ZTISDr zO%cp7hKOaWsbXw-Hv7ecWhZ&o5;YIUo!Lv+tXm}^Qk}YP8JR| z;SjYfmiZ1P07JSNaVK@+jB4+x`*5x&Z2^YDC}BJ+&K9GGt-&DPHbY4hrlgGg(=^xm z8Jwn4&0@+d!=l5z><|sjp5u3urK)^c+;F{5{HZ+KNa)@S7^!ExZ7g z+uk@Rm9+18$dg!KVgHj1Wpwc_h;d#2%a-Pc_mZo7Khrw1FQNm%@ZG&{AAX( znx|aBvuv)8*`n^98s+wxr%QRSJLb)ai-4jzl7j;v4Hy)VB26**4Kg22|ESdOU$EqI=vjQ(eDe^3#t#EBv%*(AM9~@uq>AX)BD7C`Tc{&7(@tx z9|LYPS^#4{M>rL4vc%Z)N1MHGE2irgJ2B=^roDXcMhO2M-~y@TUIg+uov;+_B;rk6 zD#!8?d|2CeUAS;<7EUDlNm@$6QLRUU?6sAga+8^AVZGetNc?*$&Ks*N2XwX=b$IUs zHDW4)_|vHLH%Lg)>fAdt zdZ)9u=I2n_Ht%f7`(svL)ABcUX5X~(a8`AgLQ+DPib+?-`&o=S_s}4zJb11CZp0Z{h``rp#DA-fxP4coVvQK)=2{fIVDl;H7YNkAGJ-0dD}RRW#SxG z5(o$`cj`-G>)&ch8JrgmgHkn;p^aK47ia)BJ3c_P^}HVcr+C@D#~`ss#AhCH8fUa1 z5N1P0-#f8?);}G3S)LrF6G^%}MgXr^+`6QuG5*x&c9cHt3$LQ;fIlyg8L7ub^wos9vg zGfJ_CljfZV?oB-Dl(9?#CgAA@aSQ}z{0FX941lIX6DUBA!cAB63F>E`IP0PG&F5ja zClzI2dP9m#jvm*|OzI&{h@#n9)%^AMac@ejyD1BAc$Zt|jZ4>webVmT*mGzFhWQ+1 zI4P8z)diS5dbcK}z>F_KBxg?2WComL5W+}hH+t9u4(quBJ|CV4xtR)1L z!=4k1<&{2dF1W1vDSm2Z7%myEJZvTD6>Y>cwIpm1bL5OlBccklpdW_MJ0gk^m4iP9 z0U{$Hi>gA9`}k{DYqrh+i-FV1S{l(Jkw(L zrN@2f|G&)})LCX~FFyz4MXxRh;KG)j1?e9}#7m+`POQw+EVNZ|FJBf>OK)17M`R$X z;sM*j&TKinQJ5lihbKzw{2^Of&yF^rpXACGxd*93R7$l{1yaHo z(33&5juKRYVG~8$q3c#!hXdXC7ULZ~;+QGunB}kGroV>V$UophF~UE9nVSMMO7Vuz zr!JMz978wJ^~Wr_7razM*At11N<1S$(7RAa;TfJ~o!yR=CSjCts}9Z96`Y@3E_YUt zOgg5k58Lof(Ri34>B7Std+tb15WCfG0h+!IUWwf4L+}4MyWJPPF_~XCe$4*edA76q z=Hm2)bDyw}#gdE@z%_tONBC4yCwvhLswJrgDFpr%!1_<3h`2V=#ev_T+vSwRSV_nk z-*S*7AXm-+`cumb05(-$PRcBE1ddes2X~45fD#*mb6#>9W!7THA)J|H?KnaL)8bO70LcD=6S7rDkFX765PU^nq2uy^Fw)m2- zq)c_41i^i!gqwYG!1@$-Y4a}oQanEQ*CelhihIT3j?uw};ndLn4>KNP;$vRuu8ZFm zl$LOemc1S@Ez|RW*K0*_us(E{F%tS%Xyln5edp^#@ZrxiDcvIU&N@Bf~vr zcF7R&gCrbhR{w5LVGtRf-oe4SU`EN>)-}P*O!aFUgDnlqoz^exL7 zpZ-&r6S{GJwiQ#p=Iu@b%wnKR4dD%Fudvr`7hFUq?kn^^bR@U`&#xyJ>wp{xZ2blu zquc@erI{e85OgUSdY}vQ{0(|44L!;+l&DcNWLkFE3_A{sD|C>|Dx6mGc77QylanpN z^@?|^q%=XTqJ(Z`9UKKmYYR`$F0&L)>ub*C-+a0rs=&+wIrgg6j8fsbf0n}=_);my?OKsG zpwX0oIYkH2>ZS-QqO(~9>m!LgmH>r?J-N?j+V!sm838H`^HMaSx zeN4ip)0g~}LgsG%3y0aVHih?fKGKyi>GjEL0atmKnD+W=+oN%q%CaExh<1cs>(nIIY+?NASjU(CzVK`eLPuJo^Q1N^x%TU0NlzIL$&AkG= zVmF>4Hl`q(i#sB6gq( z*8ftpi|a%p*=6tboiD|l>u?)?sgXefbc?*+ZzB@ zF><<{cQ{z^ac4fU@%Ir5)Iqi+Sx z5wD|3(GI94N-gwWW`>}6gZhgcbK5JO!3=>9`39Z%`I&zGlkSQCk?{MkbhUMzUHcw1 z@I9_9Ekatg>MiGfvCOxT2mMMWW||lw+p+D^A8|PhU}S zCMfOSgE$8QDFm(S#*AayX=cF9GfqZTakaZ|4JUZY863Y-XX1m-+&s$~Xa%_F;I{op z#q+pgR5#27jc=bYT}Xp+cvk+OCsck0c*cCs(=8FDPKTSOrJu}a=E{0*1-|j*SPCJd zctu6=H0q1?@=b0m)$p_s@FEbT(Xt1#lfjuK@FO&t>I;Ldc~nl7D?O0Kr8nw z`F*aHi%{92}pR}&FMG0j1SF@maZ4yZC3NpXAlB0A`$Qj@c*r0(6c|eTm^PlDM{f8*v6cskaWn zak~*Bnr{K1Vs-|ga2d4t5H~D8m$8iKJBBJd_bDvpfP;`rpCRW7H*r{0Uw*#pnbHN)&bYx*qgf-YMO`QbGGrr<0PR+ctj5ChM20#v0OB zk7WvbGk*OBL2#6dnL`=4jT_a$O83dK&1^yN29Y`P;Q-T;3?}yl$42lTs(Qa$O}xE6Gi&-iPKjlHsvGux;X&6vw}vT&Aq7P6z2s74 z<DJ?tmUir?1P{zDwPc zoKX&*rJTsNW%TLJOn2#g`KuHMnEgZCpUcJ}@|;d&Yf1y$>}>65=FI2j9#(WvsOM+R zPX!efQFCR2s3kT4Hnu|Qn88BVDrM(>6#O#ug>dZ9G|otS+|8v-hz^0Wphy{Lq-4zs zlDo3#kRcG$e&@RIT6>=VyTUFSorTlNHFw4$+hSAxVYpS~nFL;x)E7t#*saTdGgH3y zae4RtH0OAADn;ch=A6(QzAwW;OCa;2v>pSRH5-wuWrTP*b!hlCS!baV?97JTxgZgO z6$*UG2j6BI5!z^*YqnJ{Sxg4jhduTA$F{TatH~>Pt_{q1r1Rs`2PI zFCOTc|W7ca#jzxaG!8;tf;Z9EkBJvLJx#OhO_mZutv%`aTk`f)MvNrg>c_^;CF zEQ7v^j_|%h>($lc0O3&*7_l0_ZeEb{zwo}HT$gMruymB|AVqvB%3sLx5X{>u{Z7qk zQ$qHCvG*QOO>XPDa1am?B!bdQRGNT*G^IzRi-`12L_nH|2uM#91OzDp0t!Mzqzg!w z8hQ~0rT5-zXdyrn?{w|G_Bm(obd@;vYRyqs!E3mETo zGwE>Myi*_ItfKX|t6t5kJ$PGjYtk*|((6sLmsfA~Zti_E>Q?QUl9*UwrXLwqVG(_E 
zDT0xi=ZUxmD?ek3*{AedhJtdce3S3TdQxNzVxM{Wa_^p*Hf}6DMSx+)Q-VX19r>U0 zRKP}jQ^`AdKAx=-il@Cmo9=XnRxp-^L1)LF%WdKO3>S=UZ>dVjW8d?Zi`?pbl_M-4 zyGhrRp?S->f6ui5(@A{r`1s0v7@v>K?p>j6%|+9AO_{2l>UF-vv6RI3ieIEJNgBpW z?Vmr@57_I(F!*MPF0tsf=)rR|iAr<_)`$HG+1Y?UVi~O9&S*yO4}Z)^SrEFf0z8Ba z>WvmPH!e`cxJZFIR1qAA{HDum^kHf-Hu{(=L*^MZI)gRm91eO@c6DxLT#Wu4=~< zbwUigx1sY@kMa2U!CVlzn#6d_D*6oFjp(moc@jIDvs`)E$YFT-sJEYTb*%PPy#X66 zF8Pw}<bw{}R>fH&axo9aHC&P6TP&D7pB#=fc5c}5J~DzV54l3FaW z9Hhy#WC_hX(5~2P@o^hx;oe#VzeQOyS+*Et=Bf8XDW}F1-iD)YS97rF zQpS0X>1(hU7}j7(_&qUmDi)HqfNgp4?P)(@xX+$+EbsT`!$%fd>T6;`a64%^I^8nwGU8q;^Dy5pe za~kft+Ab6L1P&?`rZOIXS2{Aqhnrjv{oK1d%v(SAsZ4co(y4ISs(=}h#lE)sN|__d?xb>QZc~}=uV=}-2Wm3i(JyRBdKchp;7e>gs&zeZ z+a}UQ7j46xojYV2e`SEbX1JZfVj?}>c)(b^-L(gHhkrr_%$%5V+|Kx|KU51?&XKN8 zn`mo=0TO8{5p#-QZ0%F%PgjXL>71L-<7=@aKjPB4H$rnygIb}%mgCfuN3uCHbET=- zJyL9AcmO2hyJw?^6LEoM*wzwUr;GSch!GcT>q_LM*D5bN=iZ!LbVy)c^m@qZ9#T-% z*KcCU%=007Bx(DW`<6QHN}wEiRKKC<3mLgZ?aKfo>LKJi<9r1E(h|B6IcOZ~4jPaJ zz4b|Y^x$CIfxo*e{i|AdnCSPVBdT@X^Gf&5k#+Eao>#&$@dH?YHd|`^jNBvzsoTs7 z?aufr-$nHK=F6^f}>5!+HkR>=L;k$Qa8LpCy2!8TqlaJ*itv?s}Zw4 z%dcDXc`N?gW>U1ITeg_Q*@v8cQ>nuoPYrM7vua+<4R>bbhvO8d>85ZhO6C`~7 zV7X|jzpF^Sf;uDirz%pziCHz~@F(!8^rW_^d>4il6y1uMTK-uE1}5Vmt@4*b^z9Vy zs5tZbn}GN;%JSB8|BKt3;V>rBd4la0PpTmP#f)>l)m|9=rO4+lFW(+KY*jSr9lqAB z9~Wmy`8N2s;zk>6Q0oBfRNaLo^yxC#Db>QP&ofAh7=%BRkQ%z z)-d<`gsA73r7*`yynNj6bB~<2n2Gela$o>R?C*1?Ks1n`#nu3}YW1f-V^hi4G-@e~58&3qs{uy7+Z2a?(aY4%BUL zpS7K`jlHG_K#agofMmZ6I_5*cdU_tegT6=o-vg*ZTkEK>iOsIGlipX`WGOSt(wzk6 z$A-uG=G^*D`{t1cvTE3!JFUd}4*EdW-0R=brT!zn{tHori({s8P}?R10Z>-+EXQy@Pkw!m6G_psRl)t(Zi2AaLIJ?60=Ed@A?saG?QZxE8*?ynl_I-2Tr&S;X<`N ziq>Ca>)vZ~9^skM@m#c08MpLdY`{wVy?fZArS2<(n5o!Qu?l_?eRM>X6aZT>JsG9D zq}k5R1E`hIs7SLs_fnTPuq=vTgc+2gmFk&1DpP}-Mdb%U+w=al5jCpUF=X}Rg>IwL z$ev3G<2=sSn|MPE)}Zht$LOln)f!bEArNAly988o^UudlC-1;+h-!xldys>{8H_@W zuXjpO99C-8rg0>CE$8*7cdQz$b)RK69Llb6^nZtqGLue1r+-9x)yu|gbNHY~xTur3 zY8lJl+w98!=z!q*6IQY004WzDev}N3J*7Y0_Uvnn-3RCVlcXz35!;lFPsF+}MB6dJ zOtY)bfJ%b~6Xa!{=dMV0Z&?GFzsiGj;kLVW_ubbVa2FNJL|P-jeC}u%_33Ze(57`3 z>q_I>Z!|gUrKJ_|lC|L*@cgnDH(P30dh#n4WY z-)Fnr^r)b2MPq|(n+{rcimSy7`qy79B$_XhEYZX^t)YYHRivt1AzmozM8R{PE)3obR%JRH~go-#Pm$X5|0cN7U&&LRZ<22ynO@CnN}5uK%MkG61iEYchZ zpm#o5RnjLf2Cf<*CipArmO)RgTAlZ3IF+-q9i{$An2-U#F1D_>(vfsYLDF~s84$eI zc|tnwFpsTh6g_Fu%{*2~`Wmhk3>vBmXn=+4r_i&4zi)mhi0f8N1;6t*THs$eXlO_G=vVMr zqpOVrdI6L8tak*qS@@9@rSgpWa+&GcZ;|z%B1Hdzv>-bO1J6j`s0{yY>f4PoVd|mR z^q2HD^x#H0_UIS*n8P+_>8kROd4udEsh8xV+>%k(&3-3?|G#ZgctIMRJRaHxeL^~q z%}B{kHZ6`mGFPkb3@C=&cz@I$qJde@fFJ=waV~-$7;V6l(khDhtKhHBNE7t2i_QDK z3QLV&xLv-Fy@Nk;NEWzobXi(Hf`bi06ecEv)13~7BKe7ljgw4FgAH|j{%VQZ=h^IL z!Qgssj`}uqf^tPB2bf}Tbjs9HIsLd1D+Nd3MKXtD2_@-{$n!lL9+pmDb+MolbwyEXWzOp+_iq}c@!mu4?MvnQ=HHT-c5OgV`wJX- zwni^nAtp7Z3m<&4jT$T0qo{ApZcJ#Kw2rGcwEFtM+L+x=fnlk7pHzOdxnm*LP>G`P z)12NEjB}kxSh-pizLtskCxb2%GV;&G+ajoWoL&+NBn z5w(|sb)QF)%iWn`n|a2oyWL`Om=tfVCg1y5y-g7~h`Xou_69yXr?^CD95cH7PqG4lpp7gBOsNmZyfUzX5VLl}b+M^H10 zN^*3W2D$#REKK-%^mQ(QM*-W4@vb74Ja!xlS~H7en85F|H$UZ{_8l7C#9yFyPahc=kKzkXgPjMX2AQ_4aET>=1gj; z^|~0$cz&gSbA+VcyVvTg{u~Kh$F_5-0%%Q=LlFf-sxD(%!`fb>s&x)m{F{L7$o~m> zx!;U9-04N_%B+@%jqoSO`RgAbvzA}$@FVF9bQI@292Qp!m+)tchRI8p7At6tzF zaQ4j+g|jP{UsA6)HJ3fDePsAxl>O#DPZ((rY1+q7z7~Z z0jLm=W#zMqeu?^oAf$vo#cY6lIde33Uku%l*tw%9GexTq44hSVb z9+QE0pSFOV__!gQIUsN8M!Wxzfpz_(@Vu$Yv*VPk3sJm!Uku~<$m82L>-8R?;VM2d zWu7OkIv)Ilh_3F*xZ-&qJk2u)1iQShRKd8!?V8^E!dW|KH&e&?ec~Q?oyv13>ws_) z7?khsunM)X&i!zbNYsWy@$P-#$fZr5cJ)7VBsab+Yun8T@Ou_XqgbJI+M>~yJ2$Uc zS(;jJTY6L4pPS?0xO`hb&+Gg0H^C}zKanaFB4!$my$K6u8a%8Bs}o-tad_$I=b_DJ 
zzn#J`Js8lN`_fjEN#a;2rIJ>d@j}OF?`wl%gTb7ZSGv{l_Pwfype@RN$@ABf`s~#GG+ZbIB|5sxFaH(K=0dtwXmlZ+L=iQtT$4FJRe2%i@b>cbEDq zqQ$3gUSGUmVdG$~PP4RbKJ94Wq@c8u<6}e&^&O!vk2F>;_$aJW8%XM&_Yq3cm()Gy zBb4ec1%3pqCh1G*3f07S3x*Vh(R6J72E{vHFLz!yo4Dj3K4C+sWU=zW$wOOfH%@m> zxTM@pcYQ3HWM*k5JA9xyauxNpVrZ&MjD|Y!?O~Z+ewnZ1GNyf(jg$%({mt9`2PNL; zfdNuOb;tq}MhscU>?-BEVHgAp7W+_T{EdeAVJvq`9VCP@PPekyIbT$sK7eZT90WJ^ z+HLzaH57`A4)fRDD$y_h%BG@xy=OQ?sqz)oXSbyeSOP|K z({NDerCN8x*7h{k(+ zKui&7lAg*RVn`b+U_|vQ4Aa#lzBJc>td-Ne-}3uwp3Wxmlg0OkQk+&g zUK8?ht?RB|ameVqz{FaTNeX4es$)_r&x)w*G>g*Ssl z08>fMs5k1GZhgv``}Ci8O<^8yccJYpg;uWu;^E%Q;qy^h>*~Ss3XapFG!7Gt08ELt zWY?+aod)${4_k{0m)_UB0p*J+D@Xx?eg!zz_dwvnZ`7Cit&MPDLc;R#z-hNd)8wb_ zCl`s?mP&^)R;9v5iMg`OwP$b@jT8LeYaFM)U2REa?YoQVDf4sKSQoh~V|u~uWk9ri z)Vnz@RUcvLY6-N>4b!`U{7H^X(zAM9YY%hNV>J~h&%=UIW&6$R5;Obj?6U~=CF2XT zH>#=*u zHW*QmyXF#;Bou05VbHv3>PL8X(IJ>ZGOZI$LiIVzVH|qPztK`_lnao1wHq^+X zNgdul)4RrSX0E))Htfd1mwP7Z$rN_VE)0*3OLWN;TG0&m>{oGT_!adLzZRZbfHR`* zZXPH^Q=x|JZv&{zIeaeak$;;7nyFL zjGm2Rl7!LtpMcXOVXsuo%2m5_7IPGvncs|)FI=jgja1L!hj|wDKMQhSSaa029NUKB zd6)?ybr(^C1u>w|VBF~Q_3Vpi_ zn(%bGrfNNh~;W2}ifcJ}NqI8?HI9eVY9*mUi zZ*;Y)DGgZxW3S+Nf*?mMM_zP9G5v%%>M(nwuhnSfUl=hdt)_lC&l@!&6`7*r>Byv} zKX$p$>Y0qdhpyK`+&IJ(5SNM7p+F5)0`_ZJ(sN#C#$Il0HCN?dyxt#X5mfWWEuHKN zI;4|j_L(bJiY!Jf@mWoubSSb}c_TmG?~i#p6-Z6N)mm!-$0qLeErm8lI}$&jrrzk+ z4_b*3Q#NRSyzLYXakW1f92xL}Uk3P!a_VRI#z7_fdf$mqFmBDT>U)jyhwupoJN{L;U{z z968qTxrYrpPC)g%w}u{DZPdG7W_h(Cm9?^@HdsV>A^PT%k1k$g8MS#009NzZrB~tGtpmoQvsC32j*D*_ci~w+#iu)yn18OV|WQc?8C@x zR-7H1&9)h&-_YQ8cA9|)INBmj8F3^|cT$h#(}uPojp3F^7f<5abIyIY|Y63TOgf0PPAY84y>{vh(+t z!96fyK{W+Isu6+jngcm@5GL7^Zsx34r3ZW6D4q?`Jq`*4iCnq3Ir ze0AWZMJxM5O&dD0y@t&W!V}woy`KB}j)cNt0lB z7kY@HVb41V*>|$y_@M*iyDeEy<>{@@C$%+Hah3)@PJD4In40SiP4CkK-Nzi+J*7P! z)l}j`j^*WDO^y!M20XsDC#mlC|FOh^ek$i>UBl{p13}*%Mq(WUc;yxN zTsMIeh=j{#krKNt`BMWF{^1GyX0Bmz=n8(q2N-B%*_&)aA1XMgHvI(-`uhj=pIG;Q z{e;O5BEi}~fD@ZDg*jrL1oI`VGE$KeGwLf7z*(M47CqdL{s~b!8+CpB_b>Nw6rqNb-X#V{gcH{qQVnh<%7iXhL7QwF=D z62xMsKrkPZ4O~KKAgLX-YYg9hnv2?&0&|S9h+!xxe35ipN{6Hd#^!R-9Dy9=2e@Tl zj)NG#y$>)t4Jcdqp%D7uT0i`N4R{f6(KUdcsDk5skqsPU?q9=|p_D`r^eq*j=E)5J zU+)Bl^8PPa5400CVxvW25db@qCXHbjNynthpZYwU5WqgO73dMolv|I(3}?!Sp*}0~BVL_Jf)F3# z{buuNX6yqcbzt~=ny{OL_h)M=b_}SlHNARBP4;SDgCZiR;~ibd6QRk;p=G11vY9!4 zlqvCVMcu%kb5R%XUNapZe}3}aF)(uV*e@N}GfskQgiw6TmS`KQ+_f!FSg7f;6V%Sn zTXK@tQDVFN$|Czhj1BGCPs)Q^! 
zBZdOWxGdO~>wI$np7)w;Tcc|*@_gXPAOgsqXy0NNE1!`2#x`R2><;ZYkMrK-Ss8b7Z^>Qp zyb&z0d@*ZVk%tH#r;RFJ0C*Rg9h`e@-){PJj&*Hi&wC#ctB%unVm?VzIsh`0Q-!F=H*H!+ia>zE*+S@yV&~0kbWgEP3VssK9o6 za%#VAG7NR>fu|NGl&!2RBB@c8b2zC>X8)sIxQ(~;(Rxp9c&WA7*y6YE&3#Rly?&`F zV4I#KX2X{r^un>);5#-C!1dCHmPmcg`viIlSefg3+LrtJ1S1ynLS8+$Ly~XC8F6YU zM!4^CUSU_UStEa>@>#ux&km|HWeQVl@l- z^xXY>#tx;gOU|S*+D1CXMmI)>!cP-)v3ad#sAzvyAI5@`j04QQ1QR1eggAfLcEkxd zPlU|gw*XEJ!I=4kww9heK?(hkCEO<7+jw2+Cq()28vs%`yOQ`S>oU}fhKV_0n|N+T z#Hbdoa;v6rI#ObWDNXXQ;mXSqM_GkjOF7MY4K5b{DZ=!PD_FOnq{7_qA^G=p2Dc0q z`h|~pNyd_O%%o;mDZDiRsqNhA3+{XHN7o0u=-UJG8ilxy3=!IA=9OnUxtZ<9j_AwF zZM(>lURN2!rx&IfJUYaOSvhqA_C>h67f>(=I(Z3fTz!&y6Naiu*3nFo@1k~doo%s% zoP4a@LmIj2%KDQu)`kX@T&g#sKE9o=+besnlyDvx4z+PfRJ1zgoA+phR# z*)~xJCUXRXf8PU*8KltTaYt8GQMB`uW`m-I8NdSO*=7{!F7UWhe$yy2`x zE%ILGcGLY&A!<_*(@}Iv4HU-99wi^EO3%)kT?rV`*?5e4y5-*K<}v?e854_{DTvy^ z+2cL8&RoaOPUo9jVws=suCgc&FN|9*yEmQn`?z2@w)T#;2x7Z;&N#Z+CxoHSTrWki z=v7Loo;E@2y?w?jYoVUj>-s@jmSzEOZ~>z;3%aCHlq^ZIE9-t6JxK;clQ)T-#|uNV zNQ?)JRyw|0b`nJ@Pv;7JZuYAfmtqty6~H$~jqev*43dC*nqmO1g)tJu1KmR)gV7?i zbG7LzToOTT_M(E_JGsAMmYA|gWTM+rCTsizVl;^Y^EG%)N}^Y%5;SHBlNg^BUmE`j z`6{eKIEI*WC>nwv9Ki_Gs5fUEtFnmX4=|gZzuxCu7kr7KkOXELvt2?$Nys*!eTP^O z1L`yCCnQ(j7S(@Mfi|a7+S9RhG!IAu*rat&TeMe%vv?^?Tb5q!nSGy{vVA-V6V`ht~Q89km!gx{`@mAW`Azb>7QZNkR7&JCVYHl zU_yLALNFv&<)rvmNXOhup`PZxXE~)wSCX}1`SA-ZFo9pJN0C(G`guoaq(7(6@x1}G zZ_=!Ij4JB>5UHL^7+RZ6HE_gXv0z2}XQ!b%t0^jFbCp*WhvsgQJ7i_QzEqt`6` z8NQUP`bHDU47ZR+_@*v{_%DQ9nma`@977T6rTaETZx;jTaruu}(R|1;bde)5UGfOQ z?oH=xF3Pcl4M;I^pKr=FCy$|xj*eC34VRf+5FDZq_n2Ni66R`si56LGtfStFUI#)g z9an=TU1(Yk6;pZdxybITUM;7c+x}jpt`yhd@cOudWE{~Zp@Df??lo`)+5tK ze;O~x86QcxMb%23wO2ioQ=!q5x45;J3978g($kv9GW#yBh(3~i`xCNGb42ZLfLG1K zJF8-Kp4I5j7M45cjyvriUWs(`a;1BJ@0I3BmUQ0R=R(;@_d4#=t}|-Uvu2Kdnw8v# z->G4DRmgfKDQBK-#hM1>?tcl;MO?UT5cEB$Rj4yzt$xRK1ch_NMz2dv!SJeO%Tee& z6FG~eky5wN*e{PRD{;7nVaYSq9)37&sL>N?e8X5dqSEvr)DH{8^%G;|jotFR`7SF(u3Ko80{uposle}>{U zA#069SI&TOJ;3pBL=9!ET@=MXoC_O6f1M z0>PL2k=BtuiT(&v91sWiz9aRN`L*qG|D&A>SD6-HWDi&CpmD{LtLGPF_2BC}AS0rh zOI)YN3gN2pUR(QZurnkMZ$#^39NR*rSN8A$E*K{a{^HdTs^Nvf8hb7>c26jcvXIAK!cfJk&d_%hc6R=0a)A>t zNp0&A97wi-OLga+aT1DbpH}M2&^ha zFe-{@+WE~a&2r!D&7b@hbmP(qN7h5lD?J7-Ukc1A!zg{+SkMlYf8FKZz%GwL73=)E zGm31)!Wy9&9eV%sTbYR;%$)|ewX?03m~We_MY>8GIa*zQ!f=fuRg-1aD#hugN0K&s zM}hucPfZh$+G#JOB|!1LRO>%nW`;?p@dFB{ko=18M$(s zU1D8sR>?U~8|VDUjY^$?Z#vq=1Cpb)+V2-{rK6cN2N$n(A{=G4*(Mj)Wh+;`i+_tk3S zM;GoiZA~ZVrXyRB9$)vAb08>WjyT663WjP%QIsDNZ}~o|Q?iI#3QDqS3>@5JzC;(= zfcm7~hIru%jX+*TewPjTR7Dt(T07)><0+G@SJy`&a%ZPa(Lkv@__=j7`N{NC;?0ex z)Y!?B%(#0|Qwa!KBo9HMzm8jLb~AK>7u$m{Uv_pHX^m5O`8LmDgL>ipM(cc+L}ZhK zs{M<%wuwTOQf!ZlaQy4knNDVd^*Uj)iji4klarI)>c-_|W#wTRNj|Avh8jQ4GTpm( zjuH~9ajep@TKno4s1~1!#`CE+iiVTO@OuTTT(WrUnQl50?P>NrhmA59riCu%*Au3n zBIL7kO>%)I{lgD9snHj2?I_+KCiOYyLwkL9zFvQ#XiBU=*_gl`wV8TbxK4jA>m_)m zHQp(YMI~@vALXs<7J70cp7#y=OPb5vrz(9f?-AolhCb`O!m~G|o+nnKY+qDI(>Wwj zz1TJ9dsUMZ(E8GKcNvl9FzVbE>y{7}@=#p+affbxo|J#FtobB6cnr!Yr#v6|<2dBS zMAg3XHct*SP)_tT0;@8}W%fY1FI|QT=Fyk^J$>`!_oz(%b@HR*z{Y{(`a~3Z8cKtY zYrC2h65G4##l{rl=Huex&v(ByM7qO(zVr@r!`+WVtRsS{7bI%OLs>JC=gO1;S8PmL)TFcYwLQMg@KWCzVtRanIA$9X0Ki( z-^(LWS8Tl|sNw-&a6%-x!&a)Z*w+}6_?$%jlHpnCWRIOv~UHH;S=7_jN)hS^E2 z>`>xyv!29e_Zd`(7f7n355Q+qp$5A^CX}%=sJV#N`QwkGnp<24L8Sl{7DM#HE|Q|n z=pTaeEEglcf9{d~<#8ZwPz}cLxm<5iQp-M|_!2?YR|I(5iVvuEzD`mtu-V^LxnEZN zeyU*cD6GcjflMJAYAD_3CnSqT7D9CWm&EQ5ir4-MIzwN8gc7!WjQ~oDYyvQ(7^~Y< z`N$?)(X}+}-s>lnkqtaJaMP2X$m`?YfP6Z}VS=Kc9}BPV6oxlfmGZu?8n74UjjTzz zay?t=vvv2k&*#qBpw2iaWx4dVusl{zZGj-rpWjhm2x4^*t zrHWHl!R$4z?rBjx&t%Y4#i`lC7iKNHYvTHvMdAIY4=65g^5-#JB5% 
zXI4qFRf@$cMom8<*DB7E*^oH>j;^4(K?4bhI~<{Z=|1`Y?`zO5#E201?ofHoEw5(9 z6L?d_GlEa zM<)qXdEN{suFG44ksJO4eUj!h!m|BH zeHN{X__;?Xy0&mCkU(iK3XX`!3k*(T(e7V6rrypg=1xWv#16%*bT@+`N`7{BIl~r? zBat7j8YSw=pJ|Z43udD|rH#r8(F4rq2}ypJ+ezA{=y+6dXs~fTrXnPH>RHWI=jnC( zX13d(g-XAiE2(*bl(WC*wrMrMXMwjD!4aWJ4a&o_269(0$C3Gi;Q?YZFi^pkv>1(A z?fQkh|2CB8z1sq8qnA{L2CQj=5Nb|giQS}V`-nc9i6NDUKkz}MlW(StYOt`UH4j9O6~yI@EvrqsinD)K zN!uB99eUmtKJPL_A)Q2n8w-J-jc;h*JAED+~UGogpze=k>d$Xx4 z{Yq2DBuN>8Elf}3TkZ-qvmVpd32xp3_n-GWU z_ErTjyw>50hw`-g>NgLjvyfr#D3cKZT}v>P!}_Jt$_XGw!iMaDVQ)dn2x>>h;dOMi zh0o+w4r11>&}WQej>4%cY*wCR!Q6SktN>@i4ZQIdIncM{1IS}TWV23$hX*yu+Vkk=}z|ZJmpq`qG^bFOmpn1ADDh#jZyeGtsJ06 z;{J>q{MR?;|9!T)eUr=IfcSnLjU2AG3O$g^NK*r9fFeIZ0B)cnNz$_^(1A>ywhDkOR`Pw zgWg9Fu!|SUjY430q!!Gl%}~HX-p)@hn|6=E z1!pN=ldt`)A>r>V6?q5BDLO)za0-kYhI1&(w&k)Q8`5)H%?|GvT_9U6S$Gw-%6^+_ zdMs8`GE%ZfqJ7QuO_bJ|zPPdP#0is|{9S z9PPo$``wc7ZBlp$tylhsALFtYZ)-j_VKuPXyxc`4RmXIL@%?Z|R8UUnTjMq+|EZaRYY7IRai5+mxw3CqTt>d*H@a9T=H9Ccs#KfCDmliO!Xd8Gug@%>-$h)G(9BD{g z6j|2UxgM1-H|QTFv=crOU;69%%tnvCP}#~%qquG9e3)9Dt8>m@}#s~(dLzh>bO9`S58-ZMdK3ru}+%xoShcG&TSH4 zp+e=)kf=M4RH9Ge16`k{iVa+GG#fpb;PQJ@A2otUrw%`=q27sSf3kDtWX6J2Z&cdX z+7DjlZ)J>xVy#+T)hh~J(R-DLa4X$i(ANKF>+XS(p+XY^@D>>S)Fkd+$D}5!mb83%k#SWxuEyv zkhRN)!6Aj#F1gFb1N-I1pE6p?zSZRM=r^yha&l_Y0+AiSgwb1ZYs*Qj#OWEEGYQaw zV@6xLr5-W|dj9FOed||?Oe2+ce>_AxGG*YXYic%ztLMifY@d$l&p%!$f40+>9D9h9 z5;*%$sOtQd+mf|A*`?Q|Sll^?KplGoS4MQb)uf)@u}~2dQuKkxc_Dlq(eLLAq%OZ2 z^W)uj0JT91bA_p|x>bL1WsYC_Rf53}K$&eynh3*-rcBZ!J`|Ck;|RH{c>i!G=FoVz z@Z=q=(1o;(lHGEVJe%2F^8gMKHP=f)T%>M#%%JQ?C?*BVC^B=bv^fE zMql%?h&t#YN$y=(^XeyxAn*No7xTaEN>~E?3}*h)A%Yk=!3H1zIoZ93AV@_F>c5Ab zgj+ek`}CVzokwyLM6l47?o&Xq<#{Sg$w2Awqc3p*YN_LJu?j`h{!+$ln#3M*2 zK~~Ds1f*-ZK%lER@_>Ug#>uKXu%+aFzWAWAyv1xh2^Cvlg-trQ=4RmY7!Ly)KhH}pQL1C5s>)p_a=MN_ME z%s*#;g}4KIWo5 z;#Xl(zQ!`o>y?vWzZ1prs#y4RV*MmEnvNiiZK+FP4?fcVL7eX&h0Y}D@5sze1^kFB zC2N6rz4|MtOLky)WQFV?@et93+mnnWmViosj{O`j%9alifUT|;Cud`}5O3vaix)MN z(~TnL^OuA)=l+-jt?h4iOxDhR>cN$gV4a6ceu(;Lya4>!=58_+>hE1z-7Dp7vz@s` znNpEr?_OpSW*2gPL1jHyk6T@D&78jQOU>RV@mssNwa3;a4yEY`^|8HLbsUix|FQHa zxzqJ|1@n#)O($nYU6_CIn0U?wN%8B?S)bR7e5vHVbX@z+C(q>3ZP`@&kQ*zAAVp1l z(~&AcpIBaFm_=a1Yfk5V<8P{9mYXkge4*)`C)(lZza(~+zA$0oe5K)mu+WKeTu&M; zPr!!>%s^Wrg3saun-cqQiGX}{ysVEI$MSY^&ZbgP%n7>yvtU_?@NtpXx(KmC0#xl zonSC3IQ!6Y{;HT-p(wrjPSe26gCCRp`!2 zVD-mH%s-mk&jx@CAF|FkB;fX6_Bn1qG$FS^H8PU?^MsqmcO*rKT_i% zV{mx${Fv>vT-_*6`p46Q_iOPt^)f5@i{|dJP?;W?neSPG*3H%Kh0WnST=UlCTCW$p zsgT_vaik)6eKSceQgEvD>WJj~>-?vwTIsx)DgxfMOFMLGd>cg2u#pH>m^X{lV!e%U zdnwP+Tv9d6*ZT~vlbBqCa^fq-r)4bd)3|c%jI4GOldy@fmHlt!0-U#$=;fY9%tf48 z=%z8^RGb~nif-yRyD?G4i6BVw*TWA*xIjH=V?rkw>T2M}hWSF8MdaTmdG4n#c1Oan zdZHnU45f+McvXA;h_XaA!|Eg%;nLN6%|VweE|oCaMXaeB*dci-?p8V8u@|;~FD=HJ zFD<0kPl65Zxi7d5$4$dqF(ZDnoZT&XKOyYSBPvW@nJ&&P1HG?BMvSyGdwwKBH16I> zHt>1atY&;%`omhYK~`v1dt&0tE=p;N{ z*c3E;p3n0eHln8$pMb(Gj4z&aaaz{mTD*f+Y^h<~(qRVgYJgGhVU}&QjnKEO#5zr( zhU&W2UIKdR(cs9fta~P%PHQjXYLCy?Z^ra)wIIcku&u7~m3~gbEtWf~?rNQR&US+n zCFM&;H$@zST6jAolgp#ky?QN4#^S?7p?Q0W0bQ0`&P*GYA015`57!%6?$1_g*jbco zY7FbJE(SFa@>aI}6Q6Irnt+kB#boGOu1r(&+dUpn7s@KGcDDU7H!D`@!s>TpLqvD? 
zk#@j3k}X9sbh^DX9TzriSIJHGWSZ(ByzRgpJ&0F-y7X}MaNvAy{G_=fql6b8??Pk1x<6th|YjzE@B66Ej?q}2c9*HFAQ2-M&)G#;=Q6)@UG zppeHKoRB%9I#6DaYT)W+7z8~k3qOPBd#{Uhq}ojVX;(Y5&_N`C=7n@f#q zp&A&V>|qk)NizhgC`DqHupGhXW*Nln+uH@+-lppPJvjRnqn(PCo{xMV&-kOk2q;x> zy^Go<+evJO4^iRVwut+<`0}UO-g@(M;y3-}9+huty@Vk6RAO2BY@{R4#JVS~hFtl` zScuZ^&P%XfsUDdK{Z;Qy(Mm*5eGYf@*Pg9@7TKS~Dn)ZG7V^k2eDrw3(nF=)3x%tD zexj=jw#DHzc?5X-A^&t?BQyj>RhlE|8Ztrm>HGTPxh`yo&&K7h&V}ytj&>uq0o6n6 zwAZKHEXoAC@@mGrug9PCV9d#by@i)pVY0k|usTDkz>oQz`%=;L-if!%;wW50%$*07 zgSrJ9Yt6RY8n^2@cNJ08yz(gV;YwuIN`QA;&)dQlSg0(2OCcGk#iApqdj_~KF_&4zYLvl-^_VY_)_DReu4tMZZrwDi#j2Ie2rUlU z1PVRP9aFlfME|H-KHH|@pvygVT*z!F%ve(^EsC8AXio+C?kglzc71wBM>T8TphHRU z$7*>qx4|AneS0ud7dX>u7d12Cy-l8GXFJcOay~v}P`aUk4b+s2FNB3~Luqfy@VQNr zp={9rDy<@A6nev2ke91S{UuC^MrQH53}$s4;x}xa=>-&eBYi z)%w%z#*QziRwqd;xWh#`=vzewWFzcE9WCK{PaO{)(G1?GZx;n`4<~K+Pq|*9KJW4+ zVX2Kqh(3hlY2QQNm05lxkyG5=X$d_|eTEmt2eS@f>VxN({^n79>|5c(iBpqpk`nmU z#%xc+;IvTVB6nB6)7+3vE1pq;kYH8BTTy;Fy=H@SY%QG={)RL<(s?px8w1$?gwv&o zgI47Ds%4ltJ10VmR-Jr4VS%4>+v^fn5a(*KOz4iv4u9N(X?QIXgQdHA9awlvf2+@Z z?_V@ep7w`XXQB5>hRXWKI!S9fXKzft2>-Egx;*TSI)CnDEw3w@Kt56j0E`BSI(qNB z)_z;e&Ax?^{F)s3RfRa0WHkdyx|r%4mgepc&IrDJVjNVRnyL5*&xkowoS=V$tSxYho7KHX z|H5h`f6Y(X(kHbz1C4^S4&<|&cpd~H4g&*ibWVeyjgD<+3&S9mSGoVqjrDK$68L{| zzCfC1+Z*RKjgJb_LRT$@-txiNq4r@}tZk*S&Kvh$md>}y9iD=?bN>^5^}m!z|HrNr+6fDr z+)fA(CADszLM?Nq_~JUfCj?~rM{c|`?>9Z5*ty05_hZ-?0}E!1mJ#lG5>?T z_YP~i&DMnzL85>J5$O=5D@~dxEnuOD2sT6zqtZb@n)C!gdKVEzAt)d!BGLtc(5o7H zhfoEn2_*y);(2^$&+K>ho;mv+_nG~j^Ih|o7x)WFp0d`x?p5yX^F7dY<3#{DBwC5X z^cpVEhSdSf@A&n?7z7WP35}cX9S6y#SAcCgbKQiAKs(@1Ketdr*yz9V1Ja%R1rn(A zi#{LF!-v3O0WvI5@DHyXTEb{5@T8{5<`w4~3p!k9y<56`<9R+3R-3*W^Y3N8%U=Zv z^#8^8`WL?Ve+FLwwK`Vh#3>&erB+e^Bm9<)H(ex1Q<}baW0ww9)QA0T5GZGGXld}4 zyii_C3!(u?o%(md#a{zvX7jNkFxf{JM`A1Ud7OGL#YuK)raq*UNA%?Ktibq@q~>hp z=V=Mq;*2x0iL1C;0!7NZ-Zer?t6xKUN-BdG_N0Gz<%zeb^BnxqUM9?zC$?@rO6WTL z^-JDNQu)DGi@CwiMY09`(`{J0(yo2PpXvMo!ES)UwgvM^P3;WO>zIdgNMji1zAt~L zc;})dsj0x0H1}AcLb-HiQ)H~=-8)9N54O-pk-W#)R14Q9J}ei%Cm%2P_;Dhm3%xc> z!>1rYVvAW$#h7P-z_YHIuX40&iOxuQn%%ss1l^CPMA zzU625zx4XcYGL}nq2u}=`A__j8zgjsYPAbv6Cd@27s&sDYBF&7`5=@{`d%(QnlwIw zB{4D2{5fWr;RafDQdh-WYx}+EU+gWT`ltQc`wP$c>F-w0;Jf`1xjKNpyd1=M75tU= zh-7Z<<6`yif|ENq_U*JN2burDz5DB({6G8?QbtRHoj^1Vly|_>u8i@2U$V1H{i_5( z?!P4f{u6xt1E1vo{V?#?{UNOb;FniJZPy&a}yh zja^)?kSLyDdQolq;$>oDzcG%!_k8s&@pijSi}ho4eoU=~mNHF`BtVy->`WRc^JJG3 ze5|XDj7@REO$KWR$CO#a&TxP!h{B)GW$~px8MBhnE>eH6!#obP|frY1L>uhjO6A0q2Y&F%jJo&GxN{C#Zj3x5TXjD{X2 zSp4%ZuWnCbes|tNpQPPi7_0C;+8Q;I%lktZS?0>XM ztXOt1jwM|#SQ^FUV`b3y;{NAEz1M?!?gga^<)@>xk57pN$$hV|eU@pall`49up~D5 z?Vj<{(#YFUVuA-RXftaDWw0p!dP@MT2|+Y<-BR&EE;@Vw;}@=SLdyUpF$S87)xb1q zuU^dh45%A%Yf1zA@S{mB1BCGtQS`} z8~KiAK~)dX=#z=&M8Sv5S}f@P7Q$s9{Vj5O9XkK-3kjPoTf$MTXg7$6#^L->3~Nc^ zM7t)_*kHa~W@(heie0MIEJFEht>_g`6V6@6Pd|P*ba@wYaE;DzFI%>r%!=3VN4;sNx9Tb-$ohB1mhSNQk7VPYH7LH>7j+EtWXTZFmol4t_3d z6#mM_dtPyma9l<(+n$VlYM|rgkBe4T-Sqw&Yx74h0dy5b<6NN|CE^xufz!}olXo~k zPfL z4_$DYf^#%K8S_k1bAARgcOuU}*Ha6gd+n|Tp8F{pbgtHFW9&I8FEVkl0R~Lv8S~~3 zj+IrolASAcL=x*Bhi0C+JI^mFqa)0OH;3$8{Ht5WuUYcY|8{go$(DK$em4M?tMFS6#o^N;ML$}3*ic*D1JJ%;wRYK~zROY!wy z>|dD72hA@(AOb(bNR>1hh32QeS?4YX z24u{xXTUQw_@}Ti+R084k|dZkn6uz#o%MEP@O+{0tzHX(T(4iYrw^C)1d; zXa1R2f12#>*w;_Kv4?KjgdzLnkbIC4iaIG85cmR>eAQ~i6IE(f*W2y224TsPPlZ$i z$~D|ie1UlXZFBoSJgDYpu|;mWk{Y%les_Rokm&p9ETcCkPCZ_8nHekyzs=RFFX$%u z5#T!PgKGAa36SR_?D657x`oy5SoHkf7TEV#YQJE3ZwR6(NUiOkhx?Jd>xQp(9)Q;B zs+0BCB9|(q-2zoI^?gu_hTq_<<;x;k7hHV=yxuuoA}82CmCi-vMSK^LN&IMYllj95 zMxn{dkSG6FL?!>puQPl>+ONb^*wvsy#tu>9(VZn<(#k{5m05uVn`&3U2fer{@6Q8s 
zQL`luXR*bpkr%FiZEgasN|*;_+htLeHYDoxsB5C75p(9At825@Kewy&JViFVEnwYT zd)8GIlC^X4)gLuBzwWgD&O7|m$);botN|NTZ<=B?B9F5%4KvuAWL7%-n_!K z+v`c-X^wL>Yz8JUbI2-+BB9#EG0Fh|sMy@VE{|>y2c1wd(g{8NN?CN`iw}Kd_R!5@3?MF55~LOKT%QiR23! zAB9RfHLms1D;`Wu^!IsXZpy8c zxLhvcW8B!a6zfTCyh{^4>5?k{Pl4HN$r`$gh=%LrMKK&Xh^xc+fbm zTAOlX7bz}pVk_TE{mHUe_xp6FWK-dj1sC^19=d4*r3%#sOj%BY?0yZyaV!VTq|dWl z%;({9w@KQR-K8+d^87xC;|S0jYg&J#>Vb%nDTql6o1co`ym}9F1Tsd^0gJ>;86^@L z4KipBN|YqMI5$_!zL}T9^QNN0N{6pp{9O6);VxsN_P!vbv8Sx-S<W-4-v0Xmh{*6PnvPMQ7ek)uoh;hX=3Avr%@fE%L2Ii+_ZfTwx_Y z{=i6>nGIU(%GehYgy1kg>#XtifJD&pVvcIRR63N+`Dc;$9Y;M-)v^U{x;}?wPJ{ua9hW80jC=VI9;8` z#n~hQP;r|EH2tjTK&Pmdu(1Y|^a3`zd}q6Rpx>~3IQ#feEL0h@gb-SoPf0rDZ*-)e zV+;&QDHqb^7Uds!%h2R;UmNk1P^`C9PBNZ)@v-BMtt$)V?8&RtBW+gSuIKD%d_D^L z*6;2<|Ma4+J;N-RNO+38m!b(q78p-8Vc*UltG10EpRsf#erT&~8z_yTKD|rPG?fI1 za^cUu^tb!_zwiI_kz4|GiV`oTR<IlB?6#;F|=QaygIhKmv+W0^OQrT z(OZ^ids9AGx`I6T!JdSF<)Q$dHbd-)bA?BR5&U?=84MPHSO|J$Mqoul&Qmr66T{qvwZ#ebJ z=b3&|)`zTTwz}u0zMSOAymxDai;;o+a_AdNyZ!s6ERIuZ!12tr)e8d+d+|R*BzQA2 zYD|S>7lXMI{hD@Rl7H!}CSsD|dqsdpo4LB7eTi%&r_<(zlW*4r5|bW+@x=pw@un2m zZ-t@4gfP?w;VM9Z&h`vFpb8-sx9Y5b8MsFU+$liQo`2JR|Bu26Rsx4l{3&7>$hl@V zR6J5cCudt!jn4EmEJlg(+8&=KszD4iAVv`Md74nSiYu5S)tl0!Hd+)b;3(O^O)@BX zz-N;yGHhOYl>eUW#O)L6X;ePQj_04(@DK2E|L`O5IIxnLxWYRGA-bR*iJ?}_4cjcV zFIX>(CrQ!K4{g|bN1UbnE%mZV}<~OZvVo41{*)2_14jK%>4;^O+2sO?<8DAJXwm7zbMi2PSTI_={9twhp z*8$q?Jog6dAJ>U0Nx{7owzAW}2rh4#<_}+C5av0VrN6-v{v%69`8nLBs9Gg}h0+ng zGAe>kQ}BWIsjw81-b6^d>jX8o zuc}k-gwIme?3eCuv*Pz2Lk_GpG*~CQL$ua+(vXgraAdJ*d>Y%h16s7?fx&D0s}~tG zG@~0vd^_!8=b^loZP`J>LSO#8)B>2sm2f`0Mtm4rgyNBV(9ElyJNSedhw$@T6Dg3G z#pDYlf{PRbk#N1!3%PH>9L&DT)hp@o{PgC|`*BPaXJ@(IsbyY$n$)gj2q$#IdvO7v z3MeI(n7LTZONk_7c_`+Z1#cc-pFehCmVWIlVd3&FH-YfNP)`Qnj1Em8V=!!JW!mxH z07K$RL+Mc3$L?>`(~!mTVh{d+q|<17{Jxl&V|y>wr$T6pU=M&pZ8aeE?*swo(skdg zZ&1(-6^VVdaew6Z5gmdBQ)`NuK11Z*34ISlLeF-s}ylVisT{TkrA^QrP6@xY{PbA6s`&GuN1m$UQk_Uq>D z%uT0rKSFqBg+T$0W9_4(%SzfpzNCdI-9#dMVzjxz(nGJZXUTc_X6Z3V5Q{F`8N6n-}!U3NxRw=JD>nC!{d5xsd^PqJUWmyZMOlx|L8NZc3mR zkzo0$yu)+u&Z|`ZC#EN^q1WE~&Wer4lQQ6dr*J~VRUnt(q_aqby`T7UvXfA)DN1C%Yz&7&T;G_=nw-ltgStqJd-E$}W<^zYDTb*X4>~h0 z>r_A|#dSP;X7mYCrhjO1_-y@{a+r1w&`x6L)&l!{rkXwALZ57<(q!7Pm0GpDg?IOs z{MGO_G7rWjhai>@$Z}CtMbDWb+Z91twtYw!(2N-!dQ`#HFckv(d|#?apeO@#l%#1WVw()q;4HJBCk*H#WTCyl~jt`{vsy zCIc}dL~xFQge1rHd*Y*T#VOGxNQmCf3+MMvxBJgik2BsdcV$n0g-FtF><@wV3~!m-=Cqc=q|Xv*)^Q zCFojTsd2CgK*$}{=dBeZLhBDgDC%aw)CAG{FzjA>Wki|B6<19yG~uTV%XituHrXUc zPP4{%40vX+Mk1Xt!|FBL51c1m4-y&-kaHEy;dw8wY3d!#q@Ol{flA#%)g=O=8P0x& zI6Ns4ZI|Wq(kI=JC0F8>6Z(RohQGk$IPHuN{7;UWQnMi1>d0ZcDenft{V`aF3L%W^ zarf9f-<#s{1A{6p`+em`c8kJ}oXCK;30`Wd7VXgzwWOV?G)uZs-Z$NseSwYrhVnOC zez;C0Hih^z891;;&ZlgWo=q1mvrh&*N9#nu zJ)&d{oCz1B;p#0R?y);Gwqp5X6_#pen~hQXE-^Ul%7Y%K7&R}`TF4nw<$q`_g7jHWmQ(-jV|IQy4r^Y z&g+RLjLtDE69(Fm)V@t}^%OW7i={#ljun-4|r&(;m>?)GN1lG2BTU93T9 zVQS4V)BMDC`X>Nqw)f>hd||cm!|uj|U<@<5loB|Og`axk^LFY|<)_ErXsUxUlkWGm zt{lqtD1L6EJtbal4w>CR3}eWx)~NjkQ@O*^yicB3Y9MV(KWgqO(mv&HowTAYv_L%u zZk>L~s71pzCK#s3N^3!}0hbOLz?r~_zLidFNNM%SI1EbWcui*sDJv`rPK}g#fys-M}K`RI~ldQh;Wc>$Z@Y6*9pljQ8$|uzRk%fN8!P8!_ z_R@Iz)8_K9<#(T#I^Mp9RsY@@{2spi4UGH8(ePiML}04}{us7r_FG%2VUSN*Es7#l)@U5;mqR;381k+&uYR4X z!}V~q6`quLZAoD7L!;cg3UOw+3Eo_OQ@IZ=_VMwyrIED;C|=at^e@;0OMS*{G_y|U z_s3@5Z8+eHmN_?}lEXNm*Jl3x0jqjY4SF3OIQ~8V`gwZ8YpH=MI zsLUXCBR-^f&_lLe>AUsl%GZK0b%qo8ZTNQ_ga-(#BRPKY(Td`a>5&!xd|5}fKosWOrmw(xhn|6*U4z%E0MjG(G0tGBRBl925 z#8bbAGIRI~7ik=r#L_rXWCx1d!A1ZeA_HvRp3V|~AA)$sqIzef^w4m6HgAi-IFXH5 z*OR`@nK4MZhak4iI;^TWILuvLf`!Mp13@>tn8kXZ5APtPwItGPxe0(dyS?rvPm&~ 
zkNV}bZwtxwL4eW}j%|!$+{7pTJ}=8rVe9I#l*8o$UX0$Ru-#_tZQ8=Y&cbvJN1-KE|a4u5*~{ItMo7osgl)9@@fOm6~CKFB8%>TqnM_hSh* zmFWbe(U>jL95RQQSVU}S1JTs)KOoC$K+FXEsDu0iKW)LLRvKmbr;rUxhLgJS<2Z^g zznNM3Y72@b%PG_(@5qNJ4Oix@m;;|Yte0ybYZX72aUX`o2j7U^)eDQz;LjiX&iBBr z7#;X7o23c8dVFr~u3`*x`gjMd3;SOPvHuyIP5>~Ar$ton0{sLf1bU=7WJ`TQa_K24 ze>^AT7Ev--UfHuNBi-VGMV`jliRlxr&qQN7VAp&Buol|MS-xfeqQ6*O_tt$eHH|YV zBd#Mz1#KaV3%V=M?vrM63?nV*Sg{Gx> zR(-Wh4a5K`y1tXU=(1rvl-ZnW^FYGl8IE0=iTZQ&LQ`)&7H*Yf;EE_08^M6MH{oN|5Z_Cf72t4yRxF>2C$|q{o7;c}hKJdNT zcGxG~z;cw#%H;l)pky6%m+d$ZDL?#Mdexrd9;n!V<3nX%aZ^t*)#*X{a;x3yzRyzL z8YhkNJ`@IjDt&HuToWV_D4=r2O^4$Yyk>XY@tz|hluYR5yAXucxCsO94_k>pZcqOD z$$9!gOsxTK!j`5_oULD>TtW=uhzX8I2Lkiy9P;WdV*`Yu#oSLq&%*nHv^;mpku$<{ zZeTX)AXMpGI4OtVKpMTp-vlNH&badlGu@*R_yclX2D4r<21YB;`6)m-P_jPO&(eG1 zartWp^LKsuPGWufe7g6aUV9V_=TC&6uM5(YqW5A;26=#vOUfjU21C(RwFT^A=^8XQ zQt$Ir`8X{J$}^L~gxeN}SFH6BJCys(ga~EmBI1L0G{!ETiDppNi(9pMAU6-c5 z;O)9I7zCZBi#>BYWO@G#Ozh285a_fh{J@&UuB#*12toVM`%wieONU8GQ*uQl*vSu_ z8%9TLg1ZgNEU#ESW-;r4WZN)vrYN3(Mzp6wsD|SywFKq>c1mD2MYNtTu+qrc1?%$Z z^e)4N=jc@j{#=g18V#ws7|3pjix0K7nw7K?ssi8VLp3PqQFeN5c0R>1+loi@v#IOH z_UGX|M|>GgU{k8K1B6!DY|mLw*GXZNyU&dIIb{P^ycatYMDHd9lcNxfRGNHnGE) zsKdnb!DEMO$>gE)v2UuXW_pIQEAo1ym!oX;dk4Q--Ja*wJLDe+QFr|*Js5)K#qkx<#*y00}M}@x5A0^7Ry=)a*3=cmd79*zvd8~Od z0lKa3oil(MXwN2?4UY`s_lH^dzPuWx-%_i4@#y8aqqIlF4g0pEE$1h>Ud2RvekH=I?g3^XScb zIHY<%-sGCJk~~LE_{rGkPo_%O0-o6j-sL~^6*hKHbBP&PGxV+R#sy9@mBRq$4%}^F&Izc>yV5A_C&AQ z5DkyadSBGe90dBSHG}q=)t;!71?xzgV3h~WJy|8)T8KbF1)wj{z`@X_LmF0 z4a}Bh8JqZH;^r@NU!UH!{}VH0$8sm<#7-Jo91IdTSqeN=78vrGh$`}~dyNKT&xp2F{5f|cWoXqsV>JrpH}kVd!h?s z$@j^c>L%gSJ>ReP43*uU3xbIAC0(BFW=roDUl=<1Sn-&>Jj47JmH~m+1<3%Lhj>CD zUcql2NoN6kIIsfi+4I+Cr?kWZi>T-5eF)-bzN^7{G+`w1A*lW{boKy89#yb|U0eWz zTh3V%KH|py$6H#Z4ad28o!R=)8G8 zRhpEs`XcwmVP7oq(GsDcZMk{it5VFAM35Bt03*100%jFt0Dr@3%cu=k+!jbaYzhK} zyG{A+G6JpQ2ZS02wCns+b}UxtmYAJpppd88PS4x?0g=Per+${4&isIwg0hMfaNONG z0ObRB2O3adY1;FDKt9faZ3r*N^a65lrvKl*>HimR3k?Avxb|9r_uU2{>c$juvnrIF zsWOO&lDU=v*C2t)VLbU6Y1iAi{u_M6uY}2NN=n-zg0$AcAphq98IzB%Ux!T-?E^#4}X0t22dLxWOIklKwc)hDs_JJL7y zzaMb1iD~wNtjA3%7G}&IH-&661!<{~R%rXDCc^UEH<4`UBMWo^J@M)I$0)(%K1IW2TA5g6Xagq9YR{r(!e=XgA zb!6Aw?DWZ!3Ce5fr!}#e4jC~4HjeEsbGS_J~0Y$03!g1t5zsh9X@6IAZ3gwfR?Z%~s zKB9=lmk`*DHs7P)rO^J76cw1?DNpB!X9d-hjju4{*D+Jzgo_LD{(!uQ2DZKCBamUBLa1ZGwPmlW|NnjKV!$PIH~g zosAmgIpyri9G1T7zSr#K^_T+4^M??~Lv2pzR`YwB;m+$H5ElXu%EzLn!_r}l=NzwR zyHgSq)&Y9_lCKu@X}o353E@XNAwh6IiX~`+s-VW=*feTMX>x^$(mbUXk&hPg z@jla8sbqyD-+RiGWV5fBqmk`0J|g)PFTy`e^j_Z;e47+Q`)f!BWLLwHV1se!Sdi9x zia7XYG)nkX6_+btfQhOQoJcb*yx!;_=F^kE>y7Z9;hmGu5ve+#?mrL?%V5IPN?@k5 zC=2wOyO{6V@0{+$y_0Xicl*^W|DI2VhXTd|xhg_oq1W|ew#Drkx^Uyqk*jz3P&=Vs z@Ev|_#Can|ZiOfB55-+}VXg>+5dqsppmAD|Jt+Q8#4mJ3$|da&oho?}s~#MlC+jQoMTi#1sSs5sdkyzPwzDXcG#)@?OXS$u?#_HV`)3X7NJ)Nf6nx z!=*X5U>TH*V<=K9v^1t%AYh*dR{4wxY~ea9AJ2dgb1?DU-il84hPdqt;n1^zzL)A zTsIVpC}GnJ^-vTO=u^qBtHgOga}8=&{X>mR-U=L`Q1BntT5~9jVmJsZ^CXcx|VP{ zV;-e@%YshAHP8zQl=gNzrQF~nxrF;?IX-qS^f&x?&i&BhUB8f?{j~zYXCO8pc;wUK z@0TR@M|cw0hb=6wx4s%}^!#wWK`hKUhTl5E$NJ9AH{;+@bQc9yi`%6oLXkdA>bp-Y ztHs}XZ`SA!r|B;lXwUD}-O^fp{zfoDBuEuEVH5yB_sdJ;0K1!mf_;Eh>QaBUik!{? 
z<2yu305kOyR)?w(L+FvlAvoe0Cm`Vb6T&b%O~7_IurSPc6bQ^B0OsQCuY+%kYq5bq z=^%*R_5-VbIR61+{cM0BI1SP$Foy(AxrEw-oO?u32PfAQM>_*C>!u$JkneKM=vmJX zqjMEjmb!_la&2o?x;IQ^N*~#kI79n}>YquT+^J9CTQ9(30%%+_}+ab6C7&qtN)kVIJY!U^cL7M)Wac z(MmXacff^0Y^~`Jhz)Tw%J~L)2=8{wK}~D$<+-&PT1)#wx5~I66W9oWXbiq~Dz*VR zh%?~((pmnA*gG-=eY7(ra3p?@t5=j+|6FDj#2r%-zchP`c*+txfv+Fs>b4{qRo*0A zw`@VT<`hA$Fu&L*^F%nA^@Y2lK??*SJdEzdmPt1GegjP|#tlMBJ(&*7kBDQ*GkR>8 z2UW#4Ukkk(+)#$*I#7kt!x-*Y@$>^`8=+;l;X?seCJ%_b*^s(nfFZR(Ncd25O?)`U z9t)$OSIulbc568047L#q6pESyA{q@K$zlT5Z0dj-0j{$#9+>Xd87D0Hk_%>i9`I?# z+K5QZ0W(W_@7NJ~A0(HiQ)Lk}5`-I)+cWz?zy!9G{ZoSDXWKCtp~OT}1Z?LvjvnHq zEtCm%wkuL}C$+cpu66&))2gon;j3WOVqONkl|VUcM|%!z(7>+dtyXQs3&JiTpC1rg zIJEVSk|IU)yh1!V^r01~T5WU+H zpdlY`PJ!u+r|7(ybmjv;AX+!T$Ea(A)6as#_@95rW=kalRm%48+{suRR&zFwp!00YbVR2HxC@pQc(?x}Dp6j&0appXFdN9Z^(U1MK@2ILP)o z0=VL*iB0FU5Y?V!^_QsWrXb|ixj$urXieaW_n6kpMAT073h1GczlcZjl0-My7HZSs zqlzuy{e5kzHGP5hO<-$Mnx}jY#wW7G`Z#RG8nbBDUg0l@SeAg%%6=8vGO0DGa7vj~>>wB*Dror8>|IRb-WXABz4 zBaG|uH1S0Yr59j@uh^p59d>LumSQ*nOx9Zf6#uXNXx7*w%;MS+PRxBI{BEFzl6B77 z{%I2!%ari|l6;-?R>=^sMYgdSHyx)r5mp z-PtfE^@bXU1IX2G{{CvBdj=x&sPXQe{UgKYl1?e^6@7S&^?jE0QPJQ%v&vBMpBMTs z#bo~o-v6I|T|g1_CcROS-fd0~pB&|G>X=hYPqy{^{_tA|o|EVsc1AdN*SqZL#ZxW_ zZ-F2`APBQbbit1!>YTYJVqbmO&UL8vOd2b`aTTjvSR8lKddbAXo>xN(3?|SN`o%(l z{}#peS8Ykzc@{?gnS{aN*A8~2kHr|LWLmU$0MD+DsjmXn`M7Pdg&&ZRB@iS%{EhGO zr}tyl4S)xR4FOao_8YmdxdX_`xukqLyckc{2UksSg$mGnrzDDoaKO?M!9`OikUxRT zcOFO4>PBw;bj;qkC2w%`(lErpSD01c59@u`&C@ z`_&x}xCKHuTacx>8bWV+2H!d8rp=Eb8m(+8GPvL}hSaF?tyV)Zkzf`NjIw?A}s);uKj z$FxhfxQ(VNf8`ctSg)VJ)uWgKB#6n_;7XRvQ#lT$j>kjA;-(f8v3qT-{ zd5}8%Qvq!4PtEHXz46%^XGf}W#nA<{Fk-;(g6W|ygXGFq9ma!)*%L$9;%7nttQG)q z4%~<$PCY@pOrhrT!=ZCx19eon4gbnUE2r-8B$&s|kOBrRHn$j%HnO2=pg~0@Go4Yk zy4+(IC9|=B)@)Y&z`5*BJcS%Ce!B_w)zDzG`X*ZkbQulk|3o{mU->NYJ6t``EfB#o zf=K>h(o{Yb3O;1^u7bQmwGX%hNTUUYkAMF7Kfi?*7x0kpK%gEyE>C3v$8s(?gUyMo zx>G>k6Tm|`WHXKubgam(&UM9jt7q|fCP}m++!NZH&qBz9nDHz`5v%<*l{~+b$WYHn zMT`bBWD8EN+bP3LWOzPvh58^)S9NJf@2mM#;XfHXzkM11nAfB*%PDPOq@i?g(+E5K zEoS^VW_JlT47Jw_&P_?JGn6%M>OVTF=XxYLQRbE@*G$j4jCFDieIe&P1oj01+I^Yk z{ZyM}Rhu~K&ZkXk@`(lC81wqXnXL7~Tmq06QIPHC@tmb@qI4rjUKD@A^7>3R*WVB> zDvA<3-+o~fv66hUs9``_L}7Q>-kW*s6I|8VTXb{%pRaWvEQbD}Vd4oN_-MD{hq7f|$=OITH) zodDY)Yxo1AtD}v$MQ_8bIurH*qi0zJuKevFq!w=c6>ilHltPhZ2QhPlAQQ)5hg+2a z%=^ZKD=-4!sEDF9D5_1ELe!F`WTXa?!J?My?GRwo<;*qeHH*rv_BLC7O69)6c4Ax+ zXw>bd*ib#tcm#W;I#cdz%D3jaia7jwo|5?^9WgnzP>)sp>d^E0^H6|iRp>*6GOcHm z(k!mwNJypS>n721i?mA(YIjzLmIo6`iN}X#3~xPcf(~VHfkyf{{D74{r36;{1ZYo% zG-?U=mdLY@1io()y#37}yQF}bOU_#Se!?=^_y~ZHEVPqg21hq1_ed)(7ZtBazV8)<(~Q*(UYaA501yJeoiNyWzxp0pEpTRd=-RL2K~_3 zflHH^T2zTobp0F2=m2ppYt_C4Z1IpJYCS?%A7ZB{ca=fX7IbPU1Fwq6BoNObVpf^KU5mE}Vuflz>!g{T8NMX1MaYucZw zpVa$aa%~Z~=l>k{G=TT87*?-Z;vh;_hbRF|5mr{=H- zx|DhAIQgORJ|CCfcX#B(VlpjT@D#=$aoWR-bjlc-wK>@VSBF}V_BdxU4bPkeRvNCjPU$7zm4 zObr%_qVzX<6SEQ<7IFrjLQ?6o2r-5HOGz<_b?*(vK{0`Ao%6-XwW*X4mE0|FJTh8& z;SH8&WXYFbf!3Mfa>oiow>=@LC&yrwMv@)4UU2lQv5qLxMj1>_nKAM~D5 zRz2G6=6b}C^R1vDk77k)f{p;Yo*#q;x&hIGr~vhyESiBA+2de*WR@km?n(=M)Z83PAG$nW^ZCwrfO01hws(s@R|J0J@h%WkCc#RgVhF`!X*m@C zN!RH01>`}W@@ic>=4LJV)*kR*`r5(6RH+m&cWg}pg z)F9jMgOHM#@oem>dp=-{SP-+~5Ogg1OO5K{8nM%C7yN-|g3by(QV@bZ6L5z3t%`aa z-O=C2)mJBtgZTt&>YZCtyr_TiN_sE;xbG5pj+cOO_Ul)??$rIWq-=+vCqL>CpE3jzngMb*gcUIet1sUJ2v@=i2I^3 zu>kD>8)z6&G|5+N&Fs%dqfJL(V{<>By);7-hKV8Pj?waI;LMb!$v}HYr|47waPgA zDl1fR)ecs}_1=nah6R5blVR{_oUQJ#Y^Gh(3`aK1B)gdz)U{Ls6!3ozp8npG|DNvr z|Is_JF#r=OL}&NGHv5&;%OaXnt<$ACRNJiz}YNmS1A_Wz9;8G6>Ew=>BqO@lRK&`1Ot4nkBDM z!o7%_WS9Q9G*9Q1NdA1SwDC_onX!8r z4cxp0JX)ouG%~fH0N`V43kz*s`o49%nm;o> 
[GIT binary patch data for deleted assets/architecture.jpg omitted]

diff --git a/assets/architecture.png b/assets/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..1bcd1283794cd9d3118e4cda0bbbee0dff71ea14
GIT binary patch
literal 952555

[GIT binary patch data for new assets/architecture.png omitted]
zLVJ5V?^?xo+_FzCajnXFXQS&K|HbsuJYI!2du1_-i?Hlxg2c2lKAi+g^61{~)}jHQ zM0CjwlPIqJj~ZfePtuQ^S58imA=)9&M^%uyWSWqm<>ABk4NK#cx6Va}M%E{2UA3<= z6C;8bWikyldtG+a7wC3?)qkWkuGq?YyX&KbXN4#p+J^N*7%Hmq~sEa z*QzfybPHDMsVXxOK%6XZXoH>uZ2~hz2a0vt?FsF2fdL;JKgT?d5{UwS%ZNdVLu@`rvxGml9|C!OmGJiVNa^TuuQubj?|RFCm*4wRklt z*VbDWP3BEkKFJ^71{k&f1iH&x_V!i#1LRBgOPCXiLIep|fV^cwnNe=b361+AaQn3m z4yywFrC9vPFm<`UmLct6L84qH@uu!iAT)SxkGV7&qCHoh3xZ9_Z(eSIcY{W0*hCJT zz;A!vxKhAG^fj$pM0QKL2r>7fb#>uaOV+r!)>nNmlfWzi>0{S?R&e~k>dgY5-xsgFF(+WNLpv>=Q16@LHbysdHl(=)g3IjwA}!?q+lbop!OF}lml_=9lecZ{ki^iAVV3x&&Bu~%BBhk3`E zPYYpu6u?a9)7o_mZo)hektAA;_O#_1&8sX88 z^>|||=n45}T8nDp>5-_sesB|BTuZ^o#QGZN-X6-|Ey9fdxpaCfxm18iv!S(qrOueX zw>%Gype&o+lLRt-cfPk5hzqwmhbrdVrl=corw!a$N;!!q&NLdOciTEl+mjVK$9IpOUxBk3&Y)YvPCBoQ@6(?} z1`g&3zUJ+@BdGLy3j4grLjvDubsy0_aMqJ_>IqO5H`AdFX&HHn#R;mMt(segu}V}I zfn~qFP!?_&8{g6Pm%eA>{8vy`-p=OMI>?gVIY~>g*VECo>oHlPEjj-~6sjlTeU8Pi zz(4-N!jKNx_Y60Kk_g z4k-xQdC1$9=cw1-larJ@Vq3cp?;nUhs#}A=-#Sw?{Z_VPrj5c=XvN4&>o|nS-YZw2 z84_%yR|X;%9gM-igIlaApS_)3QcOUBX#q@NjHj=)NJ4AV4&WV5-fu6D|iC z30m4T^J;6g=l@h87+aJUC=E^eKqd*O`6L1%>c$w}$A)&ToeJAfJ%NR|PpwR$< zNc>$XagvdFe!BX~$kuP8eImWlRHqJ3xEK7Un!A_GHm}WkO!Cwde)*dXZyJ zDF|0fOCO`~=LiM~679)JNo4K0X?L|8ACPnH7HVbo+|{BP*&`bgQ=in($i%*-H;%cM zo+riLKAP227IzOhbVYgjO~en*vnSK#R`@tu(o(F>9z9wW%l7t($-$1OLr3Wto|;MA zw~S+|DPVkYs&7sg1pG-hNY$T31ls5ZumC zMd@#&>$0XbvCs^&0t1zWFKLz}Z#f!OD2n70lr28p>DvfXa6oa4{0#c1gjE4yrU_aoDwM!sy za)Ni{ErY$;vP_R>6)W&HeYQ;yQjizsnuL|!Oj!bq7L4Aj6bSLMZhk_GWX)!EPEov zAgbD~wLM&u)!c15iL=mweiryj^G&_A3xXAo14UxN zJ|(Z>I0hCU)!%tax0Zct{&-fETtvg#zm2*m;UG^uy!(vh0{>nS9Fw^e)m|q5WOY?g zjLOG*oo{r66tn50LKVcDi^lg34(c5}3HeZk+5vXekjtZdR+0OHS%N@oQ7k1*8Xc&a zC+a?@gjSNz%16sueTHvg0~3sa?S0pVxF##*A#*@$|Jwk!?C#1~RA=AyEjv^o8mD#d zN-?yJ+qWY|9N_AFpbq4ZKL0oiAdpJ12)2_2k(ck7==s|$cA^LGEqQv%{v`B9NBBLn zj%~dALXRCgI9TGCvnFf$)u+=xr4DFS`CU^@-ovXwJKL#f0V)4IYP(6lmU%up)sFVp zO7WA`b=*b%Pv$~D4l7zvFWYFyWS9di(`IZ&6Ya&TN>MV6SJ|LWyh3~Y8}|!Uz?OF% zITV7sMX&Ixl#N`%KRg|LNeW|46Y5>QA07X|A4tclS0X`=a@BJ#Cpy5Gm#wYZurzHQ ze@!~`>4#ntV9{H~oSSN>a?FEL4-Cqs>k=@PM$i8@ED1OUKF}7$s(muUjW0`p2C#P6 z4~4>3>Hj6K>mUAm*_JM}li3}zUNbb?_~Y`0eIxb}hXebjdducbAR9wM_K*f>+HmdByOK?|=as)h ziS$?w(sY|WTU+%ziro?#4X?`(zR`j3XPn=3x^48JrbcdtLe~<7suQj&IUfeo1OPn& z5QIRSYh1?3ISReLN9S9Ijdsr#7|o^y-UAuI_2NN3C`mB%V1cRr`rDr|=gL%tB|FW9 zgnE^fUFqJr5NAyzRQS<)>igx(46n!S7d7=N_w%O-@OXK6cM8;pGFoL;HXPslMAbW+I+ssj!Esr0r^uy4nprMzVGpcTevs z`N0AvWFp%98rqm+qG5^=n4g=@7sY%oOIBaMeO4J-rGSf_Y3cs5w$be*D6!~sRb*BO zE~uoWNnGV;KAPzC%jVDe~dotB+$&yoOm2 zo>l!+AFj?`M(%VA9VgBXhHGTFy&g_V96kwY66VE3<>$X-NP@zU?5_B=T(dKl$i%Rq z6rRPz{0V%7kbv&_I0`MyL0%dsnR7U{oOJlX?$R!Vp_;fLVQeFhbz8RGVk>m*OQmcb1wW@8!QZGXbq9?vgPx z^6QB^=~ankYZY-_7HP6pd_e1AQgos{)gP~_LA8Amf~^Y{Q7@k$XojR^Wp9de=%~_Z zzzGKmAlnejyMysezB zE1}ElBM=chvKmr1x`6pK%ReW4yf5!0L}k}zA2)fjp|!BA3}-)f>&8%vMQT322bv^@ zhtpQ@DMfs5p@GdX)&<{_8%N5Qm}wBNv#Bs?iaw`lMswl2)T=Q(kc-qBk7)p~_nm9= zPF#bVf-ug26)p9zQpvZ$vkIQ(9z18fkTWC+cqlN|(o-2q!xK3UwF#RS z7Dtqw^|=_=hp^Aa-puHPq?_BF0R1$LU~)F2IZ=JX)V|%Znz z@IfBU+eq|E1?LGYI{bF|%W`mOk?{udjb2O)E-rn=Ar8}*tC^}_wRy2XW?3N;qfg_w zMYyftKjNL%5#1l%79?_E#+K*w^mMf>Q$U=^Qrp^^9t!6-=+dG0AYeiVS6Br0c=#i z#y{!j!Y}&#tFd}*CEZ+cSy`OP+*62=u+AW|M%iFCH#gsr{^*(5rvOk{E{2nhC7g18 ztyDohcZ@l>vYCIx6UPob+YMEy% zE{20ZjN~wsIR+nha$_%%!oVUGR#hbq^gSBSHTj5jaue)?(0Aw;*qouu2Mn%5C4^k@ z=6$5it=6=*3hv$jkAn=7tO*xN+}2xj)PY-r%Y7i3iBWmpm2H|x62z*pvawmFiaM7V zk-c7z03jYf@o| zcj|ZRWReK&OyFt64!nd-jL3kvU!}`ArQh*G!fqY{9i)_U_d&Ra;Nn|vk8S!m>0a5r zRt=rv9XP>bFEeygqoV)ccJ6fdX!+DHoT3l7Xy%XGcXWHH4t>6>qtogrY9pUlu*qEL zh|qx#->pSkewUH;=EIL;_6Cw$%3*{g3Z;-aoDLRh6|VhHS1mZ>yF$L2H8%eK{Q2{{ 
zfJO1(;n|s)Mwn2Ozk!9hTKBs!p`-HZ~RS6=gRn!3q2iRkG8Nn1wtIC@OR-ifrAm9 zDbZ|P@h7Q3J*2@LKZfU_UC(ad&Njjyq6ryqSI72DPEM9|J3%m+boZUuo`pgnV5|C> zZ$%dFndP_1&3^HY{g6~apoilI9}70Y9HIgADf-r=6rRye=@h!M(ZzN zH9Q;q9u8eE2GeO1c=4nk+*jz@A}HN+if(t#R^@bsWLfTk?}^#20@~_^bf4pKECu4L z#HIXMFISa-YANH)sZ%j+@g!%-o3`xWBLM=hw{@<4bF&is_5D1ul~2IB1KqVnbyx!N zEf}R|XB~C?&|pi;^^b1pent-q;pyoKP2zRS-i3x^sVOO7T6Ph~M|bq2?Jv$x>AjR7 zx-Dp8%FL`7rgm=F^*U^wE>(FE-`Jl&A7VYqjdF{sKI>Y%S%$}kb#Go9@QT~H?Dcx9 zgn;>Q#D2{2%ggLmpd|eFT`Y zl(+*`Bo!3UC*EzIZ?i0{t4m#SvYyJ`I^8cY8po9Pb<71WL;p*g=pO>d;n+8G%|Ize zwvD_aP4i@qL(_kDIg;dgHXz$YXY;LKog<84I(XRu>Mxma4y@j}ckAy;hF~==HPr0L z-D5_v{3RY^9{HhgFPH`$iK^@CK-@#s3Yg4r-y6S`X+s7|b1);N$dLtvE{xCmhf$naOqI1-IQFE=Egr=yqJlQxiNkEx8wJ*3jYQo}BHOlW=nc zqxlitwaasJLICnhmwzL5*KOw`Y|^c$QpM!Qo%1~O#=e;gdE>tXIu+wiMqD#|UIV{u zu~kRWnp|d`SL>vlrv@P|OQUyQ#V_P#d!u-0$4u*DKYjqxPK4ELqRVi%8xdOCpaOtd zjo5O_forLKSZv4L04$eNmfEEPK!LihuPgIIT-Q?m@b+h-@gV=z6cIYz6s`|!G}YA9 zaJBS*T>JvzRft*F6*0kyM3IMrII2HG!Z0!zFIdud0N?~9Jmi=JgGcl)R-WDTP%EDS zBW&O+P&!jNjqtpVX?})YjAnU(iCr&+ONf)Yg^D-kisElLe9+`kQuQtAm-6P58@!o! zATa2SJ_v_qZ>GZ^U-i3z7yVGF)*f(;HjjFBaVwqxgAQM?23%wq1(K!5S+LvEr?UJo z=~&}?$7AmTv`})sDfJ->EY-P{WCr2_Ygur<%1v1hxY(G-Z_Q>l_($h zNKmx&D`Nrb18GcNg+3ZNjSH)yT7vKo+B-TBJcd;*A_G9H!LQiI<>WW$TN4{dLrRv& znSqd(8%2HXUG!D|vDMYyJujN&K^pi9M`QHQpFbiU0}_jWet)m&oUE30gQ>FQyUR)D z!7r`-$o2y&E!l2U4yvzkt{^z#@D1H;r!;W_@2$S#fZM-Rv!D~$?3v8xkvUaTx8-)^ zFaPyC9jSZG7B>Up)|NIls*9W+h?7r3ZMR5?%CAg<$+m%hhREZc9=vnQ$sVd+F!G1I z`Rh^tx;SIt49T+TBO>!R1HNFl6hpM=HB*)SaYV>0-m>lDw9Hy4DK3dVlY(5lfpq-M zsFOJS3FgUSs3MHGKUO>mr|wbZuW!aT1O`rxA9+%FK5YDJz`~0wf!|?)!1@(yVONG( zmhAZMjO<}dtQqy#cfX%Hv0aSTbbsh0!RQAy0^&guc$N=#fa8u`&AAXn>voH=v2|Q= zac!ez&m+ekvc>t?vuC3l8?xl>hIap`O+eTBDgHLm%tZKGI5`ZYbjPwpiz3Celdj%% zD=I51E4)Y?m_4EI!J?whC?5?CufrpmYyJ06*=4FMyKQS^#|D!s zG&4HwP^m9Jd$9l@hoi*bF)uM@#f!C6;nP)shyb=07;0ncH<|+;BseUF0jWtXEGRJ3 zjPhAW50nUCdmVuhq3V=Wx&=paE~n(88HbQ5mL+wsXuORHMSE6B^2{^h|1W*+hG z>>kBbI+WuuHDiL#+;IsdW9GjE9ZhSeSRUFUHy4j8kVq}5t-8T-6h5kQH$L&qUr81$ zJfRF@e`P@szC}8alSq9I&6+gAi}#de18yat2NTviAK`_VhY0uV}^U zGIaU(%U*m1cp*wUZ`t(*tx&Rt-Ko=VfC9g|R@}Ucb%i!!a+j*{Td_Z-9RuPjxjI9s z{!xrnKi;R7K+yiilP1$PQ|D{5vD z{s8QaEVMC&%Oib#ZPjTHy||*au9=WH#USZ@>)F7R4~(|TN2~EjI^BXC&^hUzjn9O5 zz=Lh(&unn5_TJ{0Lz<+6ly&ETYcCTDvq3Bjq4i= ze07ev+fqCWhH&M)y_svwIzn`dYZ2WE?m+%P1soO7u_oP|71#mYqaaq?7GHs;1n;d4 zpA&vse%g6;scHs=ff(budF0x|lt#_U)9iFMXT zvgSkT!n{`lCK^9nvX)Z3+TW}$9+!V>Ki3d}7)^Wa#ogeC^WI5I#j64;GPm|Cn#8_; zV4x);MJ-hJ_>Dk^P7CEFKv7FY9tqxxG)4EusT@1GFdAXsRyg<@C#thUROyS-{UjI&8N^ z=@J8Doa~}yS-xSQ8oFJV0gf&48`EIH4X8`iLmFrI*eL+j#*2vlT`CX@(XcZGLIQf! 
zIJLoCi*%p9$t=qxKYiV_;rFwvs#Z6_^Bn4wt)toA^K!GzU%o@$(5Qv57l42(R_8EH z2-iyX1=NAm5a0oxv&m)oZ$P_sA9qPEDZySm(L$jty~v5L^%6OVLr8~F7c;oTm)JsR zY8+n=;3bUQ+F~R3*u%h?eScpCAJ`A`=Sps*yWX37_=J7SAf&()a?|N8jfqC69QzPW z`=%U9jJW+SSDglTXhh-DJ*$$xNQDK8b$g2q>m{!QRD3tYFISe@Jz4fmc8`kC?I2(C84{TJ#d*-~ z{0d;zJIeP_!(*WocMgIT`8fbKwj#B8n_pusH&n!4$Qw#sjLphWhO3xGHne<0c%@WM56sqyZ7Jf)hB>Rh}k;&FQO8Ex|{ zJBo!U+sHxy5trh*j0qR0bB+c&lEfP}@h4VG_*%!KoKkT!k^$d7#7u%sh$~Ur#lESlN&ls zQB$5(ebhxv-%ol2EtYK&^$=niR9WHD;2|}v-k0u6UWN(?mOOqd20MIbe_W^E58pm6 z;e?w!hBI+kt{4K3f(k3~wbYD08t?H7`z1q~rhB;tvEOY9leJh0S%L$vh4fN0s>RQ{xp|lPYK{DdpK2!EjW+kD{R7gN`9X!y)h&sO{Gd!g1zxgOtKz5i z%lj3#4o?-IDd<>kD+JfXWG#7guEf^%t;!95clY~g4-=ZXCC7m?7vqUZXtQjV*UU`4 z{x0%~0Y&9ZKlQ6TyU1*_{5W;<_DY&GH8Dk9P-#yejz(=w4b??_T?QtS32t1}66uiq zg)^>DJ+Jwv_zSgo#ayA#Z+79>b_+^s8mQzpMRl@BXK-LIqzb;zIv+;H>C0yJPEH*s zsUnl-y-o1c*8s>QR+g3y^S<-{ow6PP9V9(l2c4lS!_F3L1kmrjnHAAdzb(al zFiK%e^UhOX4#XtweGX>fEAH|(7Yun$XX6vhD@-iWWo|bZUVW++n^=>LAzrx9f%`{r z{O>sWSr`V~9d6Dg^%Jgd%+QO{qVbc(PaYHW&7PhX@Y^#9LUl9EpVl+9)`qb_O4(ws z>l}nRjGobl=L(E+hnU?j#!!5L>$uiB7x~tNs&pdCIn@0<3r8^;Z{foBLaBbKxLAa~ z<%B)eO*FDSi!?-Vkn>r17fZ^}J=X9(rQD;l4xdu3_hZx-LJAPwIb#Z+voEVjTt1(8 z;?G+26sMjP&Go$jHiZWFqbag?20g39K);ifh{Uc~JFn$XSf}8-^kKzsNh2e=tHvBz66c$uJpgK zZv`5fREQCpYBb)iu2DC&+DwA&6&<-$f?CzYF6ap~*mC}&HE{FypT1LZEtC4y(2X+{ z7FqZwy3w`}8|>S~$+b1fNysKsuWK*KW}*MBh@<{H89@5cyQt;Cf;zo~_{8 z-Bj;C`;??+zlAwePdF4_f%IG~!KbPa8tuP}YIFAxp8&ZGbPG)O-CW*|;e z{V=_S(BQ9Mx^|fI&s1#nT}a$(Ngh>$)9vCA#rE>&J>m*7xxFLjMr|Ep5LFCt-5;3b z%Ndcx5ClW9yvy@cVDbQOQA-G?H)*{T+djY z8AyC-5kKhdP4?FOTX1T3#uI8!W9V;xY`2hLzBBi5_k0Uf1efdUl9}EcRP6p<6 zPTohKp!y-EnMIi{!L`3SMebIZ>p`KoPkoO#CNT)V8w^j@M99tDXO%2nRsKL(yFeIl zRi)u9Twb23f!9++S#+Ga1z!fK01Can`x#QzYQih7eI7Pp$n#%Ch?VEaEf!IG zZx=iVbtfV(gfUtNx>P0xUsP`MjCH~URv6aqQ)D+$S@$)-*hB5aD~wM%VW{j}qZgl4 zwdNX^2Rj<5IsRkscjrDVpZg0aB9qANuM=%d3Z~Z&)}{%Uv!GT2$Qn9Gp=y_1f{aw= zgFz)|TVR0xlsu$)>Px{s_|v^|WVz8BMUF-nGO3WrCG=SCLt$L2s+i67XDa17H%=oxJ%7LMyNkLZb(Emj z=5!&YdHmHkQBJa*r$hkZNcTNUJXs@J{w>Ut{5gj6kh>>Uv%N4_C0v=g=(9gmx5jSQ z6Ct4VMsX(qbErTmwchH?juITju-hIKz*WH*{~gV}Z7(vg@`jjo6R6mw4mw1R{h2&p z`ff-`Zr&|?0xUdPnKbciU%n(uVgHV~9GIc|i6+IfU>w+AZIrpssr5`fij}CAsdDB= zk8-yhv)h!_sS7G{g%_J*v$?#_j6*ZFMR7;#1Yg~tqt($rGTmNf?ML$T4>xD!Jgo0c zWKl7)kyep;szUhW_hSZPeT0IW<~*yWHe(-8N*2lIyKhY?dr5m)d)>7QJaGZhlkQWO zv7-TJ8|Cbtg*Ko4b>khc)NV_y^U;4IxC&Svp}Nfy{Duxt7#*wY;xPFRVgz>03)CfO z>#!{1y2HF_E5g#edu*KRr!sgz|{0K zU&SR{EcK>G?Kv2fi5=svwyK4I(gTNxm;$x;{P5hxF`KEWqIONOilq$Uj)aaxa3Lf8 z-J5nF&*Z_s!2Szr!ihNf9h=L(?Z-{SgR&d8J6E(}FETApr#dZ}#Eq#u7yfNT2L-T| z?fD@J^g9@_Yp`E&Mdwgtd{t%sdp8xXhVBAO#}#?8vIl(m$3DJV^Uq@tLe*ZIQ!1eG zxLuI!!}%D?RVbFLroyH*jmz=*IhHbC0A)p@+rlzF?&JpEDWypRzD1NX1cL-q? 
zI{6URS#PCoi;{2Mk)ig5j+>F#ZpVnY8AJjgbrTnr)2wyU#tIB&G*l6-qqzc}v0;9?% zt3so2fH4)^P8xsitgkxFPqNnS_RtwoYxk|p0pOj7Krjco^#o5{mIiy!h8*6U# zKg2RW53>4Agxh`cYcMWz1XMXhx4_$6pRXYhO4VBQGSaIRng_n)R!&8*dF||C;7rBq zzaqqGpj$vU%v^G93{TBFOM`f_+DWiIB0q@Q!2W|`FweZ}gd#s(f;Iv=tfeKv2K}9r z158Tf37*hy73i$IdHWItMFM9dMZ7w#^+gKS3qJ5vx}T08QGD9El*~i@e5ka+oTHv6 z)%w`|0FZ|J`ubYO1EJd2ZeHxcmsn9>id3U2gNaxceeyuOhrPKG5YWthhFc%T(bo5p z@~WfLF5rKpCoT4Dni60L3OK3ge;&tsRxzM!IUC1?$U?PcR_|}Q2S_ogocY{S`6&d6 z@3`NK?oD5z$I3f!_qo0PkxG#ffV0TfpOPRh`7N_A`(R|E8lsb9qw+B#!h1>ky)vlL z;DRruu)WxLpkKZGequ0y9jirSxXEDIJP_%r>_JDN9(lKbB{XuXAt747TdwduZfae0 z!XDo1VpR5Ac+IyTO5m&~k1$-+vNAGY#a+83WgZP2t*ZwHKZgK;^PXk+IXB z0#ga6AzeDrcA%A`@Te@@24U*BGKAAF#1>_3(NW_FMur0_jNh^WSIO+X5FkUe;gTMz zWlUB0YpYXC_)l3f0Z$n@AqJBj2+y~5Lgun}+)+T4L5)S_L0R%U$^qGp-1>Gwkz6H1 zxS`dbvsa~|m?ZC8(JCud`yKwJKI(5>ULSB~4_eiPeZU~lG;Ts?Fz_As2ks-7NRGhZ z(dU>lr>}m58m6kMDj1~#UJy7OAFV`I0<9|2?S3T@xulXp$i-22>x3!qcr6w--sUQ1 zHKo7@1Yc!N6hYw0ABvD5&8bY|Ks2h>tKcygmNo}Z{LTSE&_+U>Z+& z<0m(2QD51crn5m)Zc_VP9@S?%4pzQrpa6TYSLU36&HzD_`>6hY5>sBRh}YOLeH=tn z-k`YZdy}19uxMWCZ;f-&8g-Z(tEJ@y-YkV?~ z>f*TSkE8$ZB0}@%3B}y^-p!scTWbabujG;Upf5_kcVuqTz5msgM>kCJP%H`bp|hJz zz+kaxXDyLnCetA>%{sCc>~uwZ*mm}Sh9A{$b0=ZM1H9SDXsv2Ei9B4DTam`@$LVpl z5AtzXFJIp&nAA*R95Z_I2iS61eu~$gvp<0y+pGlDbi742u?_4|24mfU#Ng_dGL|+I zk$@;b+qU2N)#goYqGyG=k+n%}mbL1;F?J~QUncb2Epc|P9#eH45#BYldFKlgO7pE~ zAK524xUJUBUt>VmkmPmsFni*%)9cqjjGWVXkvrKtsX5^5EQxs5n!hKncA*YS5 zqqF&`J-9M_dV4Kzil@Q^Gk9Ob@fQsuphT0$=HFEel=#Lyf&=KVLdq@Ti$?ev7gH&g zBu#XRLN4^vnnu`uB6|DC;xnIRH=Q3Y9$#Q99CWn1iTvuHas=DI`v`<7x2&BYLh()8 zFt()psXy6(xH|G&?8YrQdwQlb6gGt^O0Wst0d1;^w+vXi)8+VdrpQZq`-uF%&LgZ@$&$y3#J#x9z8> z7nehAnQ$wTIhIUnzxzwF%yiP4`*thB4_rJ5d<0#K*mn2jjRZo+b&}c)>{IG7QTa|3R8PF%+S#+#Kb@XZ|?# zhF=y@aenAi;|okwIG1dNF}z`mC1UjNl0r*2^Zp;ha;CSaIIxu-ttpRl47~PG^rZHR zNZcDFV3aq*vvJ8BCdnpIYKcz&`d-F0-ze^EIfor+iO^PB%Yj|z*a6(?^Gk+bJ`;Dh z{N;fgVQKs3CT$DTb1T3=0zseln>l;xo`x$wuoMZ+eKhM@%hgPoUfc38iFj~uFs_+0 z*Mc&KopV9z*an{z*VT_^Ft>aCSVChwDdV|j)A{c%TJ#mwP6K^?KVPl?oyQs<{g?6l zW(NEslzcu?|H0+v!4A$*d}@9*=slWmPIE{J^u3`+Rstz3dD?bdBJ$?Z1(wC{DswB) zyZwfNB8QT~)(#>Fctgm*gB-XE4L&eZ*!@7fxI$G^_Tq}t)zBj_Tq7OYM;OYN*l6-X zyn>li>tFeIyGknb?4k&Y;Wn{Ln60NKz1-kGWD_H#j{(xN~=Jb*JiuzMxkRb&K-bu=E zMlwO3s!**TVy=Ic^dO5+GbOWo5uG>x`+TxdXMOlCmZS;3Ypr|ZQ-bngtFx#N)y#`) z)xtqbf&R4b=u>yicD$pCnzOJ35jxi@7oNH)PJ2Z?lF+54hMwZX(_dFdr_8xq9;YMQ_gSYCO3t%MS}Y^vvakJyVuAZ z^WSQ`-%;2yKOPhesC?95A@YQo@X&^K|q5uphtb5 zCoT=plI+h{6hAoH#RJoyAsMKt3_{FeuLjxpz$QF~v*^c`_S63uwHCV+m*2%!oh$#x z-kZlm*}i?iLque$6iJp+D50{}*dvNck|kRyOV+aQdy2BON5)zr5wbI52?-5Z#y*yq zWE%!!_a0;E`91gVdH;MrpZAY@{^+iIn7OX=I?rSI9^d18Y@iVe+qNK3@gHzfmZ@9i zm&aiu260kCRfJMmrSmnW)Ht4t(<3+&g)i3nuQdU0j*IHlK?0P95S%}G6^{B%am0t7 zyd0o_$hh!qH?2ns+?joiJFL2s}$2>XZ{{xIx$ls^=}QZGw5{kRAONbIYOM5E zwiT9`?4i-eY(x0rWL&Aoo={nSFKXJqk$@w_1Gk>>VW@>d^zq`=;8qt#f&$Lzp()&NJlg(KpiIL zwgLOX`0gXh==^x?5@A698sDVl-x_$l$mWs?{1cn;VQD!$cV+ z-pF9sKObmBK3TEbLZ>yvPO5pc5ViF4nE|gM2MhGi)_711&)#GjK>M_)^jQU_b(e9a zb`fqm{j9pb{TS8Yo?n`YQX7lFd*tue1NH`Z=l8mMf4%RqP2xWV`Vbf4+l?UNA+y3n zVT!xP4YnyN&yC7h-c{0t;Toz}uKc+T$p@G{=m2~{bwzu(MLZt%m|yA1k*P~y@YY|^ z=Mxpy{}Mk1#3%4#ItC8_TdR4pHfwT@0_yYA0`GHU-C?!Y50js7%a{25P!EKv;$UZ? z$Ro7>C?!*!KSo*SLDv?_&{#8`eA80ld=5`*%{Uxs0A6o5@#|b*mxJGPb`w<l_Q8dMho9VH|UF_ z1sRn2JzUJrE|w1=A-zMo0d)Jj&)g<$2`xqSALr1f^|Y3|slrkNn=F4VuOH}d0gkR2 zVUzk|a#V=6p!JI=D_(qiY~ym~-$8*9ct_LsyWNO4cF8YiZBLDX+#LE4bc`vZwm`$m z6Vb7Yi17YB4)tdW;A~^Yo|@q9Ib%qf6~Tb;^A^n&Q0G;dz%BIz+;z|ZtO@<9P|*OVvCOMdojMthQ`_o(U;A?K6E4gQpvWV?2>J?^^T@! 
zIH|Y(k^V41l3yMjLrX;gvAD8{xmDXtC~~90=^7dIR?Aj zH~PbvS(v~))&Wd67P zAs>>I_yrv>x)0-4yoXk@@RQbzyw;GwqAv7Zy^p!=>EOV7WkQ}Z?g~0B=+BCmK?)#= zxdUCF&?)ypVH|`&ULdaQ*-e{m3#;?AOsbuq26_Pj@7@EtydPT^bNfnx=6{Uwvrk{_ zu*nLLh~#2uF0>b$K0aiDZuR>ZOVXBibv|F_75-m~7zaFM3jO)rt555&pA?#F>RqN) zTLduH3Jsaw)CuwZ243T@=hn}ZKOas{;(Im4 zAu`zZ>BcetPtZI{$rMIs4el%tMt|tBgGma|1l9x8u4-q$6Q_80T53 zu1O!xd~jBy-EreMbZ2XN_`=1pj^pc7Ax4f0U%tPxS9?T)QsH||9W3&K{)QfTZ z1E4oDF98cxs-WCPCqt_O;{uyL7&4YuerDe$mQq;^jgn6;Np2|n0QHkXDFBBP21pD1 z3rff7Q3%hX6MzalS1z;IkwJxM-)Hf@y*S=CD(Nl8b-Q(SnANoB4K??%EQ<{<-}_?I z;UerCp|u)fOylqafuiS@i8Y+JZ~RBmC$V|Nk_9opQ^q7acD-9ld|ChuDD+-kqY*bX zK{dZdDf2q~Dclzj_LPPWd0B7iCsI?^9_pKzryLHT{)53w`E2_^bC=RY_{8coRU8!} z-rbWst-qT^4C8NcY%ltFt}(wXdI}a6UVjS@e%MAJorC#VcMK3zuBv|}WAv?nPWBK$(<+G9sy`@Z4&+%@h|jUZ4< z;GQGC!S7H^nf2wo_}05Jd%n&Gt!@KH^mg)xA6nVNbAy5RKX^yk4S?7vmhP{a`R44t z(WFap+Geb8P zbDr(Gb%i4ANBlUtIwOGIm`;v>S{#(sW|q81x#G1vi(4dy9?ORWJsgIBAYH!6Re@*l zJ}}v@QFA==ZGZ}Ob&KnkLH?UcBi5sJO2$?3^1uo{oSkPy^Q-;p1J%O2CTm__v8(8x zhaj(kfUuL=G8vMtLc^o_c4;pLDNp>lN?E%tpj<%j=K+P0I|}cCGIqUo7EI?$b-UEL z{pwNt&Wp^y1Y1v2TO4RD1{C{3U+4Jbq{Tupu;Cz6SrU$K@+@YEq zU(W&$oifA*63#mc4`3G0^+(iDo)q54s4i@|e5MGPk`VX#DO|rQ%L6=@-DM1?pl$f+ zPKSAYvk9j&x%t#@8E><8%jU)}vcIS_9E%l1Ai5~QIe#6tGx2l<9m&=z*}4$P|s zK7#~zWRKh1z?R|XZx=7$wIFZF7dv@h+U&h8DNPgvlJ|!V*mH7~o$0=g4^ej`8vqf| zD-!WdSDDl%uS)8UgS^7Aerg~tn+5DpZl;V8D#n8@i@31v(bk-xx4ZqQZMut`kJsLh zi;cBHe(MGTBaruSqg4wFh2Tx2&a?gklA4iH*X5^`XI1cDK=xe|EkH9Cl?fUU(@&0= zBJ&>bPpvjTcijwO{#F_K4g%r>_K=c0^f9i-GKJ|7%P{+0ZdjyeeMJt-!}n0fWy(rgm)< z0vb79{TDZ_x~ro@J-a(##8C$VUFd1wdv6DPegMD1d)NUk81?(w8COH8KzcYgS3EK{ zQSfQcMXCj82CX-5I2(sO?t7l`ic~7v#Ipxb?l$nhpKg^Jj{-xE$PAGGbhkGQMngiQ zH~HIcMk<-_NA%-QdMw%Xmb|%L*z;<`WvM;e@(Ju9I`WZ4j<}hM6nyxnRYM%=VeAAd zT38wCnmwp>vN_X2t|j{{G6A4Ub8-y-!zu#o_YK93 zPU=31RO9Tu>Cts_ac=|h*C{IM;BFnE%}m%A2QC8wQKtHLxF+~YeV6hle%;BOeiHy@ zPhbBA#rR8Sb^y-}`c5SH%Wj|;fAQkt^HQ<3g|nv-h@bbN&wdF}X1owexl~Q?OvCw5 z46?pLDnxH2aFIfPFBGKoDEvM-j-@qh@5-|D(_zrY5aW1xtRA#1+a7w;{heFS9^^}S9VIxpWf z>T#cFbKXvC?Z-wCKAARp#wP!U0PF7i-rfgirvO!%{E3(lZ0mbWez%x$V(z3&?&wa0 zU%}TBx#!<=5X5^~6K0^ZoseNIz^ZolhZ}M7Ox!o~oGe!}?5!RG3>|w+*f=UtTLSb% z8ZaVY8tLT2c~+KPmFGt>&PsB_F|K^eQ)^DdN=g*zb8h?Rh?5f6?R!h^Q5WWa{CK?l z3RaD{FBy#Uk%i(vMa;lQ4|O35X6=jD{=8icbW9)RA!2zw)h^cGya?f#8gE%ty`uL1 zVmV-L9C3`LLX@FF1$o?1{DlQo-ylVG5;;+nQQ6x1Vh=t4k$1Bf@$bfPA9XVmW#{VZ zE(&a*1iXi~lq6B#EXB{bDbAU!dLOKqH<8}YL-Jk4@_CV36)CL@Dd+*^gGx4f#jW+uoNLT~yD1qJ?c7x~JL@C%fgSzQi+m9E{MxD6lbF01-l zDE_itW{1si&reiltRDHufla=}z?P6x-;NWn>MGf*7sKQk`z_Xf*1F-@F+3FwJUlb3 z*}{zABMK4okM$UrTAWYx1$pk;p34){P+huTOP;~iJ>w5~sdg3JVPo#qR}M_N7Mz^I z1-M#zMYrS0{B4JRsc6w38lbbL%F3AtmigUN>P|YEN}3*Sh~Z!KyOtq}Kn#FX>A-$E z%XcvRfa!gwjbw#}e{%Ad2P4JLSHhf;?3XrTMOOAL&Bn_>1QLtGviS1)&gZbM4#-RM zJ+bc%CRSZ$@qwYV*KTV2oqf1)cpOBnXzmlNkGvecBNrC}MPU`$2%)-Z!O(jcx2Z#a z26e?^i#=8roqu9rJhX$e^Uyg*dr*r#Ym9RqFQR)o{ipS#WrVk6%$pcx>bx%7FEzk% zidFC-rtdjxc6>PA^;Ksv2t*7m!ffmFe`Y5ot$*vr$n2~pDUr^DfO7( z3d%Td71npRpW2p!o7*iFtsqc9AP&wqVOBq}u1@g8MzSi+2jZpY`Jf69i9{^p#%-jf z(Y$pK$nuJeCAdRW2~uOw_mgBh3EQ%&N@W_?8sWP<_#whXk#U05eb7-WyjV`Tnrp1Q z|EnRt_HW6^r_cak`$5YH7aE{Mjb2?GwOqpLRc7{Ur=|U*e;wFsCRfy9^R$qxtv{*e zExQll=Tes~3`9N4h&7N`Qx6I%^;tR7X=d{2dYcH9WA)2yS2N75)d!&A#ZQ7t1?9!- z{PzHqyCLP~Ywlo{V=|RE)?PlFn)DF}vPYl!&G*Zl+xasLlYsN(oK>4G9D7dA3R;M? 
z4eubi@vDx&CS2?V$=q6BnmDChmcb!zYHQEa0OAym^bB)P@3oPj@J=2;J1Z!voaLhtb-eAM@|I~36f#23Z-QPp05rB1LUEu*he z(<>2QMeMbM>_&gUXgPTP}M1)O>`YC|IiKr00F24dDoxS5;`{elNoHO+%)$0{mK>3OP9zOZ% zYu!jV8n(p6+0hRXcu!w7=)ngz@j1_B2w`j_4+!iQv#?1h%AhS9*Q%e?lZ<&XwK0Z* zm-_GRsG|s<=&vp`VSS~7FsogIXQ}WdjQwGFnfaR_b@Hq7I0B_SMw?KV&E`-+S2dI6 zlZjg@M^c8m2O{@sSmtM9Rt*k2Sb&_x0$va240Z24EtRoV#(tM5yQ+QtAtb1>X6a4h z+bjDVY`yATik)|Ngh8+8ZJ&!{qubz8-z+%Yt;(fBJk*96rI{@4nKV{mGJ0w}w8_cM zBm87E$SUse>w4CXxd~c1m)6loghJhp%}SD+YEskhKAx4;>B&(d)@Mm7B6BKp)tw&i z<=m(YN)CXUNKuz<1A>Ed<6R@1+?OFbh(sv^khvynlrkX(ZXw(R(&3HQXM_Fjsx%iy zz_lgqhyq|T^!`1IEcW<48nnsc@)0tYJ)PUXe##gXjJR=yH`Vw+TS+^ibT^61i4r8Z zAQ|~$sh6_kaR@}@%hw?Ck>BNT?_NK@QX=FcNphz^pTA8;L6g`w!+`HvebrA?7Qg^ z3Nc?-xuGpAxWRnzoKgUcR$bj+Bz)2OwV#aHMID_ZP9B#B-NzK4-O+zA?lO9NBf|Nz zr6BBQ@$!y7;1VJ7k?T>$6wgn4;$Nc-Jr;`rFAyPfU{ZhkR+@@kkq%^4qJQ=3m6j1> z-0`O%`JcTWdtkmil=a3S?LUeJ$teFbl_m zrtIH!eKT3Uv~j#(4?e#$5mFJ5xo-NbM z1t*TgG^o~;m%?UE64sNAX{dmFl~+PeFgQSh&+A6FWpU4$1ce#z`(x4ZxBLimls4;V zv(loX3yEUh!dLZ4yG!cOF!A)ZPn~(r4$7|J8oiZ8Pca{Pn9(J$3hgPlOcQ~rw}%V1 z{e&I3d+C*V&YRB)ydFZIqA>Q*AC()Ik(TD*;lcSm+or16?^;Ez?#CCKCNVyEO96Qd z{#Y+Rf` zmU&5!-6vaw=ZN(!u{Rn)G!q>#Pjolt@XODq2S9)F>Jl(3Y*sk5bad{kxN6f&fj?~^ z#Ig;i!DTqm@e96Kxp+taPmpnxwZz5y&h%Kn z5*}j2HYYwMq4DPV_T^OIcNV`&ho=uE0q_?ri|vwQ>gq?{cDrq@N9XxXwf{2I6V;?Z zcER}}Jgz@(WzPP6^9S8E9)%vqE%jpoLt}ccA;&!O5toax;Gy_}t!N-pue-m~F z^m7Zm3y{#!35Lg&bnt*=j3zum7}8TePak#RNJ;J+doGf+>>G`Wx>ur=06Q|qTo03I zw}2TTk`Hq-M#VUGmarfrh3lTur$gRlXl#5YVNB)1BrZDFKkva*uFO|r0{Eexa}e=( zprgO{`axTUA#vUtaAPoGqlGxayd(LshW6E zx&jZR0ML zx3U}pm_I=HF?H5Fr=w}#ngu3X_~KpWqAs5NA?~|v$F6!3TET`EyC4v1lkNiVcrzNz zpZ&_IGV+0GM8>#Way`iXVcgj_@^lE;0_o(blSOhHuu7%74!wWkTJCq$tJy1x^5;Jg>6??5QTU@e;k;M|QI z%h1MmUZ2MHr9nkW{Aig?qMx&~du(xy0OdP}yHx(~|d#R=o;i*L8>d5xQIS zrk9P5VI`f7LZGitfX%V-e7$lP>%Mlb^PwttLMjttz#@^_w#k|$bHB_l9$sA~J?9m< z8fEa>ioV}vvMFn<*=_A$yqNYw-B9aaJTNcZpzAV3&2Whmxv1VTAqkZHCm3eTMm5w> zGNkP3u?Z8DC2YBB>BvMugQ@byh}Bv9V4~|9hsm~8W%_;=pTWKms5&cy0J-9}v76l* zC;F1p(jcvRwo_)|nqpiJKJHIQ9-K+g`ffc*URpWdV;^S7sel$ zj~7LfPfqNoguS)2oNz~g@E;E_K$)SZ3v-_oWyoeW8l22*ZgD;cyZd;#UaB|Bfqn5?zLKla>5Qz~!y3`G;g75kdJkHci2hG1w_2Mxj)42!c1dY| zBCw@hnG`)R?<(}W8LI8-+uuTf!!@)b0ePyt>mB{0TJ2Kr;>%c-V6)Y351aPF-c9{+ z0Cm3`lDhnIIZ@*hdayiT5QSj?O4q>kh!Zqdan5j#Z)WKQg@uJp2D81qliF8}LQ1k0 ze2VTH2Rr&ZtPR@O!8BHcv5QWyQNO-gc2ciPt17yL+$O1}G)Bu9<1nTC zLju}e;M+ys?^8c9W&l%4(S7ZQ~Un3Kdx>GQSoMP8M_|{O~>!_r^cP5YQ4Y3LY0Dif*(74JZZ3Xc?#US?7 zm43E5zJ^@tF?+05?o=?fO8KcAN-A{mmPwodvR!1t?Th-}1L&7ulXRhU~FwHd-v!dr?Vd>qFm(HX!#D4+{ z@DpQCHtT}D*tT!J zbdBeoDR9qMYw)i8ASZ52HKi_7lB?gD8F*~oQ_6$0+wUlqol?>UF^5SE@P?lR-Tn$15L$r@ZZ zS+Yq_>{Q>3$P^eM02E-dbbm;JOjkSX*UTx|*0^2z+6soHKPHpegC6=1Arq;fmG%QK zB}}u7PD_J5Se2VVH+L%~$CF;{ay_eF%0`-{H|-XRGUGqVuweiZ%*JE$dY6 zB1}w%$0VeESJ3<&%r@i0_1mFPe z+3iCh5*>GzAAADu!cnOl8L>J}jg0o2g{u=_3CSA1bPTGL(>LGqdHG((YomQV$4-t< z&&(UYK!FrKJM=BOFu^7@c`l+ z1P!q1*1v7%+ipv9aaYZfR^8ABpA4i>@~u)DTmImRPx3}(5Yb7AZ$?K)1AV@%UG`mw zQ<|4rI~KGy>uYUs*qajn$*)bthrri2v9-}zew=dix~5{I`>}QWd>&w(z{@fQn>1F1 zj!}}s;c&19a@H|Gk8%KFh;tMnEiG~9Zf@p~%c~YY2qfb4P~C^gVu-y>#poe3>%@Z? 
z|I>-%F}G+M?8j$4`hWfUb;;WdfOA+%QOxow@CBbcY~&1*Zx#1&BU@r?@QiT(6e0HV zChLII@H-+sLH1bloJhb>;n!zJae$^FJ^9cuUpFH&Q~vA^WYt^X0@14k(>D-El(3o2 z|1e)r*D8_K`asvXWQjXzxC18{jd$D07x|U1N+}pjJ{;lmPO+rp*mC^-JM>GR0TI2&tiWKnV7vA4n41xG+Bh3Z-;2yEB#9MAu z+|xYTwJZ}BX>N9;XGQX3>z}}zW0&9Um~aa(wf%eh zS?JFNy1vAE+kzr=>0_wEj9Dr{2?Sn#bF7;k){VR2ix<4OA;!MR@{7fHpVE~410sIa zb^LDpf8sl}18InpLZJhe+k9Q*zx`N8kl`R!0Pv~w@($EKJR`J=s{%2AKX={Qza;Wj z{EbGU$a{JC^XxDZYaga6I~x~MAq!YE_%a4K^WO(Ams&1n^eD6rb1q*o(bLiSFuG{0 z@|{idawUkk&Y5yNtjeO*9uKF|v^1sX1IyW!=mY^&zT?Q!K5utl?j9P}u^ex0`kn`7 zEl8;<4(T;RYl@^@zT-koy6y4+JEkCZNx!S_wFD`KEiB3R15d{aMt*fF0z<~EEskbl zaF@9*md{@Uh2L!9v!NEzWX3O&xP_4F@?lO4Hb=t<$6&(U-|nChJW^QP?_kNZ6M<-w zpX9P8)-8?Fsc3bh8GDReF%jmU65LrjOSbj6vKB+|3!MlR&}tNJhli*wpHG?3Ystrn zT-rBLwCcUq-P8Be97~eJJsvG$1|$Ti^u=X-hhRBChGKGD>L_P?2hFdP2~2 zFmc3-50WR)bt#-!@`?z+T!1lDIGu40+~ECq9^Pyeh*t?sq=&&)eTt{$i{=}Mb-rX> zzGbO3=d+yaMYVE!u4OU6-Ex9P5Dvb8*umybVwCRC!Ohwx#F4jkL`Oc0@OJ3(Qs{c0 z*xq3OC@X&TZf?;D^W9zEzqnVvl$$!-@IqEwgqaCNuO`S%q%KOSG&h6higJF}Ui$K; zv%{uoMw)zeWJY-j$KMfq>uf@A^-8(T74CQ+APpM%;4Z^Ge6k^YNH{sFZGFIY?3q-I zWBC&2!5B7k4Daz5ojR@G4W@N;fRuWD8f$&dw^zb1edh!1s7fg?_CjE-y)I zH8bu3u|bzkHVBFJWv!qo>jz*3A_suYn>lz`t-qB)wWgr3@X>UE+00v&_#EMjhk`zA zJ+3Fy^GhpenXx^F+aSETl#+Dv&?|yF=sN(6!0p^97R3AdkK>9@Gr5L;xY1VHyrC1C zT3g|BN2PUM-f>%5%uS5{LOE>Vr!<&wlCGqoS0)}0q_2WrBRGM(^h$tGN=mxYB6#-# zl>z=&=N=+e7Mcz~%l_qMLkGGh=->qkhv$Oap=xqK*5rv`GsuCJFnBUb=<0@52?QRu z$}Zr70{QBa>Y+h*dOkpZK7&-g!ul$iiuxD9Ug7uP<_u|XIPzrRv2_g26*MYW4 zUr%Bsv7AL=37T(JZ1&{z*uh5DxmM!^vEk792EevE7zJT7@P~?nYFgj}H~H#OT&A<| z<-Wy5gTws5>=XP!(_d_Q?B^*M8$^BXgAl`oGf=aJlfoI$HghsE5j3h>{_OZ#G+aCA zd?ps~rU$$myytOgDhq>-8Y^<=+I%feMq=)_&$bN;$fCZK!V?YI5GsnHsOw4gCzs zdju+^2~bQ3&rM5%;fskj1aG#g3qnDbFowa=XyRC?63+q+8%g~l2CvJ(8ep2n28X~+ zh#{IX!Efj^|41##a82nc6knmkN*Vby;MH*a#9t$6DqG=c@P+pJL#u=7c4 zd_kC{no6!Ro@G_~HDX-EDiUh-3>L{L8KaieFsW4i1K`GzBB8hJRR+Qm)e`5g*O#AN z@iD2HRttUdE=Bkv)}1woI=K@AH!u(R*JTas1IgGTgpfRzI6rl)de#>8<9^{z{g+k` z4@<;hX&JU5B4@mCq)P@gu92aMf@As@fSH$tVLdb+5qvS`fLx2Ec=qH^w~d-Fk@xS? 
z+aS4lO2lyM+30EmrgP0?nNlu+1GfSNjpL_wuu?kc98_-s0cLdDCD;?-pTEz^%FE-I z-b&H}gI8aA3Ecjf7Xk(NV3BtgXJRKx<%9BSvo!=2ZqaBfmax%kVqw@G`rPpnkJ$@{ zBa<(s4C)W>Ff=i-4mxZu|8)vlE<#P=`j<)2)Y8hjV^_E0h4;;=4hdiBPX|kb=3Ss~ zPHiROK}hdm3;Z1qAi}GzZy*FJb}QT1XU&ku)6KIgysW!6g$4f63L#TzejtTztplWj z&if*wfY!iZ!JnKl+Q$9^P(rOdTNEJy=DE*KY6uYzRkM_ZB{s^-QB_s{6pv}>=HZe} zKv>P$Qrq!eg37nvc_rXHu(Pvs23${Pbi=8HW(#*Mov{do(txV;eM&?a7iazOFsS`0 z>A&QJ!u2oM4!7ryPn}RSOg>j|#Q?BLFu6ZjSrdQ>y6ce6%OOSbw;}h5MRg4w9n?mh z#Lq%t%cl%MWfkb8z|mNnTwA&8>)T4szmtwCCP)<;JTb^9MX!85+1qjNmM zHcRizT>HeS8krV+OvOqeID3FnoN7KK&`(Gq(4CXz!b-#;P- z28@^PhH0ol;buk%RFp$b%^o&2dOSMfB%Lg?J1T@S*9d`-=7gapK7K*;1BY@gocfil zWSmU*62&0+%odh77u5OrJV-+}Dp=J|4NN8&GKq7CIt%j3@BNsCS!xe>)qClXmyB?2rSyDW2lQm) zuoCA6R^H8CMj*mx-9gIX?!$BR6TA|4^chkve1j8Y&ajJIGuKd+%H;VAMsd!PzgCm< zFzk?S!mIQ0)_U`^nRi7%2f?76-?tZcgjkRBL9t4r27)3=0r$s|t}Co^FBWVbE3yDl zhd6(@#d$nw?W@F1e;}EfIeY=28JH<##;qc{5Y`ELUCUl62#&Bx=G+|7Exkykf!x{ z+XE3mF75z!4=1*3hH1m6Mef_ezjuwydIUtEV74I+jd%pxKh#9v7QoG|cUwcv9u=Z`?JqI(WcxC<}^oK@4zF;SP+foj)I z9UUwBhgY=NfVuEMxK3O($a^w@JmY%i;GjEr2o=Z-;G9!lZuhs0>7moy5#Hi6JExJl z2x{JwP>X?~IZRQ0kYxt?mKT*&a%7b}_v_}(}f=9|6nNxQMEzA?PyZQFsVvc?Q zL=^B@4i(VNJWjUTl;$NhK0#Lr{nO-cYn3`WLvQ?fSoIEK;I4pJNx3UBJ)=eqP^9Vk zRrE~HK$}GA1J+lX6m(RDVAhrQo-Uq@N+tVO2C}g?k2IYagz8&8x2NXP&<_MTIz#t_h#o#d2 zH07M7$KR8}z^GA~pq%etlam(}uAE_8Hi2vdpL{H|VyX8$;HsQ(<@-1?kMcLZWX~wa2j(2aHZ(kmfBCJ|`O`xVI*5Pak#Pp_!#Gvi8$7=$5g<{qf_m^UKhW+b zR4Ko7FY3@ue=AmVhAF6V^nO$|hwT5C=Q9?e&Sr^WgQ$H_rSy#t(6G?BH>6&e0X^9n9~6_kh)ajE28<4fVj_Wi{uW0IGzuc^>c1K@K@7IaRn2Mz+4k3WN&;`LM8<4*-Gb*N-zP-VdUEPhkPlJnVKu?wi}!&xJYR zxwy3A11ZBv)OkAy`ie3rF)X}@{97RXIbp0L&s$kIFLPOY%XR^)3Mltg-iNN^W{m?W z6bFha&C|u;?Nvxb99G)(7_~a5DMWOKmqMYweMMpi8W@^LJ<*w z_UHJH=;=EO=xFMhrZhKkM5U!2gPP@C`iF)XJPU-V%I6X@Mz7D7f>bs7;?|xzvGsVO zMrS>4igQ{$^aN&MCsUu=i#w^Wt-fCW*xE~dX>R|8c`ks!MkBJIes@}+%SKYmn+8QF zL=*i9`>{_UcKE%q46o`S@CM|(C{bc6Oorybyj*bDpl=3i?}Y=yE^V}^ODIDB@bIuF zHZ)XEmc;``6gv`08*MSP!T?{9XbYi9ZAd@C2r~#lbHD3FjT3+SQ!7vEo;5 z9Vt|lG_!*GFtj%H7P;=%oX;dMZ9@BgTj!bv(hVNQp1>PcQc2KJiN<`HRocxFWO>Xn=4sYoH-c2gqrs{nFI zP{*~FnB*L7qp0{(rfI{;h?vQe;O_j?fw>v`%*<^ye|UdfzXJ!wp81^v#DNHn!IpCb zo+PkK6cdKODZV7~gmABS2`*#Huk7CPE4w4_yt!pV=H+1UV@0o+YTt`<8$JWV;P!-% zX=%U1(yC{6!4%$FuNh!i0!plPkOV`{S$dnzbDHRbSka6GEe8?V^>EH=rEiZJ*-K9M z%hBDM?Q;;Za74zxcgRw5y7xE^uD-jwTiOI}@|%P7vgLe=i|q30Vj1EJ*uy7fvEsB^ zvyLHgCLr$V4Yg}z!qJjS_tF?B-oliS?H)BVw&_ln{6kn;#%?* zUap!IA4f+s+oY5-M@GnaZr(8mR&m~+sN zbnzyz_K7pm&T*i_+K^s_Jx5(2JvWL+y_^f@gGjiDi9cgu6~xLMqxQkIH_aA9Buu&H z;|BF|x7P2juW-wfs{wI&I;v4%%aSiE=)Z)Bf}zneYTduh_G0T?ikTp%`jdBnIYf(_ z>~uzRc<~@S#b8ZImDs)M1)lO$lN84Rg66)D^t4TlPL_u>bkJ=3Qe)RI6MVO?fZoA& zGUsI@WG2ZS#&MpjbtJ=>JX5=L7IZP7zXr|82Uq&N%5b`;v5d-v?(YD5s>QiPMmwhuv*y?$grP z8wtf+n2T!$a1%G7AHHaBwXpO_rFzaQuorEQusmE-QK+Q|!c>o$l5jSaFu z#vT38#tG#PF4aBN;Bm#PQ4;-U@91}8X?PmbYv->Ii`m-ya5e)tPpBm!Q{BfP#Br*D`5`o+-TsLq-LoBDrN+nUTPw26fi({2#o)Nx z@SMcU-xhwA*zvvFdm{4EZOgPR8SIP0G0o|F<&#-UWDJT?I?&Am$Bc7u4<*^|(~2hU zBzHD9jNnFRO9SsxA^cJ{06l8cxn%q#<%93q7bF3x42-R}m?tO8*jZ{hN#7=R`8e}9 z!!*i6#iPycG09?jyyej4a2((@_qT2a*-X}eb$=JjI|#`csr~4tvc^ePH+HSSaDlY6 zP@mOT0T3i=_@_XD;|}6o#fa z)pb&ucYs|#FCSqYih(grba*_#q9H`=!hn-*L(k*2X{d(@Ve z$KeT%Z`K2uRkyOj#B5>DYyID^K^r^IID#lBm*>4WXVX=a3+@-HV&28??|YlC1FNTS z%P<@6R8Y3i@Y@TBA#H8(*bvrht_3daEpF192^zs%apvJIlVn;NY8DheCIo34C(IjZ z_FF34{Rc2ZVB!d8&fm?Ja|cIA{!4RJ5Y?D0pL6_bW+-Yux6m#y?=-k@zp#Xc zgf`jY0+gakDmTD5=D$X%BATTx{KYLQ1ah_@?ZaxNMAfOz`FrCX9r`olF$S-10C~i# zWy4uEar=TrE;WwP#Q-tvSoDu)^+Id`?%a?BLGpX$lM2mtxd{)uP;Q8pai@v5Nl><8 z@w^UFzx^;!jG9^|T8<(Bkw4+|YROA7Mz%w?cf`q1=%!Y_y4%BF(cwywoY1^D? 
zVKJf|c6Im-WSlJ46A{{MDqz7-!F_MqQ>Gl|4pJ=+RL+&R)nVi?j>Sh``AR6j($eNFm59E&h!vPDr1)WE-AWtH z_9B9hH0~M^eCUAhuvJ@kaI){JjHQ&9wK%8yz5t87VkvcnA1UM8Uw69FAazaB?HA^e zO&$v=H6P5GZ8N7*04nLShCz}=8wSYl`c~tT0{}@)TEQMb*@94G4@+52=LurchY?o@ z)WCFyUU{}oDSU)(z+$n_mO13Gq~w6Oy6m0KM(5WK1SR_r>O?Uu^hk&kE4<`bpjC|# zT7_?hdcrzWqHs8h!8C{Y_(_A_$^b_6hQ{`gwPqxZ6n+%hh%MohH>vPv2LL&;`f^9)Q2MUwdBZ47=kLS$nd2Yf4col@CEyS4cp8ZQt3ePR!E3 zkoeAOGER~vuOb8_l+U%3>=c?U#XaU0xbGS6X_)m+_xIOySn!f_Rs76eY;9U|rG8HN zW(#T!n!Qe6{vMtC09u0%ju+EeCiSfN))a|aNooA1oEZ%&?N%eYlWf^nt= zQfd=5+%+F{bp5Uo(%hPUy)Z6wn!IBNO9zL6u$2n9TV zULhge(^Jy{Z2FV-YNe6aw7;wX8S@c7$KVtEu=>E^nfGO7fSp2^WoTq{WLL>T7J8-i zqi()8a*QMg!4)9EA+URZ

`&*#qNkr(ro!W1Q*wJeVzF$O8{7?LU8x0qrxn<|Wj6 zi(mS;W!l0&iO_BZ)SsuuU6#Y_4A}A&91TGG*RX=1SOb?enJegyB0ysi|5DSn%t=V| zz|;Uos;;i?bC2D#l}`=QdThb>A_F6i^9At};u<*p6rk2gtcSk63aH;?PcFkPLOOSv z^;~jCiWcxe7C&5D8l=?xF+R2|yz!!PeTGvYa=y9~7rDvGmNvl5eKcA5qZ_@Ep!XF3 zNPN5EIwyqpP>2#>_q%=};TzVD5A?AID-C!@(02J4?Z*L<(A-9;`U9UcXxt^n=CuKM zExsTz;QDE?QMZNz&YaIjb&@^PilGIXV+EaP&dK8K7YFsWJ&RsI^E$X0K;#8bU8h3s za4!#YNCIwD<|guY;V-Z~i(-!0BVWy3WqIFwUKRtF>%iU+y#!`zQPwgiqLKT6+W9@F zoS@@zMXn4^C#ANXiOV6-Kg<@jYx_1@B&qw$_)pPluf+3;zYGdq+P~)lU}Pxgsq!2J znkR&042#sPtsLEKO_oYNooA{gRZc}W3iDWObKQGUF$YTGfl4#ZU*#}>*WhDJzFy}# zT9@6G$PFhW-*K0lua{M5Q|`hU`%o_69ncrjJVkkmJFezs+KR7aEy#_f_)0r9I=;M< zk6p?%!7!_66_DLuT9-6n`5Q>Hb_y9wey=4)>l-?CW9ACSdhJ6GLcq+@6yzq3McVdQ zn3Yn6YscX3xD2_k29g61sg`r!C6I4)u=Dq*Sk3%m zo}TUv1{dhqaJYYex6-`GwNHg6{Tb1ti(nH=RtVnVCm}3)&^Q);BEzh##B8m+O4Ao& zw~EU?(|Z&@H9ke(G@<T>-dDnOlvnMB`^~*dyfL>xXnOF{h{9&Gba!@hKFg5-j5UJN?7*zfgre zg}0j7_u$=nACENn%D1@n($H{j^pW|sY?X+4p>Ph1V6bN&Q3kv-WX=!~J)={|{!@fL z5f5Yyn2_gl?}kRw0)i}J=DH`mmF-wVw(#;V0r^5E39T|Z>F4j`6@btQ3x zB-s(#DEtO$+?3GCxgq$6@>sdC6PvgIRn5cR+ddc|&};{Z5YhSrOp2R zji1^2la+RBzk2~F1Tk^Ydd%HfhkrwGhY_AEkpJJ-ALifs`TzA_t*tPjESAE@1OvL^ zDcpa`!`LJuG)P@xQ-Ly+*+v5UzkV0!CA&9iIc0&iz<7f4InKoQAT*}}R&-5gw$@hw zO9JBpc?dA1;Dp5j1qbLioEyOHKR!$?U=yW6psJSq{qpm74vZZ~F=GZ&nlk&$kh%$gQjVKi_v#+A^>I=i7ZdwvOfh`S$R2vdJbc~@(OD~6BlsrpQ-)f z4Hmyjty<5o#$|Bm6YKFRWWE4sXDnkFSFIN7| zjq-p0gyO%y{EvnCcftO#APB_2o8+HM^6w`3=ZgHhN&ag^{@o=1wIu&ql7BAAznkQr zEAp=;`L7lEca!|rlKg8){<$RoZjyhl$iJ54zgFbmP4Zt$^8Z^);#B_+#L8H>C$`r= zGhe{q0f0h&2RG_1lpCTB+877$_vTbJF!CRkx>!cb=aT+R%>5zv7|YJj=64?0nTC;? zufa;zaL)dj8f>+Zyqk7fe|+k?rt_s1CyVUph6nvX9C%D*UZ|U^TyzxsygA7MK3Spc zBFJ7ZRsa2x^}jfLvF+_iYw#qGrWD`Eb)O^xmcnS>!x+Ef2 z`J;u~zV#i2gc5uS?nE_eU%A?dj<;{**zc zT>Bv)St69Pm5oi%IwqgG_ZQ_jyL!$p{AACSK0BVc^#Smi|3qcZmksxPD)O*REh;We zkR60oVstz$0+?wT^TM_9N7rM=G>U}({=VwV%_92N|0#>SVSv_=dWqp)bo_xu-PeaV zT)tUk&%gYiC*s$1^go^mk;lo^BiG&jBlqO8R)njkFJ^Vx?c@7iX|2r1`G0O({EYV_ zq~Up5R>nhSO_#LbHga)1*K}Ip%Kq18x=Dq=&fo`@czlo!SS!f`60I_nOW#hrmh=&q zfs?}9aXrdG$*<%w!j6{aUs;WpE1nvP>fpno&w`NT){DdHKUZY!>fg=rVdBzc>ZE-{ z{_3qsJF|~P*mF(tr#Y>h@?xV@XD^=8;(FS$txA`@^10f#+e*Rr6Z+%Tm?GyYWy_qd zjRgHtje7dv^xc?9?%1MW&!k?z2=S{KnwaFTV!IFVnX{%0)399at{DE^ZpLF_;Lt+l zIr-u0KxvX!^pd~L?Z;DHK2tsmD{{LY;(C4Bd{^*o2G~8Gu5x?FKi)_xOjShq2dFbY z`P$U0eNyuOVe31u7EEGiqMUW1HmtHL>QUsCSf)qh|k!A=16=?#30s_*d zN|D||Q+kW^9+2KU1QJ5t8By>3-+TE!u9uL^oH=Kgwbx!}X5ZcKr+#^)BaS zeQh=D~j6jjp+sow74O5zQ%8~<~9SenKE zj7H}A`@lGcxgak%++dyGf&6eoc2I*OBKEAB;e(?``RbJSEis7{;qBevRJ>Hd^(eR9 z0asVqfOs;QQ{uyp+z&rVMfdRsZO6Xlx^rzUuBuAe!G%3O7WYKn+u;)&!#BlX9DsvPT7mB(4IU7t(()&N*dD^~YM(eFUm(|s*%8kR|mycs) zltTQGLBgmt=Uc&|VNLav$4=MPn&@~Qim%_?jOBDC+s(CPYM}CPzH66o#gXK~xs2%R zV@<5E1~(&x!BoZtCqt5KHHUFFHWt1M*$*qd{eKS^C`N;rG# zDD9;8VISt3VLUtbsR9GDi}SRGu(Ye^-_MB*BJ$W99-a=a_v@NDa&i5~K#%^1QdDl1 zc17pG)zY12$Y@-t*tYBi+gN_Tx{vGT+b5?1L%x-&+pOg3lZQ#{9wcly)oOeqHz4EdB3$tb+4+ zM#cXpz*Xz(?X1dooacJ@xif@;{;ZYZLpChIR+jX(a-R}9g`trEu;OF>_=}Gttr$?a zPln&mJ;zn*=8jfWWc5JL-e3>$YkDr5bG5H1%i-ML(4KYz#r(Su(*08L`lZK_nKIn3 zrc;jI0%PFptsYAbb#Gtmnn~1WzB|Ty{dyPVl(TW~85jd6fB5bnhh+>Od!;GAF1ixO zC6b81PmiT7Z!0?T(VqLdbF*GzaKVywMTq;X85Pc#C7sAc=U*mep z4_xIgr+yhmRpxL>z~m3PCMnCs`3ADnFm>MdyXfbR0hSZJqF-gq@O;!rrbOgcbAN>+ zA@TiAFYCDuY!T<+kVajQb@Vmn@ozp6Muw}+S+#|i7y3l}z}Z+s85q-j1t*vL86&NP zcuAO$*@dbLhpQ^xH~Ou!zzr&8$muVj6IBeoluYwv`TjH)xuySigrbqWb2IwN9Ms2K zdEd8%FXr2PR)xlAvpDE_qE8)l#!f4K8S4_@yjX_6yVusGTvuGly{>uu5|ch1^n-A= z4hF_lTMdS!Uhu|!pS1VTYPbH)HwM0tlEOEiR4HpIAW7qr((h!lbpoKZ~uu zcj%ZWXP@4;#96AG5_t6!#-@Z2;JTC^!$Dh?6S_k}ltAYu6P9)L=3{TZ=De-7p#HpP 
zuV9D4>CzR2+V|!YWvFG~RU#mYftO6y|6atFlV2#!4|5v}cDE_Nz9*-%Uy0tR1XHF)JCOzl(B$|wIz>CX)s`gdld zX0@sw-bihj%;~N7!(nKNj=dh%$%cnrobqj#IhxEsDx(mAsg;r{tkvCD7g}UVpM01s z{HE`DlRc4Y`Wf<)$q_(@Y-tddIX8i*O|F*y9L%#${&euY>(PN_cW>VapM6!s_175h z{x!yU3t%vBO|VVx_T|8voSoHUQy0pGV*B`)t*unp?cM|iXLCGAs1CZ{cU;I$6HJ*o zydn>_5K}6-r=lQ%PL!=a6yIz;QgAhh0a-p9m%k9Ld|ZfIhJSi@8M=v;S{zu26*ID@ z`Hn+RYsg`S9YIwHUgwoT-ZKh;qjULxhcx+LY`{618fQkHa2%MltmET5kE8*XyDG%_ z<9MsFbYH{PDTl<4!w*bWnSS{1tIIY>a*W4 z$&)_mFPu=JNfDr6Iiymatt?NkY6Wa=2W5aLHI)6wSz`AtI?px-z{DccuKH+iV$?d{ zVjoy_+r8OjbNz&O$6?(Wn%hf(lH#$Pjk$vW!V8~9Q#<*+uKq@5DiBAv=}INtJx&ir zC{^PLCh+^x7flLI7|boQ*#v}f<}(2RpC$g8o>TbqR}a<-2Fs}VO!fdK6VlViIv$2Z zBvoVymIYT`IIFZuhz zV7Z9C?S6dVsSdZ#;-TM@0`_NV{1psJJ?#SKh!$W3RE|US4VZ#uI^Jq;svK%G>^GiJx%ejsc(M*wY^0autYOHF z7`L_zHjltOjR@Qu-dDI^>D7P&EceR9Vt7T~U`G_Os2%MxYJc$#WP$o1!t!6;HnbFW5SVtwmlwEK_l4)Ie;iZq=T zm>k;Ar5#Y5{svG@yvmpaIN{D%U&NmR(xRJ*@(&JHIVIKNeiaz^sz}-|Kat7$qResx z{5hUN>2oK!^mJU0HZ;u_0CsOQk!%o)_%HZS{5Rfnno!G;#fLZCI-vm>4W~T6KX`O0 zton8jhY%G^=1|OTkL<0Fci>e!(4Rrl=RCv&DWqUt|r7I{zEpq}l&l=c-?QzpHl!mZtrfV*2-bfp2|P#7awFIertI z(0FmMMCM>1GJ*fSwm6S$nRBp0uA8O|zmE$(Ww_?Cx1PV&-`6+o^cL7o=$&gXMU_?% zGwb+WC=O5kY)|{5eEj;s0o88&XZ$z#BWaJoEF4cNUOfC~9bqu7D8=c3o!+80HzqT~ zRe!^Wjz9Y%Ht@TJaNUa_wS+`AQ~bXN1IzsK!T!Gok&%KjBXHy#V^B$+Y2;%C(2+T; zec^(xkaZ*B!og^&*Z*^Qt_JQ$VH=g*n4s*zuQL4+pJ)xNvBUBQG=@^qdyCv z`t;TY7z~8Z1s=zqKlxhUhi)&Pf@b=w`?$|Gv1J)V8O^FM{y*9+wMvf^$E7 zOC%(s`Y<`f!De6IoqraP7Y{z1_Wa)!qrn7VVBL9&+c*DXAchZ81mf=By^9#i0P{QP zl=%xj(xVz6sD1tyY$~DJs#+$i1=T&+1sqB#?+KKHM^Z|gPT#(;9YRn4P{0BR&#qUif3i}-`D{O`fdpw)fz z00u0+HMR2L-`RL7oc~q70bl6Gw6Rk@V<8ZJn8~1NVFm(4AZpqWae-<&he3u0FHeg9 zF0X(#Ie-HCaTkyMYjs+Qkt-%v#QHbr;7CxIm5Mx6OqqBEhXb=KuiUL&nz=q9`s84A z|Fga;_c(b#=*m=_c7#@Vx@Wli${(v4eqwQtzYL%l04oJ@jIV?17FE-&Bcpu8kXw

_U+#9kc0>be4ijcC3uL?+g=K1q}zP=^J zbC{$`-m^}AcMG!3{mRN-8*0S+HAT;OFo#tx4J(`GgMu2nH7qPFR2$dB3UFmjXTjj1N^6|9govtC}Y}@uW*23uI2B%8IR<@FV$IK92 zT7@Xi9kP{`cw?^Y7_C5RqogkS9pO|xWw=GQ*%b%JKD|pIZ`ACNS7-4< zWB}TK6x8m3GV%lHP@O4<>S)d+^6hp}uek9nGBR(NNozJX*zC>0Z2SJL-{RvP9#Cg1 z?_b@Yns?o6=~>(9Q+=K5#_`ojUyK3)iqL}&~ z&+W_(HjpczC5gku#f9~nOjaS2rct-`ja8OI?+6n<-VWlgr==Y))@+#Q{GkOMy<`uQ zkZTlL0$#&0|jO&rEfMdlG_-IWeAn@#J zBCAkPoaMjWlU08xK4toQsmIrB-m?I5)@d@8Rb|vuE*Z^PC#I+Wd>XA~k5XcJaD>yA zQFFB1@NU9-KgVr}JnU!$P5tVn~fC3pn{!NadKp}#B07sXf#1IFap zP1#DJ5rr>H&VtTPd%cA^zRqE~hoy=!*gwQPkQr{aoDrh;FP7llzt4f-HYfa2N1+(%Vah zRZdhYH{hMzXei$LUbui(J#XA3y-hW_wE-^cp5sIeZUoJjzqf${&P7o+^H?*&uIk`1 zfbsQzeeazQAp6{JxF*Lx@R|;dY`*8*!q6%CNw(tKF%T8ALuwqo9^-tKx|gM3kPqwXw=7vwzx?E1RwUK!j0s0j zVz;O1`=EL|x0_inPL2J{`rHC9;P)%{1IRC%nJr8}^+8s&kcP=EG~gC1QZdkaqR&ey z2|F)MK*X)cbexIv7uYs@b4ufUOHODOc)GgUHeL?~jccVol_7xWxW6aVL3RtEwp8?|aLGWw6Uv{ytzg0Bix+_$+89 z*nI9R_79+57t?uYu$*OG$65bASkL}i)6oq_h=1{^Z!SYMwmD)+D~DUe>La^`v22t6 zTwJJ{InuJkKr2vFDd=b)Mv!vn!0BcXBg2PFaLP;uWnpRqh1w&I z?J+|_5uY2|9F_wf!VfQx?0(obk^1WCo@Q(kiwT9b|87d}k3`SdnJ= zSykd#N97As^x9wxeVxG0wj+TOzkD0iqkHResnr@^Z5Q!FFM}@*z81#0ups6$~j>^|k1PppEV#c{Cp5n9Kxy5!*gzBi4NgK#1!p^9ll z{e5@1qXR*F=vOU(22>``mie?Zw&Y#^qYW|ThT%Dr;r z3{FUhIyo3r%2)AtzHlYNmu=CuU)9kym$$EkRp%N_^40+6;~NqYakiGI&kp3XbO(`2 zrO2C%Qd^DHMM?AVxnM;h-FFvgxE@V{Z@ze;OtUA!n|yf-w2p>edQIJ6Yig4$LnzDp zN(Kab;0P~^C**mkMMC6x2!i^O`)N;e3iT3ksKqK3+B(){AvO)W#G;|P&21G9S)cTH zqCa2_+ljzs)HT_^p4k9|NikiPilI%Gqnm31_cDBPdIN--wc%L(++6?nng%5=` zpFzZ!DRcw>4cF4yIQxxZ4OEDa11B@GSTO%vXMRla9`2{EW&yRk^b64MiP0Zn9kG3e zOiQ-NX>&lw2@wa?CeuY6+d-OpP@yyzCh zw3{s)9iYa(x`UWWoT+UJEv(q?sb}-lDRAm~gT^U=)ll1#m~@C;8^F6ZWa7?Cw>=fC zrXRRevPDeEE8a#@?;lhlwn!ki<_X+l*c{zPaLRPS*YILgKUR1P+@SqHR$4cK;T<=QS~UewjFDLJjGcZEuze+;0~ zqa7Ig;ET+2+wRmVJ-kobuZHW~SjFBm=HQ+&0K{v4@kudeS^u4dX4 zqAHW6nvt|uqnGF(%aynyov&_zrsnS6I%p#MYsB{!ijoQ=xz;x~-z(QXY>K9wz86wI z3_{!UE%bTgzWSCt$tjAX`+GeR1FA%HT0W&1cW0UcRnByP$yEmamQf#{0JB<&tI~SJx8KGpDnUQ$VS`%4 z?y4;gl#ZM*cnutO?I}i_<`LR5dm~Qb20bw0P_J7T(`zGIS9+`D6g72(RnxzHs!9j} zP1f`@eqL~lpfB}Rf3yIj#QlX(2ANYIqd4^h;%^e^#@pcN$3+uMpb77Tzy<|-2s45Z z{B$7nN|>X4h;Q@>d%iuqgG4bwy!Rg&rJM!_ax!3IwfkncsPldOt|CJ_{l6Rc&Nf4O zVNKKtQHr1URX}Fx#6HZHD??C!CoS}8HfCyU=rt^dRPEm#FcA|gFXKwI21HOSfy2w9#K(3e z4c~6PTzV4MSR+xWWqc0=RjZNiCqa*z?bBuILQdlt0?wymcw1I};8g<7pHO)P&+Xx z$dq01a==B`hvSS&zjH^=Mmu<79>?q9%9Y!_a!yYEC%!o}ndFg+w}Zx|8`~3}!eL2@ z4Y%ek{Nl1?p;glsWvPv`bjb~=mZ$4dQ2%>vnh$GG^!j_xJBAX~26TC#7Z|2~w5gr- zG-&&0G`@TSe6l_);Vk)>f4u5Y?a1VI(D3)FUHAIvD%aF;jCJ-IPofO_t6lvtNnNoJ zc|MEaNV3e))O9ZEl3{B|qd}oMd&YC!$v9}{L$TH1JDkVJu~!ipc`9R55)IRVa5w_z zD6o6%R2Rd(P$^CrK+*!ugX&o2)?dT7_!T|+P`MVAM;;1;B6GBI{l((Iu9^gK@+ljN z7O=8AHKY-B!KSLGmWN~XZDrvka=zEM{Q~tb2WQ6`tBYfS@t1kHb7Zg%Y_+a&g8~BB z@jWumG=a!gIyd6pZ>0*2aB7@f@&u40j8~!etU!Y&c{WZn-Ck@EAs|%-lK8Vx1ETcT zRjDG1_a0ODyWWQDk&bE@v1fStXW3*hTsJZOJXOo&i$nh18uJVu)BS>=j|d3)9(H!h z9#xU2(`cDTa}tEbO*S{8J7~lMfrEoK%BPnGr@1Mb_J$u*sO{1Bw6<;L7Ko(b?%PjKvS;*m|xM+7k7nf5fw;EszY0=Khps}r& zYY#fw41ASQL7;DusdYuAYvbo||5|Ylni1hFMWZZTI&cCWZ^3g!rG*?8KdfXbl`|*0 z*ILQROigLxHlw1hBl1*C)+F2}-O>jf)|OF{)(n3ve2W0ID*i<|lY`u?g}WV~UREY- z5B70(6h=A%C5$??KW2%Xo%W*{A|p?9+kGh+I2_a2rF$bD(!$ z?xK#-Gd5gRHIN3(+l`qEh@_*_33Vz;1g{N)$1}%3yTsEC;-dk<97!;~dNB1ciwB^r z2128?)(rx{0!>j|=R`(2uuB3HK)DiYbqle1R5iXCNdaj%ubp2-tz{E1n;~ ziRuE5omRJ2ozXkw8OJymOcWs3UaE%#@>Dgmmy@x9u`s<0Z5_d zf~zv<@or4XiGhpQ%?61c+Q?)L|0)~?WF~cer+E;N{wzmoM@F`u2&&ttQlVo<`2gtk z7%41$UDzJ#vD`u^>&LMP9gS&5w)U+-g)6v_Y6RGhGo&|#6!y}O5Ts?%8z#OVfAg8 zRvSBtEyr<7;?j2a*M8yw$#upsQcoF~HyhW)nwZmz&;i{j>caR*2gLEh=Q*y-VL?^M zsjZ>uBdyjJArB&KX<5b8gmAAwFwQ9X$h0h~AtcKu?`F+8S^Bz!N5G(zVA}Jd${Bo( 
zX>ZZ@O|CD}>$_yb__1T<7Tcw{!yC7HN>FMFb@8bii%irmSg3BX%qwYZ-TG{1Pa(~w z{TA4=eF4ry+Z~55y9_IpnZ(*iT_x@lJ=1&2N!**qe>3&dUmdqG8nB~m5yNuGJ zszX3&*TPB_@%GHRX21!hm8Xk?4KCEzuSg3|&!*mtv_kI;)F1~}uBAupP~mdsC%-BR1jvnv{`N{GmPBbFZnHR7qdMI;7C}hqDf77j z!~(ri#B@t0D~8`L25FQ>YJqnIyU46 zGVKE{!GS>DQ$RZWYtB_p{ms-Fzha2{k|EGAM$i$b?RRE}1S8$E^S_86^+0ML{`eGF zjjePs+=xyqQ7`YwiK8ovy1(pwjpN;=bme6++^@^oy=w~6_=6i%VN$WYS3)d#(`^ee zGf)%;a~~PaZK%YRaO#Zx%_kX&OJ^Z#ru@Wj3yCH3II*f05N4q|&FHstZRt>Wi`w<_ z_-{hMqM{b^L0Y9%mpdCSK=;))L(4E1y`!_cH2}KRA5%5Q3ZL0pKG}CVgxQ~@sO#$_ z4uZH=E-Zv$Ae0l}xW)cfOV+S#|9v)1>VjSl+T-4US21#5A=C#B;3JtBB#fw1VawaY zeTtxz)6|e9y9OG~ISyopEKDjy;((B(iQtHDr&9CCyx!$Dif~Hq=>@|R#Jez z@t7T{(axCxf<}L+cX?2>s~r7zX2Bis1EH8oN5zn_`#P8sgrfS<%ZnN5ER2#BspE52>w|(Rw2P)XP|c#(eyQ%T4~xi7Cl!^`gAB?riIn!P2mp_& zBGEBtIQ{4_^CLo_A@brx2872WUzWKvS5e zz|t&LX{^~mv*cjS`{4RU9oqVf2BivTr38*a{3ffAOLPEfi7RY~$;H{JPwml7pk~X(_KlAg*@Kk(W{HCFq($gq1h${m67}z6cQhD&Ybt4z~$3s9rb}-lHd*3aS&X zmdVj%hI~%Ri`-Len%vELW6l(zJa)X1`0t-cSdb66DbEv4TMpCMTUYLsCF;8rl@<`_ zf$}A&asgES(kXpA(J78ZbBH+O{iLq3l z<^Z4oM9rfshJF)o2q!5&V&H`SD1lK?YfeEN+6Lk=Bu#fmbeSydfhpJCuE*Q%JKaCj zdn1e};RcUg$N^X6>gv5#c*fCf@qU~2V%s|M0x z^`8A807zz+kuKU&5|wq2kDT5A@`$NauDauamXE*}s_{D7Pe zMPY!%#b1Mu;d>&Urs)kGTZEP+rhVyU?gt^9sczTybZ{34+bmeuCDR&Dihl&dDw6EB z06^;)*wSd*e%3H5NmuUDChgqokhc|!eV^;pbyODfBxIY^AW0K((!H!bd)ZW(!ay_5 zw`!+ZGtp)+7GoB=7thdJ54y?r@b8+5ydmIL!Zz9Jw(g?RpiW!MiF4269<>KCu|3-ck*e^| z4cBxlO5YCzbd}zAR`c%QUQ@{O4u)^VF=3<*@F@BF72l+Ufr7{ibmFc?MbleI(GJ#r zBYbBeD0zI8Q6Qk!4)Zd&HjU8o}PqJhJ{Bl+!|5HFZ09ge(C>My^xXqXAhG(9L zPOq2Fyk^wyI0xiOX{5YUm@SX~{+)&vaQm&jWqlf=-a!*yM)0r1D~%pVkIr2NPdtNw zzR)NKZXN#E_`7u-xJ}qOd*zY1dv|LCo1*=mq~;BjNJR!UslCwl|33BHd8=0U7)q`1 z#AUz%hAo;7K!RgqB4}avmQ8*`0Me-*uo95;3`B-zg6a4!RJYu!yYCs@oW3fPePIr% zbm~!@T&q|wx69SeIcmyQ43rxtrGvvIp2&#O$wVq0BvU$2UT7zIQQUO4-Yfe2vQUrZ z#>@`E%($UyRANRaD?H%Hy}O|E*gs{dL8Z7R%SpUR4?U$^adP-yQSyAm;DPp(leSI)U3In(gH=gYU!(SAQpQ|HH%DFW4sQ_SR4lfh{QI^L| z+RkuQ9%>DSE*^V`)Hcb8iX*+UU=Vh|AeB(P*bKaj1jOWgy+*gr)Q6?S7P zP1{GIUd&pff4Hj^F^W|h^?t~uQs5t2p#My)Vk$CgcntN--W;@}Rj&V=H=8#QaU9w& ziz3$_Q2Y}O3J~|_Te!7gBAt9xw=wX%iIvzd%|`Y>_HlP~cl>&=E!e>TLRGChyHc^zmDpkr8s@s z&i65nSsL>qfMW7UvgMS0)TM{oggVYZPY-yIXCT$w)z=>nDs>mlbYo@)RQ(_qOO>0lALY@$ z9yTMO8U@YQg-{%m8J6;GiCf$bF#W{QJ_(Q!t@Ntuv2)xhdHE~PKI9>%wuNX1*+2z9 z{*!f>kF%$6_xme-)*-HD{b(Rk&{}8guQu@gT`b(AKi~ynhBj2vZ(x7pO(S&LV?+LeiAVB8O)D^S1eD z+**VKFI4^7<^(kyV>0mbg44}ZuEt(g=9v?HHT4`onvQY)A+65+!Z4t4Y;%)5EHdO_ zI10!^0tp#EOYj<5`J_rs-|Ra4QObaO+U-;11Zc44g1-5OOcZ))BL~R~6M3RhV#T$P(Jw&JrCv{W2>iL;*Y1Q?pe6vFGEC<^;4MT^e>>r&gErF<8&*2%Kc zSS;7|sUbG}xeYL3SGlzwN-*MgK(<=&xiI&_nt-2K*CbTx%tkUGC3!zxAvuHUqjPK(b=D@m_L^?09kRD&*d=x{Ze+PYLVz-?hHZnoJacXPL!AYsqa2G~q6 zZoZQKl5M}NhYf7z0{e%?=h%cboyw(#9Wy5_w{B-AFKZ~~U3l=*vlHmJJT?Y-jjrZH zxdpkEK~BSC89OoRK*acgLl-o*fyxiW2Tt0?P_X$el*Zl?$aS2ph8TeGfM#X~zMQo@ zP*^eRZL%2CK4PYx%QI5gIau*vuokIbjO8E67Szbg(+&$Gdtq)w@OY9BXg(pFqfr~yxTp7_Fq+(=ti2+c^(ku7JmoP#H8spU2y3kZK0W>Vnm=zu`zV-|wsA1Jup@=vrH>v8*;?abQiY3CB-i3R=?) 
zqIl;HX|o1E)5hu#DfC7_gAL-6tY9;q=<8Ek8iWc6`Oq64DOtfT)F#8|Asyrk)e@`= zrp_C3yac0oyZyc$*SCOKXJTCBoF)3-W{Zp)AW@fpTv9|5={QqV#xST&Kyi|4#=>4= zEWuW) zK+pLqiM(cFs6fcdO|ss1K!La3&+weK;skCOUL^_sXGxTg+2#>itA1b>X_#o=r%!} ze=P1D=h8?rwrggcDTl5kPCRV)_MK>1&=yEs5|6;UC49QQEKY)I&+XC!t%#>Z%AanN ziWAhc5>}R`v85H^kmm>$Z)3xa!+|XPCCTHay6zKiXit~JFhHnh3E`rmqnj)V$f~jn zk`2ndco(3+?PPn#A*YI$=G?=Tu{}!Fy67m@g-V!xFq;Br!>nLwK!gd~bQ#WjW(>*O z*|6fpvr?uUzdF|QLBa+I?ZaHTQ*y5TAl-`>SqC*R>gRv9w@e2*7D!VBN@ck3O2;S8 zy%p6sxeLcQC;@#y5moZ_<3g$4w>p&TiryZI?ICT&87#;*SoybVv&>%5*5sLOsmH7Q z3Dn-FQlhisnbpN29L^cF zEVAe`0C6ak?g+b+?lT*7WFx7x0`s9jgpkcC|)*B_nn z?^!vG8#z=`4oONGTSC8IqY=dFwQu08;S~-wmOp@AInXM+;w6>G<4=g&2xp1HMU2{% z?hQLkP^t}$*MW*49=(@)ppl2cdS=0fH98ir2cSxVHnISfix~tZP2bm!gTFM4k{0jF zDs_f(WvZM4+AjVOPWQa`AO>**@_){uPT0fk^UWeDZ0^^%XBO;VMh`WQ09gojuTGqG zHxn{)TjJtJDw9EY-lUrVUqUq_SV)-yvKu8Yx z`OtSkduPyi)d#(fqk~1FSo~bDKQ{HZBol(x(|i<<&!hvD(GDSw$PTng#c9UhvRH(` z{u+EKDZY`MIlf;Y(h3URQ~eMS@(*kQ$^#nHV78gvkx`BS$!dt=F2+Uv+NYGa4fVueOe zrMt9kP`l__o;G5sqE830Ldq#q9db*vBt97u96ZT6kuU>^8zs)s#&8f9bc`J4bJYi5NE92cY{mbk7;!CopJV}ca%j1i|Vs=ZU z63Ig`1X+!t^B++o1Etmri5|;Yq^+$`@1Hz#9+{%8miuemPF1t+9uD|#tPg^e?0z0w zCX~!iXY_Sb&f`+uL&fW=2TN$^`cy&ad*zt{q`l6cZW}d zuCTl6x84dsZ|VDg!Zn1$8|H`4!`CtGeBFJFqt;E#4}Pu-Sl3Nc;CT%kYPEotGzHzJ zvWmSQp`o^OWwRovBKR^>Li}j&PnZnHEcb0;665Cneg!KJ_12hRvX*QTyNV`*_YHQg zz+4n*Y%Gydo7X$W-w$?!4?D~fhaA@LzU`Xe2`7jtO&WONi%C6uXcLdK!**`k9r*OZ zF>SIX)*$Zg$FV&?6^x-n3-BU)xc~KJZcQMjwrU`=7V_Fn|zMd zEj$*tB^hJrf;fAJx)4R?Ua*i1ik8hsl;y(v)3}%eFTTRoZE_H*)bUk8inIo&BmwXnxc122s&7 zXf$L8i>LNKE1|-Ek8Y%-#(}io8-CC2%a+qpRVu+XIEjoa^RwC$73J?UTaD4y5(e_s&I&7>=xvH)ETG9iQzmJ zTP#hvCq#P?_Y{U8e=PX&J&#=N`;@7}q@hpe*J`!vT*w(-@p1PD$@zoz^{=9eZNsju zEEi9D=-feOF)YphU>qioFJK1p0}cUHe1+1tFB%q>qbD$_Dhy({rBMr{{@_G84*|U+ zgNR;7phoKy7BLv($&Hly{yj;zV#)U-F0)_fD!$yG8{R-X3)ao=&&4c+;6|Tmdf3Er zcSbdwqChlg96!o+Pb#a+A3yY%uOgfG^9N z`;(jAEhkIZJ}Q_2n~f^#qq+SyYbW+9ta{%eV3;k4<9NzSxoZ@gdt*VDzP@*eM*n+4faF z1h460yarrm?K+lYG)`TMlqQM%z+R6*ObiA{9%dh5z{+EC_FKvcTSq@r1v`wb zX_b9524VjB@@~ZxPE~f*TP04 z8x!M)H>1{h?o0&@E?lR&K@t|Y;Trh9ljY0eWlY~-XWFLNe*9#KW1P?2@EyeJ3!5;z zCN{Tx>*D9jfzq3L%+#ALh?T{uo%V8CEo}Ikdq`^4-zlSRCC8X=_QXrc-YPv z_JSBe+ON5l?AYVocf{;?0_tV$s&D$tC5_YkM`0p&+IQ?9k&w?Pj&`jyO}#}=jV@ij zfVOz?@FLPw@^I_xg^BqpDoab7yf*awqZRM|rAxy9E;I9g_m?Gshv>#$5Id8e@eafz z1%!LCb-?wz8C+p>$2jGJln2YqeddF`@7${5p%kO_N7v3Lo=ea8`urAswg2;DRQ*?3 zzQ%8dooYVJ$@m;3vX{fuYj}z96R%$AWbp)^`x0P5yi9%T$R|WaerFl4p22n7MXNkD zX)vpzojp&|1(ndrk914qINyhx`gzMKb4{OZBMfX<+=p+~O|r9;Ejx?I_5IpOn8b^UR1S%Huv{Up zE*BkV=5n<~m3KN7J|;_6m*ZX&qXw6Ho{90f=_9O~7+2~G%xozr62Gcd54~RwS*b|c z9+pT?fREH#AR44~9gB)1c|Vibol&Ey-3T&ovmNKsZR+hK58drn&HCbSV%vkuv7Z-q z>Z7ASOI8jE5m!MXI%jL6`a)?;@??oRh~R@Om!LHbDEtg!1!;ZRBa6@=dW{by;vDLQ zyYTpCz5@MwgSdt~gf|T4vf(CqZpdoCVzhQOEtwSK$Ja(gCd9)ngl!C~Khj_83oD`n z!+(QUDq8UQhPVo%{WEN3*zO+{4$M>O%-^lXU?=9gcg`VvZJlu!iorX zL-dt$O~07KkJ!2d5F-n{@A;`5IXm`3bJ$7ebH=xGC>Qc=0byjUfOY|pHp6^yJ|SwB<5f0|QU z(Y85l7XvN^Nw>MoTGfLu%i0d1eBvcG+VbFJ@Tx(OkvEc(YlVgkHk`E`wv{q8%Ovj` zg3)z0aGUya_OW$0x?pE(MDLB6>4s7pR-=3w+haadd7OODVRD4n$vE0p{nuB&p7`(G z`bA=a1?lI;(ftSlX~B<*N-W#(n}HJ3j()CvO=)_hcJh*J#`hv7HX9zr6Wp?#F=3A@ z%nAx^XUJZi+A7pJd@SXuP0Ji*#KqSN)L#Oc!2t%dS10y%qnpQjDHDz}kDc@$JIMuK z$y8Ci4o#*{rudAnnx+M%+NY?>Hb1hVOv#7av0Zu7vjtDC7}QkW@#%p(W^h_vJg0TW z_L-UnTG(vTS~smY6_#ed%{U!d7BA}Flnx+N|BI}I@!?NVkIJbgw%jHG#4K_Ty3Tx#m+!F{lQc4R z=4rq@>eAw6x?jc|AA6}QKS(V5O-Ns!&vrf`BQWkKChn5^`lE=Ymut^2k3tCyjPJ() zHf6EY&A~l%jJG5xD1LVrtZ(>fR*w(g5Tat=$%1k2?J%_xAu)%q--`&~&JZr(!lMkz zC*Pv=hz{MMq{_*X-9oiH#Op`>PYBz#aUzvjs5j4l`@P8!eM%^O$UcT*U(`;X++mrZ{7S&5W_+WqxhQsLHTE9BszoJyvlMD* 
ze2>#Z+Wx0Y2}vr}x{mLy`xpRp3d22MpN#+rrFctg?I28MN{SyfPwmv^`6;t8P*}2Y z8}dYGm|*J)tQ-Dnv7q=o$i+)%#d!LUTFf2~_7fI<05Z+c=!W`|w7aCFf@kZwUV74c z3gQl#6S;Ur?*F0cI>4!J+xS7UvO*#wA|tzyEhX7|l&pl1J+h8fB4lJ{%gV?|$fiih zNLKdV9D9%NKK1r~z5na#>h)IV{Ga=|=kNaA&r{sIed9cPhvKKiB>l~4>cMARl-F#2 zY$zAueD7@FLm0pWma zgZfGr9Z>0fn*V5yPYOKw<|B z!S8w;>?1={0q$xL080yf62fC@9LR&+?&&AXgg81~zx+*Vir%QBf~N*?5D4k`*UAGc zvINzfLLV29H2&BEzQ#8yS4cY(t`&ZO3bG_m3^vmI?dZc+@$aO3b&2>QOw zwergiyf^lC4xO?63_p}|tyG2Cy1qWtZPMrZ=uHU5Qo&hpqhf}p??kvcrLJQRH^O?P zriXXuxZT<5h7Ql(v$-?~9Bq_RHTf#0#~7)V2ln9aTLUd_)peUwm{EujxLeH_N=Z zr`n?MSX=3EDJt!bQwI87O1ItJI&9li3kBGF4gfRR;S0_ft`)3(KeCA`oe(*b?Wpx? zawh3+$>YJ6I==Zu^ODgI(iFkkf3^c*$ouz&CS%k}#JC)2x57qe+T`ot5Ew+7uXn`A za(0YS_$dw~=OJvO_i-nCGEfuD__xLUR64m?#ILi4TbTI`>{rpv50Ult9!tM8NC=w%UOYdAt7igi#`4UCicdeYOihLC4DZ6Lxhdsx+tT6hGh0vc`-g*&bc>Ow&Jm0c0VIYGKu zoVP>~a4p_ms+V#dw+IEr1-cTW6vyn3Vj6PVg%J^-Dm6z~4qWrsbkN}y3QPs^iR&;oD(B zm-@y7IFs<7hn4W}m{I5BNKRaN+$TG@^)z+&rb@E^g8^^dxifSS3A@aTZPQrrVq#>W z29+NKeSd#P(=TCnHN}{KbdmiUH{+|8hs;6(^%pT8GmQ*AtPGP3cb85QJju*_d-Sl$ z*GGik?~Mm?@q!Yaj@HgvL<@moP{0TNK|ykEv#AU(m1Q>$02{1UyDs%bW7IY*bT45H zuu1AZVfFrj{}rncr=a7K43YC4@GZJ0B95oLW`nEJaS0lbFUD1;jk=WZop;TZQ>690 zzx8m-P`M~Cpk^-j`OD4YItvfP)Pf-J`*CJoDjbN5)CKmeOu! zBz!Q`27Ke$?0iQJiEIyRPm^6?EaH$Sa+CV@Lti=!=`xC|6|2O>7AGP7zM zdFCYqqBL#7HKwPRBdg=&iNj)f4*tkZ70ImxKJuy9KQcz7d^~;umo)x)qAy34FAjW) z^f+UNEGfu~`9aCeX0-v;7Vo-i6GiJOZ$FUuqo1Wc;q(V(rPt&h>{ckW4QY)ygB^Aq zcIbY@;Y@Ko&@vAdsb$qz%)c#5*F|>VTQoL(d!$+&8OwF8Tp~rSoG|dmCB6FdW>p zdUZs{Zh`d42tfKZuwa==VVWX?^9RG1=jTPzs)na_Bd@JK?%J}yt$&>S!=gzWH1BJ* z8^8)h6>nd;s-KK$>K1SY+7a3~&D}HRb=V|rT)fBfJU`&@(vWPUIHH4}uD5)4r)#xR zG2}c8_)gc|nZ)3uQJWuohvMX$BF?C<@0$9Wn>2bHHXxRvFg|^^9*OH&KWuDX^hTp1 zOOIbkB<*Xo#?_#PO!^9v&n-UE#_2Ohcyl%`2sZOZI0R1Zrl)t^mG`KOmP128y$_SS z`FO}<)vKER46ma1r18)FsOTRGZw)soe_S7>g^aJ*c=!w==!d2#M^yX08pB3j}C z*0DIr@W&gs+&&x=Rr<392IT2!W*)jp@QK&Uo|4ZOQQFEO?y{32U5coljVvv@GV`s_ znKYb#+N5QsS-Xa&mI?(B^i@a)gDqh1qblaQA0PqU^1emFu9!{078 z0lj&szQeD4?klN^|M6t)EDFgf1$OjJ8SG+u!!5F^sS|?l*$w}ZjhWeK&~-CIw2Xg0 zVFsQ5ke$&#k11SWzY@m9b7y0IHauOJHPv!HtEu1fc`vE|yJ#riT03cX+OmJr8&zuKTj zANeZ**jA!EYj(`axbSpjFfN{E*dTmC4vJT~_O_}1XNyhAeY1A=@1z8L9kAbW|03lc&)UA5fx9lmEBxU{#SX$@wHD?0C>U>M4 zuataPj0}?`4R@l(H<}F=^+Ou60b&~Ue4OGMT%&xz*njl2FS2xYGYpS3eC`XO+)Q)x znyfrdZVpj+LI)xC6oyWa`b|vKJJ;@x*;ziS(=S`Mi1jatn&L5PS|rK99#TCc2u{_K zy1*nPkSaPiOgL-$*g->9B#p}}AD!!bttV>oI~5r4_$~ewo2yYpJ0ouwRJ!(X$JyS2 z9?B}cCCqjB>^as>s~h>jVJmm>jMR09q#RzuUcwzh&H4=jfcT;@_9J)rK0QMqlw#Fm zk)aGx&C!R|b`N?(TS@dY!@K=VFl zb6E(Fa!EH1NMydVNHJZdtOGIzJe4J9Rg!G_;a0YUk6s(e#t#2J#!8m5v! 
z{HKd&>WZG){*=SB9I)gp;m>VVpzJcZ!*12FgLD+H4eb$nqHmq-O6Sul50(-6%mt92 zp((xV*E{DoP7)8E%f74J6t$5HQC|PHExR$Kc}35T8w+tg8LGy9(+$)smgh3^`zW4D z9BTai4Cl$yr=KzE7Yv9MPxX^0{$?nEH2+)3AZX-d)(S8DD6{IpX?@yp#j3!IwZ0CM zM#)L_hc6@xd3KMATKwkNbsUXn1hCWYTNl5QN=SMpwp<<$3u=>7x;Uu2$Y`hj|yEzYRNf=5U@GtEUliIr9s;>Sd<$ zhKPZA+%{WAS|hXPYfpsFG57s&*J>S+gQN^V-%?ca$$1Uku{KT&9>xXC5*r0w2c-m> zzB2mR(k)@c+ze>Ksfad!EuTe}>(oxzrVPYbGLhSM3{-6kLZxAZ+N2lWX_!AnD$B~1 zQng&Bs=YP(TyCR72%Ms|l7qEy&%k`dRiyTEU zHCD-xzbOy3a;_xG}6Mu ztY|-`FkmI^s`;+qr${vmtS=zU`AYkD<64Kls>8m(FRJ#B1MBBhTKSjE&wm-U5~Ad} z)`m4n)URB8RQ*tAxjKC&10N(?vChZ598A)W?T^OSK$Kf0E4nAFxZ8JEkHy8!bDx9- zy+8}=g>z$RoOPPUDC$h?nN%X|qxZMJ4-oFm^A7*fIHgAd3oXapnmh|%zFG0Elb?Q4ywzjGxvZas%jnJM zYVH_}a^D71~-(JYlq{ts6xTl?exxv9= zUjVBsdn0>n!_haE`|AlS1j(fvF|kHd(_S9g+rwANu%!;F=&nsvv|pXJa;8TWJ^b#7m^n1Lue|CV$cW=i zF_Edqu+~IUq@6!!$q|=l*Go` zUg3LZLt(|IF$fX>&e{%N*Ue?!yeuSclI^8!|5z$GA?b}*3D=p(Q{EGK5Jr_(9=cqf zyqy#3-dg%DfAO&$DVMo~GM(EEI+sA$5Lgd z>J4Xfv9paqh|b!GDDZcg)l96#qpq&=t!7zDA5}tMD$Y}xL5d2cP&hHg0VzNdsY^1hY2jeOKo2pG&63>?DboAS$F2tkbM)uY8f zGY)(#Df})eDSCZQd3JRs+hD;p=4%^iO6kQq0u0C>w@v=e+Gr`MJQrYAYy4$EI( zT_-PaUO=jR-q5UU%gQv}6fMqm&GyAGec}FMVDiH7$C0R`w+pU62uF}lZys9Bl&%ZF zelB7SF={TH90^_OI1t)^h&K<1gMLOi&FpAJOh!l(P%Hfo@ewDcE2x2)thgHD__%V; z10q_}e@>r5tKYpp_x=9TMWRy1I|%9Y3N@*?eaHY?P@{X=^Ydb@hFFimWLR7z`GSz?p34-i=}9ncs{`0#;^brwXEOB3r+y&4ka9Jun|2>2$sYf;)}z@iY|aJi-Ev z!U~N}>fd8`2lGk-H!e4^M3sT&+&37aLO8CwC25Xi8XYEvI8j|nro3sylNf)~M+5uo z9RB0?Pr)fFwH$!n&g=A#O*mYAJ2y1t`dWQfo!S@kB|YRxFSun&KFF_H&S&u5ou`*$ zwd`m+5FPK~u2|nka7+Oxz}aDp8%yysk1beptC*pZF_sQ7b{ZTBqvp)4CUmeVr%{R_ z@h6$kS=*vkX&3FMR< z0scR={6yjvbk`6Vj~6Tnv8`dJ^4Pc?kfS;5N;@f}isK=L{wbLxV5(}+!7I*%kJ z#C=1<)toNhzKuRAXyb(}T%nNe#qR|(*9e$U`{IAsoM72zxGIAs*c3_8n1~_ClDre+ zh^bKd<~q&SHir6~ZJ1%H%wN7)_B@<>>dS`#AvecKH_GJrKq=9zS|dYYd+}*IB>kL= zU~`*~y_UVq+Q^Rz$W{kqa<*@r2#B=SJzGY`e=pX5adwr_QY*$B?}6&xXiUR*?T0IZ_(8tG$FH5%_ZtGWsCWjG+Bu*Xtb9fa6WhJ0n3vASb^&>lKgsnIvc zqU$pi24IOak2tgpZtaVr2?%ya{QT~^cXexloFlwcZuSl31v#G6moLFdjMMS_@6<66 z-ok)!7Cdh?aI`YW!4I=r95H7dd*XC{@2Q4FJYE5pgO!1JF00OykOw`sDGAWXU@5H0 z@og{hj+_vZPH!0=4`slvFTTyy0b*fp+I;rqS!?UueNxUF9D=6SKZ+R7;)^P?AQ1PB z^vd^E#{nQIbsbf<>~-wj6vQUnioZSAr%wxQtK36x38#MH zv^OWLUBV(;-}+GD;Lx_fx8^ofd6H&!FYJW#7kU>q4 zEj%aiOKQa!~kmD(3r2%Y}2*Om7Mm|e!`p-2kzY~10%94;)aQh3Ok z`a$ujZw1_8qmR1ki1`=>=yNu&B&7ue+%m`y2XMwnZ2MGd-3C{!a}M6m!j>u>8M|4P zPGvhdOq_!YT`_45AEUsW<~6Wq_^5<2_+s{y?&@ASY`s4m)4_%>%hW)oHqfpdBN zaD+OO9OuS!?U?ck3GLk&Hdim6sP+os6mp`ErQdSA{O7by@|Xz(H0I0wmD;}9FJmC; zq%92Ya%XYt^Yz?DgO|X0ShhI~z2Sg5=&sVJ_u+cgzAXT%Qp{Q#g!=rilS;w=a|b;7 ze;G0mpT_hQ6T33%xud80X*r=CXhA?JnF`Gk=Nwr}*tzjYQPZ#C3BOwZslpVXf0cZw z%&ZD-uU3{x(PfXI0H<8=Mk=Q;*{r%&W5l%mE7b3>dSpo4KgJ1kp>?f6>t18Xa`c)d zHO2-Nde2(kqisyY!^cF_?RU-Rb(y6t-mlRvupmAP`Q&}QUmY@=$WJ_|RrZ+Vcya#O z%M3}jZ-lT@%?QT#s`-avRm(;vAwZne)D>58W87)~YUx@l@C`XhzoVjE){hvTM0CGZu0MJ(T52HdS2ZeefrE<%y zi>{+I|B5O|>MtOu@D?Di9K;_{PxH;H$zxPANbqB6;$Mlp&SN?emc5u_iYD2p|0wP% zxSqH0qDeR+>NquTlb9Ke5d#SjI#3N57#vbEDk=Gxvd$&>htgI%YWTtxG5L(2ZYS-* z1f))!#I^@lcJRK`j8&WBS^GKh^p>sji#)2O23F_2xc6$Mwe4mJHJ{01A`G2IO}kHH ze-vZwY}!Dw+TUE;_)wB-kgiU#d)!1nf9iUB-_?cWQJEa}Ih`Pta<2;O9=X1qtiD`! 
zk5wbyd!0eqII$T+)Zwgylule6R+_Szvd_1XeG9rKts_)@cGsbK-JAO$%z)~3(xM;o zz&vZ3AaxM?+H(aDT`TP-qH?Ryf=sE1-RMh90MeFVHJBaU4nG|j$ac{E1iJ2Ee8j!m^ZLE>t2&kIU-q{6wvhK`?xOgfF`&})U1|LLncVm8q3jMVo#Fp?p?a2P< zwtkK^{PbN4{sTMZv_|*?w>3m->zK*cUz|2%rs;m4G z?_a)SbzsF}@PYB^esQt`mHTF?=LZxp4wZpfN?<2PDbYrA}5XR{4M%~#Q9-TYu>_iAHI$~b`T#T^Pof- ziS{qpRgaEZ&^j+;8y_o!w*7NxZl_VV>KD!b(eba@`|iim%IQ#NhLa9P>8be*8Uz4S z#1fkxD10o|pIFz>kZYy`8Ttw0+l%!?&66ja`+v0EQ|My}CkNX-*5!1E_NyT5Vv5(k zd^&Lw?6LR~7m5^ZUc?0;lm;Riz;AAqdB_RU7plGsp)j*LHXPF*O*)@?UH-6H3os}- z&_tKQQ0gHDNZa%4yyLV@mCw)qT4KjEVsFu`!=Ldl*)nfGNKolW0pj=oiY?P z6B=v%wF;^c$)fhuP@7>V_HKKZa#gq^?EL-Ie-M8QlI~?!)pxX4GitA&OC_Hje^a!& zg`}ygz)hVHq3_oy4a=J8uz*I`NYo1I zC%NZEtl{eEQM-KK8cbXCv^7IKDz`gr{<)ojL0}2{Yjtwxs^cMs7>gt$L)&b(RT>|YA9*LrNiH@b}~ zzMU(4JRvaExu17wF!F58pD~$c$FQpwxX2n~bh1sx{6F#nju+L~jC@j6dU94B_~Rx* z%$c#-_o17g22JB$EdV1g*pa+bwgQoy-rR*8l*O^eb z=P{_LTi>>VGXVxP>uCS6lP520FF3ubIWmzxdC0%Oi3wrvH#3nqF%-aVewQ=y45HTE zbZr{3#HTc;#IDl57jn)%bzN87w^;-cPjZS__w?yEaF>J+AS5~ArB0S`Brs~qS>&Z` z;e@NlUK56@?qq$b{PNuU6D9F3g0MT*PK|}16RL-KKiBvo z&gYYovY?TVi)`mEqZua2HYhd5hKdlOq6V?6sQ4@>6~d_!rdiI3SQ&%Ak4i5YEF z9&r!Al_#JUQ){Meqeyw{`w@Fy?E)`a;}f&ReJ@=n8VH!Ri_gN|NdsCfe6X6e)2xii z{Fnk+ZZ;v`t*D*0cMAlSXpD6;TDe1KUjOx@&o1_6WKKFu!^91#?ru!8+Yd9QdD*I> z{`jVZ(HLFFnN+<*C{^Yd?)#fs78^ND{4KBR#|*MNp(tcy_N^TEq>IHs#_;(m1xSr zG_G6OhkRRan`a?c%TYh^QH7g1$>MEhv+67V9F$##)SdsoqxM;lb8Lk_d2*Qen?R@& znoqM<&BvI&0w`UvPZlCDFb8UD3px}Bi?g9ZfFd|Fm?liv0l!#(di3M78NY>Xhd-Hi zd9158V*^EroD>Z+kK!X@feBJj(LiCcI-o#3(R&|sYx#Py>=B$0b(iTBI zD;q+`W=vj32IF4H_u=i`rOf8GHXHazb%@txiX_Dji0w z`18DdmVHA}d{qa305KY*7PJlGZqVyLL9cktbeG^yk?2ZuEoxDEf_J0W;nrun*a5bzvHpxixiLs zeq#VX@cj=fy@FWzm5g|`KK(c{2;@s4xzJ2n2%Op8{u4BE&mE|w5D3b2ODg<8(ZAsu z^zoA&dmuxbk0CJRBIx#>niB0n&8$KV*JSRR5`!FfA|q+bfwtP==>7x;mJ@*>rAL*v zjKvZ;hx|bkfH`o)_l1dshA!=otf35RJ#m=_i=jo!>p(2Psbh{H3t(VtA~dxC)_#f` zO~_}Nuz9L2(T_rcTV2H|`%6k6H{+!$kmBY{N6$i7>y*PYkcpPHc^-ec-+jj=!N=S1 zMq;Z@wQsc+5RX8;ucRY$>m(LG{45C`P;+%SI8!Mgk)jRhiu?MDM@fbTjvmNcj)-${ zm)~^hSlG^n2uI|>oUqj>b=&nj!0DSHj>`9az|ua13g2ElMohrOJxiLNxdk0_CifJr z<|I6}dL{^2h^DHlU>_vIDma_pnSfP!4QHV@H9Q#N1Ln0kbr;6sv~1=~+x(u^R}k!7 zXygYe+}OJ(Zy?v!?jJv`Flgc?j(FHP1ak#a%bfjDPXjM+VWWVsvZ& zF5)tV7AaY}o!LsoH{n|I>otc}*L9h7doVy zJk!`sMZRVKPN*F|LVPPqvV`*_`-UxK&bh_Gw75bat4RN?IdIk0?^;s{f^;58BHH)u z1PQ{6^+GW3Kg$=LiBFZfrL9*QvBsk9wgermM%Wi-Yr|c(xUWOlI7#d8kjTNv{)Dd7 zbZnN~b%b}wSGq*UBKd*RCOcs3Ml8{b!x2kBie1onhr^e7X%8%AYnwRXxWDw~o}oag z_Ja*g{SQeI6Er_}!c&0p^%l5Zj|s$FJ0r2HrUSNg6{^phK(>IsujFhLGxY12qWBfx z_x03DV#&nFwC4L~f=*kx?d#}*s_`e+9>Sb#xaY^GVH4`WxeB3&+6!3w)j2?y6ZNkmm41*atH% z`Dw@TVvyoCs<3%EI~ckGG!(o?;;kG*HTL9DKP(97egh4-Gqw=lTO0&IlMM0OUMuO1 z>-YrDEG$C}?V~+vz33uMPT3E!5-E5J$FB7<{<(NH%FXA_dSv|KF~~TY(x33U)xzLe zgF}7*h9eqL7;Rmz&gz)2k;RiMWoctsGE&%`23?xp3@T&MHDt+s52b&XSZz$F?w{av zZ!IF!U}x&03y$w?5sucP9T(IOme_dhv)wxw(N|n$aS2lr#~4vQH;K_oxBt|H5-E?jOAeIaCw4&b?rWyTzDHJ{1rMHWc~E-(+Qo z519)U%WX9<23Lk}f`9*K$G@iZP)hCa>rX$#m_G17X{k>Y@@I8GLHG>utuO%!2kFN6 z*`pA!o5TH9cv|pjTTp-ad$)v~8r!riYjuqEQ*H&6JmKuXJ+gHzIqElM^^IC6znsoz4YX;SVA-56m1dI8)Tm%Yfz>Rkw&1n5|Id1BB1EZ!cL1Wl1x*Dhy)S6()ARzS%Y~k` zGonrW7F+d4>iZ1@ywFj|hRFsxsy}p~eLToG*8>qA&RF>b#g0QJgBk2X9vd{#KE64^ z54sRdGf~DhAdK0D0gF*ETlf@yieU-2hH1ZoGj|S45@Q)wt)=p|r_`sH-ZTg@D;tzc zpOLoqE8Eo~h=yDwUWq^Ja^ZR2&iWbbMD5Z(YOF=Efj%Zba-D(fCN**4uJUvl_u`~* z@$sDG7-d4!mf1RcGp_GhgKvmMjPt^x@lrAgdB%}1>dmK_PUo`)beQ{V>^lq@9|Q?# znT>HVFkt_^ER4G9<}Y$hnsw!30aQ5O1DhUulG=DbaG_kiAU^QEITbG`MJ$@ZnSdP? 
zKK2MtwbD|CmRxz+IPphbTXV+J0Lf{MT>WB*l5;FCQ?q2hZLCtHd*ZK2aq(&2Ed=u0 zB&go6;m{Py52J~pp(4r)mZqvk#8<5fE>Nv}w!xk?)N|Ib9M>*R`B&b80)bSqWDRKU zJl2?hLt*UwIshiU3e%o~Gy6$6zZ!Q6>n+x9d)G?3_|iYq!e`@iE<68ca~%B2>sY>x z{>K&uE2Rx<=n=I6HKY2HP`IA7_%rlzWfc5)*#HrG3*^Rkk8CBD0EI)R0LZy2mUpj- z%V}!CNEi@HG=E{&lZ>mN|sl${5u3t`ItMPT9+hE--DegN&OqfWw(zl%BYVf!7 zioO&eAC)!ZU$AfDi^5e~%+p$X!(I6GJ=I`_cGL@`mBnbtUD4e0tE((Qx5X)43>e8o zC@|0_u^oz`4}BtwCjXn z#)yD?(-uEakG`0;lw_lvb3Hg7FewGXVkcp}jW0AkA;CWjFv#xqm1V|SE^=O4HGH{L zV7OtWp*y9)97az?(|H)W6bpmJE?$5$Cup2=t?q(Ph$Xna^3`=uOFwCCYa*sZ3633T zIH`Qw&zhGt+tD)T=SQm-vX*++RH}$Eh<)_-bZ#FdEvkrQ+oPL>rM{j(kNOU&nvU91 zbq~i_=X$0BS^d>GS6g(;-dZCow8!8>QQqF^I}u3pNz0xbY?ZWvt}9obLN`qwyqty( z4Vj#L<+p)yMkz(+HukHk(FqECYip-~F{RFZ7HJ{!|6v=+YcLX{oWI!jQrvq^Rfwj> z|6DvkIOsQH@b*v{eWZv0YxL|)LOIT-wD`SqLWtY1IHzy;8MOGRt~RmFFp}3?Kg3+JZljLf z_J>Jv9*Vd_s=}OH!+nqJqaw;4yWdYxQ>c0D5t>5OCY~m11;JYKXr86UZkBv5R#whx zxV$*r<6#GH`Cty28$WNgoe*Fhqkw3Jybcj9q}RnVT++m(y{J?pNhE8%+?5SkMhd=G z*)-{nm)w7k`u%ZZMz-#nBF`S*o7{1ItpDifu z&(O|MG#|}N6o{>G)x-cR8GBeIk&T}Z7rF`xq|r+_=FT0Er}P2ysI)E)Y;=Jh2!)IB zG)ryUK#ya+wERS;e%6r)Gk`Ny zh2tE=CuU&2?-8So;VvHXYJ#PL#{!@%OE?g3!xIY>YmmtukAy*if!_05CiHE=e;-%W zfo+WgSgH*N6-7bHfMdqri3#nWSsfzkQ5cHI=8lRb8mz^UhIdX~d3kO{eK)S1G^2L3bJ6#& zHJUF-Rc)=EY`5SHduVS8$ypgsZl9o-LWA~tS2Qmbv{)dUV5Dm145@7LJ&DfImt;i;zD!l^s zc)c{%EvVZ4Nj^+IaEn2bq&QZRmllOC1A~0s{ZWDX_gf`Y`RbRy(IXpYT2x^n+VB_! z@?l{wVG83`dlJ(dW7raji^-18km4=vaz5%mK4Q`6hJRFpjY2CU!*Z*x<#{ExjVdOR zNu_*==U#>I)D4p5l#RyxzO%2*EEoR+EV~o?%+Cc<2|yJo8zy5?wWQLrSLNd`$%Vg@ z&FLwclH|-N64J(+6vIf9FsF~gUzgXsVB6_3I)gh>+IbOy=4}YAd zf1_RUQtB^f*(2JZRc%4_=v%9U^78_YwsHLZzB;cB3l2ovrefpcxn0{I68xWxjNCET zd_q8x)_9M&R(QNO)`@Oqrj1N4at1JlBuY{y$cXY_gRj)@_C=YHQ*4d>D%m^=3*~y5 zJ{@t4+v=OKm&Q;G?$^yd(%m+7MHbDXTC}@d5*O6Q^79*W9x@E;8=barQ^~B0=+-l{ z)$+Req~?15{ab_)Hcf`>QWsu3#}Hp_%TTj2)cV{ zZNofLeDF7n2ddQ%-x5q%YhDvKfA*a+P@QU?iCv6VhXq$X;%$U1N(ltmlVJy;{CvTe z5(mK;D>1jgZlw5P1`Uyu9^_RO!6Ne+>sE6Z{$!uaK_|ml+}TswL^s4?f)#@)4^7ilJIJADjCpl@g^wepbwU)y9udDH@%BJib+uVA z)BuMPs9Vp?Zy0@vn&2ItTpw?f-wB zJnQS|kGW!R;#5m6RNszG0lh0wC<9^1f@qRY zX*X`&M?;jmGFbaRH*veQwZk@%h`Z;C1jLf=P~r;{+88kSBbKak|L}c&ga5Y^>AdsK z2imGVvXBklaoD=6)n+X7o}+YmEJya@vOOfkxt%ZD%U~Zt&46f1Ouq48Xu;(!%S2&2 z%NrRv&77^OtUOEv4v9Fh3)jLT`w;8e+S(#hCR9MCW>x|c|34k?v`lPO`C}!8H9OX7 z3D+wHSiqj8MsvFUPMdH3mBF2h!zo_*j%2m^)IW_r?%ghTx#ABI(b$w^&K%`&Z%hjvolhu zF^;p+*k@^JOFrp-1EaM0Ez>ncZs<+)Q^}Y5h_^_=pE4rr$yHr$VYaIFemW*6wua2}VvL|N*xb|cx~6JPq)p^g#jco`(ncErDXj5{+f zi;h<2bheqg>YAb?$@kh$MyCWNilmq&4`0n91FHo4M(nl2sWZQiFqdGDYo^^~KUg!s zKFVQcrGp?MaRf(h&da||;Sq|%Vs)p=`vnt|%&G5@_oJ*QY8sQj{(#X+q=6|~UPSJ? 
z-F8zi)>oW?z1*)0_|9Br`pN5`BCQmjB$!`{t61@^GzqrHMWLC>Ww6R>kK!5EwIM}A zxF#-aZRv#o=Nrn)yI~dOw%R+5G*CV?$DRcLb0dS}lki@hNe;N#`I_0>(Xqc~9^3K6 z7N%4(8l`L8NNG8BXAM(|#`mvQ2OB=ZX@a=}z5TqcxxWfuG@@pjOvwC3o7Y~F{n-PA z$3ZM{5uO4SvU>4Fg|V6z>3iH@q0DtcR(-@eSKvF#Dy%Reof*;&wk^qA9DjPeBv(w= z1>HVc;g%O1+!ajy^jLvOIn0cZ>V|&lSn$KCPwrM))bQBG-w%kAskK@q9LE0iCM5q0{I|=`!z676zYjfT?lxz{RKnQfxiWZxq zjt5{^!PnOgR4TIYjaUp90}Gu*6?mWoC*26@_~6o8@7Zrn-vh}XuxHC*6!8Yi^55G7 z*%XaG53{?^_Wxp-$hYSU-G8--1zfn%+Hl0wM8RKS)uV5Ddu_`V>w{3*CQYPv*`X)0QtY1UfLkx zQuNJ4Lj!#~sK&;7)r=_NB~H?5;!QL*Hgh#-IUlVBE+P>Zc8`=5OTPS0#DCF13TFnjl_L<>YFJi)nH{ZgU zp?k7H?S99Md(-Lw##op>`6Z1W<-5PcwU>J&Lb~WzRxebZPkoPizu!i0ahwY!%j>J$ zA0%{1q|9Nl!bt>~SUiL%?H{hw5Ei%Gl=d6`<@SblNGt-|Rt;cbHXQs7Md32SMQ>mL zg#-K0^pZ@)y@|zBl!+Svt3pV5TTGOJ%ZG|4G^*PsMq(PRX}5!5y+b3-4Wx*U3PRk) zc-?KUOXX9^P5kk{k4tU3iWuhkieC{j!-n(X?Z%vQ(nD`+)TMXQt~S{fajWoY0=&>^ zR1h$;2_&Uv$y!52po_}Y?R&Y|rMUsY$oz%pjMnJ!2V8FN)l@tJc1k8yV>7EC)+(Nt z%{af`y27otcJ_}N5!L%ovZMaj!l6HBK{!&>m8hWTqt z@pyGB6mED!7)nvUDZZ23v|%#-;$-!_oH~qntuDOp%n(y_S021a5mLw=!=EIyTvPb> zL&wmpS10z?@?3(e3>#8mjG~9>=3L3-j5DW5<;}vE{77Rgo1cxSN?VKkU=P#lE8# z2r#J5nhG<^&N53=FQWj^An7U&nACWSc>MOYx8>U5R;NHjN>4n^>|x z19iH&a0s0e;?1ht*D#G=pdGUtX;!SRzlILqR3~=CP?|tbe&0Flr3spEn@$v?Y71Zu zB7O)cOc-J;`N|a;WWwkjH{V#SQ^^9)RP&FR$@>MWeW4i;nAO7|-G$KcEYB#$VR08M zrJL(bU_vIi~x9SuHc<5RJ#0fxVcZ@$$lhFJ--8emD7NE&% z+{8-;4Mrsa+ltz*+^yZ?M zjfVi}Spt&21@nEEz6rnq1=gJRQTC*XmMsv{+|O1}8coS-;%n*CHxCJb{n3AE zGhC_o3gA%G=Ow|wA$mC3ZU+ZY9d6n~;b6Z2q@OX8qQDu2-u8^R09+xqrJI1K!9F1+ zFQTPiOr$C|MmPfVf{q)S5a%w^ZlrZrA+6<_%buYw_6{4wqaZ4bq^r|}C?v+Suy<#_klai;!-1r-;+4{&1#gaUwX1SwG{f;sb1}xBC~hXdalM;qum`4a>{eV3~mfWz|Z`S+}Rk9gAJNtMtM6p_2V^jtjk=$|DQ@F@* z#?h8{*NnB2YamxurCMYLO9jT-=hki(q(K=Q#7 z+=-0Pic-bZs=ZI=JtP;r11O*rsu&?xkhPaHdhoYOMF)TG+v%z9J2sM)+*%z$y49FN zQcH0N8-&`vN5FlYt2RDZAj)reI+rigxew)5pQzN|(~>0sX(p1M1xt7h<`1JUNM z%SQ>VjuhqbFkWUURTzbPupSUXCVBUlzB`?rZ=}A|6V;f4H`Dwhd+_Q=PaJo8EO|g7 zO*w7eB={waGvD=0^)idpN*Oo>i<;r?o4q9Zklj#A*VH%*oR4@EV1bz5K~bYTUX{YV zT){<9bsYp>%qpQ=6iFv_=IPIfrvp!V(V3CQ8*Qm zqVBU}3TTG-XGL3Z$^pCiIHU^!@0h=}C$Ug{xomgQ?~M++39%F4ynC#Dj7<(3w}iC4 z&Gov4Y{b4K<9k!Fe(HTSmF|!z9p+aU*;CB(k%~zn&c)7vR<~%rr%3H`4aPcB zP7)exTaQF)-c05pzJ;fVKEZE;kDzjN7JSVQR)Gsi!|V{qq1Rha;G=7jFdA1|E#^S$ zfDf)JUC`&3D_@`8%gl?{Y0f*=F3QEVTG@WFx(OHiin|I?cE z4PyNPVFh~16;R>3=U2ON)$V|K3McAGsDDrlA^~=;2OX=6NIrQo)bT0ZGj$mkxLm8R zO_RUlHhAg!F4RS~_UTdAQBhHFOtl`F2RJM_Lcep(HMr>!+V%v>i5deMJoZ8esEC*m+s?_a(Z08`g( z#&3b;i;|)Vyz`p*`5LJ36T9Rep+dbM#bxY_-raR~I;@hq?FN(#K;+zYj?#)d+9tzw zkiR^S=v&D;+jc$>6VRS$GaLZuf|xd4f=XEGO^gM$lK>SZo&s8Dhk98z^%t_eUt3+@ z)s=lz4?0ZfKk5W0>eaSEb5rd^$dH7!=^(tW+sL>kskyaZtt%A1x-%HgT#-L`Z;TZb zru#6I9CU|aziDyl)?QK?Q?6WNPh!mF<1e0LYym8X+n_Gp!LAomqBrJT9?bWu!p!aI#P9_o=eEO)w*(h52KT$ai-#0d+`_+=#v=E4#y?7YDCpB<13dyixbBG z9llJfaVM{blatyhW!x#HfC6EI|4sODHa zhtv1fKmu{=e5)QABWKs_-z!OZE4J)B z=Wafq+{KwQdn%RpD&cUZLfoKI2v>8%`^x>z+QY9GkL-C^Q1xe zl($DcT3svgm|jb$Rb8SjCc&B1gbKW-o?54T>VLHDeP~(mtgkdhVrM)%KTn?GuH4qo zk19Uzb*l9K*VCvCk?w&}FZ!jph@^a$HL_}T6`}(xO*YIcN|bsW^@9YcY6C`g(qd1h z-u}gI&rYgfak`^~cS#*3oz4>cG~j7UsB41_IZM^S=3<4S+akqvgj}CZCCq84v^*2H z1<8jQ8^cuWfILpt-kC(Ht`7{8fmS_*ryYoGZ9MDhhf>X{c3KO@?&M*t1+D}M<50qU zauOHn7dgDU(ovrCWrw@Tx@e`Ag_o@bxGcNII8OUdV^uXp zj=y9}Qm>4pb#U+zqN8O(ja`KuFa4o-Bx**yo0U45(j#b?^cV4zNnqq5cAZ@G=lJp( zQE~S^^sGhZ;1cc>uHm$~%WeZ&$%oR-1u77%J-LvGid6CkGNg_agHsr`A*5pXfKUW>I24T&*$qqCukH`L zN8Ky1;{oK&S<^_I&aR8Sdp3rW_Ee%MTv*gYB)b<~Imo}!7nef?JJ3f@Ec>4wmlo>& zAmL39B-+AX)kOD;ARv5#B0W~eQs^mLsjY=(y8W<){lpTWy`-vQ*`nX>RW#`K%|D@?<0U-}!NqzlZ`0*+hd!b1$l}aqp8*UOyygcT!0cyXpWkJ6JUeOZ9klDdxa-qH@xD0lAUgkZA`(+RHA{=^5oS+=X!nh29>!Zj%kDwZ&4?5sLQNVYhv!_O*4i 
z7rNS>k*i&6<8;9RZcZqT+X+Hx*!cXb3f#A)XqDEJ=&(IC*o(~^=7dP-uc~Ra?A7ls zWLQ4~T78Yv2N_6jR(cNxoS!VB%wIu}=vK~sQuLIv?tQEI>5<7shM09Umuol{CeZ@9 zFsvrfcFB(VP6C-%pOc=7Rj8LbyT6AsPq=^up)h@nbStY6UtOa1I~T}hZ^lrZK*Cz(c_g59g6lh2D;XDjNDK2;58FV@LHY6 z*~T70Os8ALqC4-Brej={x8>H}o}dCpetvyx_tcXR*1G1)bQpItJcW~g^V$>Gh+BFqp%_;z7 z+gD|>zskx3@$`n=X&RIuB{cb<3meua)xJEO(%cu9D5QCzdG(m|3S z@J#E~wSIRiz*&re;V;Ats5Vi_F?Nl`?IS|Fis4`Io6POtsymGbon5pZ#f4*&E5+;>?;qHDXPnQ*xHu!*C5X-qz zURoOKV3V0-%lKLy*{hW{IQ5eDJzCXjU!PE4 ztnuiA+4?7|A$Z^4-Kg`Mqc(^Ww8~5Sj&zxrO2;C)r60Q7=Huo^oy|^D(aKVh_$%N} zWwk`f&Cl9$+0Lt8QJU!I@y&b{k2m*{`k*d}zY?$b7V=1~SoI{A(EiB7?T)I3AeJtO zUFcNli(REQ8fEN1uYMHWsJ~(r|4@5rXAcQMmM$<`D_23Yz0CRX37uT0c55 zotDL}W#NKd6W-uWc95wxNlE(}CIiC8LmecNn}p3STGwc6bNNBoM$M^79_537#6y^G zeQgP8MyBH6Q*;xIIGD^HCWLXrycP=9fH3Ly>u>Qmak}{9F7@Zt9=M1W;K2-q1CuV$ z_TlFveY)6j*f!Q&26Zum=t#kDr0$oPFYhf46I@S>a(KDA8-o)G%P?n+-h)aCxt4Y|=}&eGX@WC!V_kYvT5q%Rt@G~Z&SD}00Ss8NZ$Q?bjK1JDs*a=tNLMV;?KsgO07XC~Q1|+}b-YGCjzsv!u zSKkpAT&YsmZslh4>|P*r2Oxf5XF8I#%h8(3^<}OHXy>nlH)H_vlo5C4h113_9NiIM z``w8xID~%z5ytXQ6j;qS4p-7rWBA@u?E#NYO}Z?O@>1N?dtG0?|L15p+~#E@M(yPi zGABVyw_ljj081LUCq7RApzGnems|0_hmY|s1A78|(cEvR+jqZe%?o1g4yIWRgl#zjb z>~rTLd2zB-kDi21WC4SbJYaWv(F-ZR9>Vb!-FJulJ~_SGRM)f8d`|f#V_$#KdW)1c zs4yIMgr}iUxwQJ~Qt`gkX}Vd72d7`4aFPmg`rQi4Jj9g|(`6Cbx4C@0-)WaLT!3ce zl^R-$b{+F=V>{OQ)?Io&$bFP95WJDnw*2mf=gv6=Yi+f7b1vvGiDYMWV)BsO_5G$9 z=}!*TQXPY)yA2iY#s$7gndy8s5%`1AURGsDakw(5aaS2};P|8*2WusOOIAfWbBhH8l z*e*3QH8ZtRp)4~;H0KH2mGeO5Ku%CjsEDWt2nf8Zp8Y%LJ?DC!-}S!dI{$bf7cACd z_mkuUb9j(-1K@VqbQwJgD+tcKC&d9j{J{=!K{uga$v zF>#Dq9I+XPw2u#YJI8Wz1nKTIciF9i3~ut%w_1+3NADki7wi%ma<`Q!%X_;we&)D% zjE9*IGHK1wp$;ZK#4K%p{Y)oFgEK}DMph4qsjIcn`ShPwPPgsk>m?5Zn4_o!-qS9b?Gi!X1MGF&ZMacSY>aL<2i}+1dd|#$dG}=q{=AfG(V!4jh+O|*i zCp8vnnkvN%GRvE}30|qh4x(b#t=46-XPQ>H(bn-6&K39qh48LFZFqt`jo3LF)f~38 z4apHEAyG^QYJO%4_Q_gNcJ}Ea$H?phn@(5ND+~FcQ@*G&Q=kg+rMUjI3;xC9Lt^3p z*9U7H(macX@uhXH<7n+m;7c+f(Vu$93<3;Q)y3GQeW1iR6Vr!Z@IRZm7+2b_AFCy5 zotO*A{~WXDdBf&my56}{n^Qciwb0~?KqDteSVGy_0z@S+t!!>Nf)aF~d_g?}tOdiPyx;I3pe{~JLiSyxgIWPk%1wJroZHYzZHPuMv)uTHZ^Ig;na4X z*pFBJFFNVvfp}1jUDV-~_G=l(e^z?Q1bG4JYD!MRHjsUT1M`@}ySYvvMf0Eiv-hdw z--aLt-|Bz28XO(3@#BJ;gfX9PJtgZKkFW${u#++LnAu}ter;gT2EvRV|E?z3)7AZ$ z^g>o5ORDHDL2`}tI9vah>+>$32>0m}yno3_Zw$8qsZ3i5bE__W>&i3&I|O<=IPDke zh>{ui1H%%D&ad~H<3C1}^b;>VO-)NX;M0-jNk%XtsPKu+%V54dxF(OpX{GLW^!pp6 zcK&igxdiPL2-7)2YC3ZBXy0)Z`NC%Zwm4f8Zu@pyydg=0jV3C--KXWcu z5PC2CK@^UttK7c6APN;$Nvd4VD2Gwjw^Jk(;R<`>NnsCYhwyb!@$ysYq<>=aept=u z_-*J1Zx;XP;_{=BOd_m+&G(;Tv43M0Gg+&dM_q=LGt%0) zwQ~XbeBo`X7&FMO$0zUivazYZD_(t}B()N4HBRkki}gu|z70N#;4OXFPU&i_`dT5p zkDrPeH$5P>qCQ<=;T|GQB1y*5NFas!NAD@P&jGHO5H38N`czy=V8E!%lPE;UVg#J> zqitb+4@E>ga7o^H=~(_o*Ow4~!_Kz1uyi42K!|OZRiw#|8gJIMXE={Hv|rPx45(&i z5X+Zsq5bK|E6qm*i<;-4qh*c>%kvDRrMPZ9K~+Ey)Q=9<%54u6l|otj8{OXE<7%rg zxD9scGA9e_b(^%ik@ZrWo)GXf*)ZcTnH7Ie%CJ`ut} znn9{!U3Z0`Cxy647l+)D()ET^N!)DuW!|C2kBoeB>_dlDNHA@pR%*N0vfYUBYWZ zsd4?Xj94$&SX_{|Afc!;!Zm_kwXXf^)vC?jPn}OUK4^HqUw+fdr5?R~Z?fKKe|l{A zIKltVVB8V%*Biw9k!uerJp60z+O;dz{dI5mQ}0((HdcBWOJbM8;5clJFFF_j%il#I z;?0O)t>Q&0=jYX$D;A)%Z>r5X7{f3MM;J}&LF|J)aK5DMMa;JmSzF2d^KVrTE9GS( zWH*9z&9;OO%uEBT`V!0vN7%gj!lC(jOVlg>;v9ePBIeij@u~}pXdLDVRF$E{%Ol6f zUo?Fh`#2eLT27g%y^s;w^AUKDpOwM(_DZH}eBTE-y0q(e$N3_bAG6adDW(s0nw#!a zcM7kLfV{;BQhO;d;TCoP=K+8R&dqqMjv?wG_kmT9OO4hCYY_{tbb;rP7 z-Zx6t&(DhIPdJsLHRI*q68Y#Cht`6W;CPd49wH^D`*={!R}~)oi~g|mSZ`sNnYa*Y zKIk?6{llIDZKkjldLHU(B=&!X}D=($7uDu|BzdnO)C-!i6-d{M-SF~yt zz41sqlf}q?e?(xUe+uud>cTD-Sda=Tdr}gMEb6|b-z>7~M1~SLJ(W{qE{_I4PvEd^ zxdf3J{HpZ4emVVfiWr*}MsJ(e!Ri^EoDARYL6daFj1RZnn_u~2BgR$Q&>a-pA|S$- 
zPsFctb$qtNEm2#+z85lXQe*r~U{w8mo(VaM$*8QCXabS<6%9@kP!$g0+pkJs&uS#z zx}PBhU_=fum(Cbc0G6f{$2kjWn!=5FXFKd*T(#e5{s#YdcKg!6dEch^o-)t45|Djp&E4M29-xD&#k zKnvqW0+V`wQM}IO`IYmL@r)C$2`e znDflVriw`}+N#q@q~VoZIO&wk#ZQ>du4aoS_27C{-GmAE0_n$v`XgG&$=NxDs#wVa zi}CkmM_CrTY`#RmwPf$XuQ|7N&z&9IwY;TlY54s!tV!I^D;(!n4jm8y=G$c~)ZUBy-c*iJhh9zk>sT z-?sGtxUbj=ID=Bpz!~RFs}fWU8HYCw0V0<#``iNTU)UH}{BmE%|$IN$SWe!pwM$u4j~ffi z8r+-bPB}LqG3{6SOC8Zxa?*TtcdEq?5$fr&gv90Ywo5$H zCWHFoeGA4uj7A_5wpCeC@d&l*)=sUFtFFeUM#iRYg?g)rvlPi8;gy;@?D5=Naz12{U(fIPrGDLd;8a$FBzF<^}cLhjYHk6$UxL z>Sv?=?moL5)uuTYge-J2x9l>7q7_dVrQyswu3alIqWs(sG&~xbiEQib=B|Eo1YNFl zk51KLMvxHoa==^+&XH7e&p}$dTx04D(D&~uL@3fJY&KrF9RH_U-!`JtqJAfSXS5B z(O3GF*w-Y*Ab{T);RSW)(Z-2;iG9nn5Iuu%<@a^ha7o=xVzmo) zI}9Nv?kayZsFHB>s}hS>VJZmFfxGS6PEGd=aae)C2l~`)tOv!4JU+odx_^>=|H!AI z+XPn$p(B^7GX~Mtw^h@IUckvY^Z8m_=1eJkB@im~8skhEjFCyBX|6C$1Px2}Vv6i? z`3*DiLK}Z-ZHHN6r>s&* z>4VVd{sZYs1ZLOP1`{npuZahz1?zyhajkX+0t|sR z29ha=lN5x2v;%*wh_#|Qagg^bv7uoM?f%C1v(hNju z6jiMNzTDN#5Zk62@bi4-J8^yNNif?*5%>=Jw`77f`TRNHbnS3RoB7jKz!&dRasc=Z z=S01Jd%dr~cJzC<6Ny=t-|KnJ)|bFcaC|2@@NX~c$%|W*QNtgR;2`~XSjAOoQ9F+0 zN(J*|e1J$3McA{C`+hyK|1s!SYzkbJ&NBFN-#JS@_`kn=*#x}mp3?V!Es-)3$Q+~b z-(m0v1;O>8)5sd2PRb*pM><5|))Y2JyJq-UWVztVpB0pb0iPbhrH+%YnX^~o)(tam zJXC3NN%^FM+%2d@c$uC%_Ry8Gy2%xKJc}b9py)7;HdC>tYH^loY=ZTn0LPvbCh_W6 zmBq0I>|Dl`vFb{XI?C0iKMK#=QThhEStWgpUVraPvOFcVjew0+D{r&;;?dL4%9US~ zR{~dcW8{GKM?6rQTX1P>KcH4COt}ZQ*q_Jy)?YqGJiyBKwV>M<-{kie3a{cIUxLiHe;e+sy2vEeuztZ;(ul2+%6|$%^5QgHBR!@YZCYxc@dSw z-0AQ`BGCsdS_$h2il2XQjHR|f0}Vw!kF+q*GcDWO+ijzp?-ACsjR&mC8%jf0HQ!H~3Bi&E~pf%uqXQi!q|mSYkbc47J^}`bAg%<)+cEsgp9UlXwl#Ksr`$Gm$1rt9ur<5-AYwuya1HBNQ7# z265S3$3-6p$(K}8s?^zr8JHcSV|{76k53Ht_MyIzHp$z40$HdK-DrGQ@X2Ai^{l2T z_5PP$@$MkF;~iY}%k25Yz`%3we%wT5@sRu%rFXuU%{p z!c#9=W;f#=JWF$o+LwMn+R_C?o`l6hDi6C73Ld%=B9;t!h}Q#*>ui6{ADYetduM~w zrC$CHvWPD{1sKGV?%j*H7T8o&DYbnXqgt=r*0`>?s+X{y7lx;N*d^;3o%1Xdu_Ta9 z97|yjl}AL}w}+kJiT{rG#Iu)g@XSelez)M5VcGrnUNtu*O*(HE=Qi)Wr|dlHiz__z znM(N;@&-7dhl3N~y4cv08@tw*=%hI;tOOd2q}I!EkaK223B~2EgZ7~K3rjpF=6@B? zs*TI_S$~H^QmdG0l>7}LfG}G^6{Z9$ zAARs!*g!^MqFx$&bG@fj#lKVNUoKi~l%j8yoi@^|p9CYYyUL+4*yaXOePzjaF&6tJ zfF&swv2jRzF!nZ{#D&IFPiox`C=Cg}hl@Ewb0jpm-kW~W>oRUn$R?8JyjVQH;ZY<6 zWeI+m_>jxUw0(Qi4*gM9KdayK(QoPA5QkqA2Jc}NEg!{O^)y*^Z+IV+iCTEitezf! z{&5g(a(OIhuTCpvs>;>zg?gGp*}1+_)uh0EYG3E)=XzFO$r8RSdjkC!&d?Bcq}7cy zYA5Q>bRH?)`a*}1?514~Q*k!`_LA@&ZM6F4fYKJ*hRyzgm6;2y&t0!=@P|zz|s{BXz8;dyf{E15uDvC7e^m80WO&&ere<3ob`to}p+kKZ(Q-}CK-#yQmf|8U zrZ{mW!4vyf(j_nw045**s~7^;SHhY(=G}k0K>zcAe_KlY8wC1aaEGiVi$Pmi0l=>c zNRJ-+X4UNL2eTlcQ2YI55&@d7@#uEj0{gCj#oKbWg}AHK%Z=G_td}(Sbp}CTS`1YF zDmT7+<`)7$y%QCK;!XDgk@^X8Abc5w_(ephfW_1w!0xpxfQDaX3TV=9Fn)5ULXH%S zQ#cG3_cmB|$RH-+-(NyY^o`U5z>in=!F&y2txl~kg{J*GOQ|8wX>l)z)vUe0-;i{9 zzIZumz0wNcW#_@jR_rRi7#FwR2Y#wB;$1o5Rg?n{7yrn|a)6{TWBsAaV>*CI zzwaamd}oh-V667R;9Pf$U4FFJMb5^YIfZ1gRshG4br9f3lpzCZVLnW|e{Oq2>8*0! 
z^HZ-rU=u(^CEo7VtAIXSY6bEl^L~!_^%jHFloqYIJ7+gcMF?fmpDTe#owAJK1ZZE1 zyF>c*Bj`JJrCk(Bq5)XA)N*j9tnTLP(|#ARU%Nj`V^aD*y)=B`=pAin{7p7AVQ?oS z+I#0zoB-*+8E{UdY%wqRIa-Ro%Z>;n>J|3taQI%dPdU(F`4!KYz zNl!KhX97S-U%0$O6b^jjtF5)79mEq*OZnvS@vR5Ndqndkfb zI8V5{Pe5-<)#+ZK)i_-v$$B@iUKBy)VS2^nk31y;hks>s$9s5;Ql8L(IBX{GK#bg~ zZ|z!$wKlicDhOhS$ZB9H)iAA;t_`Wu6=6;DGl=J(JZYuN)#oeG%WHsQCcXAb-Ij?2a6sIYWJkX$!`+an#TKfw zn!{?vi(~ayv}#rW$5rCs>@tMs(Bb*h`6U<2C>pHfeTiWyEzFw1iJ)b$>d9*1VXGOl zRorf}hO_9_c)vjLuZzv4LF}6_Z%!{Q2I!zL%&|*ZjVW^Sz#ZA> zqF!=a(qYm4=3{GZ&*(!B@r`yxFFHd|e|Xd`pHXCf^|2SdJ^dy%Zf|eEa8-+9o`)t< zzR_TLNNvx9C%MaPyZLyj?<=W5`YSaSh}6PPT}hx+zNtqbwFV_TtYRO51h4$G(a7)1 zKL{~4Rjii1_Wvtf{CAIg(!cNRKL_d$dY`ur_`(8(OW<}%3D|wj0u;)&%Xp64q5{dG z^CQ7@0hF5CJWC^O1%V`e_gaM&`aJT)JRse_Ew*XIEm4V$G zOqk?eFX}jEBk-rPvj5EduMF&8F(6%mAr#-rLdQmh@RZikgh4Rz0{}q#P95OKFX)YIWr$60 zta4sJODiaCMQB4+=Fo`gAj;WvFwvRAzzT+e$k7C&jey^;#1B9^fs(Fi7{fgXtMe4E zdABQOAiM&3%VH00!nC5^q{7_!`~jsMVF7=4i#4{wW~JId!(T{84W;Ob){>qTd;9VC z_|H{#X(#A&3j>xbId`LK;4bBlArsWJ(G?g<sk^>T_I8%DI99za3!T)JeBm$(s<91&QcYG{{NdUC9Elq@|38cE`a6s82e zN&JEN6SWF}THb{zsLC7B7Lqo=gP+fjbG23jb+ULccaQXqa{%r=m7{Wza_ipPzt?@Bib0+5Q z#&(Y!c24>bT5sHCSI16dlmDuFU6g1SXKiyXJdB?A_M=sr!=X=eO>{uAWquQy~x~MuOZ3;fy<2Lg)4D-eb2>^d~3RVKpDaP&zpR0XW zvYSfoj2_3+*dZrXlIM3i?#9RH&p9WFwQ#uFyp$rXMb73OO1l`MCJYd1PLz4#gO}LrNN%qVGkA(G9s^4Gq=Syx^QQ=>-Q(6PQGYY#&XNqYH6KBK!4lh?;=h- zUDO=gvWHX65*70WIpiU46$?lC?*inaO-j^8N&@8+X%VJQ&=n2@@+B*b?CMkHOGIp8 ze;dUH6Kv2QY!r4hoLCctc*F;$pUcDnRuStUfVXUuwgikHlr zKDk`BL^=|z#u5#;l{1s|{bw9h)D!A;gfxoMgjid4wo`rF)$olpomhe~7G^?d^o=i> z{^Jwn`7e>{3Wr~!-isBloEE2us1+@m-4TWT=}?5k%qYPagVX)Aj4U^koPs^?VB{mO z?1!Y&*m+Y?ZS4y&=MRiqzT|vZ@S$H7RbQq$8VIweUJtD7G+C1;|9V18U1t2VNx{k$ zbFKH&A;0MtMU;X>WNvK!@jq1Hcf#lb?gXWrp&$u=oR)|4GKwDESb+OV`kuLYsgWyvNE6*8ix}$os|x4JO5K9 zCOSGJur&NPaGW|d@x8Fdu{5wWnNRLF(x@C{uK-5$L)uDN*7TN^mS_BvYggoufBwA1 zB=>VJyg89AsR;SErw8`Fz=@3mSYH5q1|ARm&Xxnf6c6AaUOnVj7_*HS?fx5_?r=Zi zD)DWHa{$12@wK9XKVkS+G~ur}D|K7}N4^-lf$x;+YL?+4UN=*ts#Q047Hr*usmp*q zyQ~`kNMXJC8KI?4jG)mk*G@V)T3yP3DSn{3#9bcou?$;#E@5A_iix?gN6BqjnjEl$ zE;Z5gAgfqk!Ti}L*~Ly!vB1vUM?D=?r+#IYk#7p(7tfJxkB$Zq)lDA+Kp0ha3F?%y z36A|~3pupq@;cFi9px23qFVi0k?Hi7HI$*f=gSngj-R~VwnA+9S=iV-V zpE?3r(mq`;0p0BPS32Ycyi{*afP#{H&@zlbIp3`ut^X)9?6T_I(PZ_k>jOXY2dtxJ zQ5zONx{>1#KJ%-b#?J__1N^IOcW+5F6fto8nWy}~Rv>aE8W;V24|3h}hgJ}7 z+t#-O#wr^M})%@vk}gLv4EBhJG)e3DEO3^Xw6=MQ<9yNbug4B!vl zO_#HK1o12sI1ssUH#WAcH0hGKCe!LR%Cz!2_G?J=3u8T319f<@L0hz9;=qFDLAstVTBXS70`* zned7>E}6w3x1PvTd31$h=+R{OFyCj67e6Bt4U}8M^A%WBJ6?Fg@T}N0{FttMUf7ei zI>m(q<5LbAW}|&azP?FOvH`|K!=70a4|#Q-tnrXeI&F3E`A#A{PgGsF(>QkD(|P;- zUA`^6JYjKP>B9@o2l*z=nj2h!0Q2R~p@^pWrLBy$K-EGUBu@Az{p=?_>Q^M`Y3OJk z1i8=d_`$Ge9i_op&(gOCz&6}QP~0wJWmrk=-U_Ee5*)QOrS%zFWDO^YmyOsR7-Ox^ zaLW9H+=b!Hx z0d&REFR}X|_>oKMjNAU>A2k~j`UN|~h6!Ef%4y*Kh}%HcdL3e?^o|FnE4~hrT#fQf z{$`(Sg0Hg(bl)k=8lK)_EyylR}4RpaA5Q;xAj_1sog zIP5hj2yL6MEKBL{|4e2j*N*o}F6QC-$dEfyXG?bClt<4dY;16SV$ZVETMzs0Ztwf4 z`&M$z2T126#lr2oa#}fM9>&+3^I^xN;TO3r1cU+S+T+&cU77F#{s;C@WmLmk$~U1e zh9zwfnPuGOR}HG67g3QVoZ)o~Qod_Wf7J-4}ghpdlcq(5m?BB`<% zB|C=nuLg>n)xxh%3H}R{yq%W*3u^vDHSu4M{0~s@UwEe-{y#7)(EP?WAaeIwu){Xw zcYoE)>jyIg@a*#P0tohXp(Wgm6c=im>mR@z7T+ddXZ+?IxVWW-)vE!!AMO2D=gj%; z-Ii(;zhj12&$z1#0HC>cNnA()#hv%OzT<;;TL8`^4@R0Er+mAHg9Q*4W-)mJ=Lb{gB z=E<~>OiGe6c>Ai49}%D1boU8F|P4^rW}pQPwAWt8}% zQdxLXg^A~^^-V_Kn-N6_wW+**Z{=3O!1<2JdDDk<9?zmyxw7Y2m?t7zW^!Yu>(e{DkcofCAHx@+4LGfqvWVhx$hJXJ>S z&qHGws)zK7WxEE8;!^~6>wZ-GJKXS!5#8b{g$Y`us z2Sw=hzBk~WRRM7JjspLiCuTQwW;Zq39uWoddT9WVpg)n4m6|?xvA>`O~*polaitgG~mBdRhxb04{Yg_nuL<$y}Y5qw(hvOKtz z!wK7s-a_a-?2ePTQk+ela0n|{1E>gV_Q?VAs&&N2v@GF~v^^)&d~zf_XqO#-_|w6Z 
zoLt@As_RVU01fLn0?bc#ABfyll+4-ei0N090~C#yir*+Yd9<6NE@M?xQ#g(@^_4B$ z?WE1&K`nb?F4CQEP?N&VC9&g!(+sTv2d;g3n6!H#EviY2JfRBBgkWBe7IF`*{cOxB z=60DGgSPi~4YJ=TxpBM+!W#0ieT-ZRP%t`JVOi1*GUp`iqkP;sS^_?{eQo zk{ZIEL|jxc&65w1r0G4xQe?QKW}iyQ7Qm0@h?WCVk6|4oZqyR>l2pW9T(_vUy{L8t zu!AFuFkpfFWmybD3hD-Bo3o z(o#B??T88=Y~4%QgTO^ilmEg{N4Z1CYkIz#MLo?NqU>9ggoj$ z?xV*zLiK{P>#wtK+H)OtFh6@XhlIT=Kskx+gtZ^9Acb36@u<=i7AYum{8prl)CZgW z1#3VAC!Dboh?tK<_V9!cMzv;p3#vS;^Wb(9qLyls-Hv*tE0z^KqFyK5HZ?ntz^vvH zh9Q<_^{V^`mFBKsFEFJsn4Uj~`S+Z=J>8#h#1wE=Y=P|_g5qHJqVE*7%^Mv?#1L|P# zr|C-m_xYw>sGx!1g;w>Izqxoh5T3+2-SCFFST#bm$dmR9r%jq*U#9y`HwEwiCOMdbVD%3@@s#+#l~@X!!&HuyW6!)BPGcV2 zJ*q?cvUy<)+IKszk(9cyvY)dBJ$_+elfgSX?}CNLpDwa!{<-UVUFv;DzKuR0Y(!1) z)#sLT@_NT^R4Q#J2SX84lgrkPc|prFFfiTL$`HNg{%o8E{W54BhH?xWR?xq7Btt$KbppVNrkPeP2Fj5k|MHes*ulBYWhj zp5%HPh8YX?0V76?(k259<)E3zx2m2zs;QcIva9Oj^0^~aMC;7=_`c2Rt+lFOgsnk|Y<2>aiQqVb7a^q3NULV{xJM_AgWT9hc<@;d-_t8z+ z<~(vb{v^eyThboY2bnyrI2bY43BOqHQg10r)>Ww|W}x>b?kVTKzu=`<_5BLDyc3g>sZ^s$p@Jp~%b`(W9cHo| zdxm!k621GL5p{FV%u|s|-1^F4H;wue#;+thE@oS_=iZa9<9{Zwv25J$d1bN45kiyCZ4J6=W4> zdaWD~GVG%cG@x*@=4;xC#jho@N^@{WTwFnkVrE7pW(0S+k*XA&_EIPGcwYeOZ4x=55FWiJ-_zf3oGf1*13mMBii8p>Y=c1aIm|aP8HKQq` zyXD2kG4A(tmn2!r2GVXJvAp2tyZd1YkoiN_rny5J)VM6uQCfJzf%k&2iwGxDmm+W=0~=a#{f_(a(eL zi1kqIhY1vmG5$wTN_K!HMk%UQVW)eiJg_`ZFdYx&d|Gh2vglH$i5a`U(RdRuCxC=O z0A~Rfow;3xkac~1%doBuQ}(&6kUiUlDS1c*Ha=IFT-fAusw^%xs~wSNJ>rf3=5qwN47=1}0j z5HP1M>(IM`W-gRDG`;V^lj^-x?qRmYa6;6r$?xrk;4n;)#i!2Fh@}B6YCmS!Owlu@ z-@{AxYT-ZBsvXC`uQ~*rZ+Y}{x@QEgAvmx$H{>XYum}m-by=eHtB7rRcLvsFH1cpC z03apPh)i*Xtc@?%gXf^WOGLawk1|3_%&?R*FkkP=K_C)zDv~6sM zXE3~%nQn$XG+=kel%qGcghqYkUfpiOasRaQOCtC^uIC^3VNCW&7KZtzYn@@+&rH|I z?PVTK6J>p&4I1LITN#zBqYi^?lhNwYZIj`IK9`$2bv@&+ik0lHWY<+$N)}Hxx~FFd zCOP+J;Tm`9YL1Xrhsj3O>o)^)v4K>YoLY#2a{>4D7vZ0p0MOBmv0hW+!k_WM?z~pc zTnlw?5cytUzeu}6wT^a+c{|kG2TKQYhfL|~dr;e_0uBp4wyQKM)W14-8U+*08bj7N zbu#XrxL0{*wBq$D-tCdxcGXKeS%*FDPX7UO7HO|vTy#zmF%*{%1)Em9AHrm^R_(GD zSKIO61&#N5Be`<(BMr=)D<@CyWAQc?LXF64(gmrvQ;C z9DA4Nsn00+Tkpmv!_G*=BouiUP`pTPK2Hi6zTEMo&s`d5(%HBh?#&vxV6uECN2f@z zQUU%JtBKsU^l^RK-DShqKhIix!ePkCi5{VQ$JMYiMpj^(qCB`?#Uih+UTWCUWJ0@9 zBUMHo9>vi0j;#SA#^aE*VB(@q!dUfXF{-`p!oEH~$FXPoOWKSIr;;|@c`Jz9+wGD) z9v2Zua6ndAE{=Rc$@IZWM0|z*<8fjH0L&FIXAVie`Nx+pJFufj{w~>)VVkU*v=Ud- zaT^0n?D1_B+p7dRmJJ&K@@;LD8aI2W^S*^Zxd%1yv(YZt>ATJy}7B5!Nz+ zxctMtaihaA21LwEl&u7u<9yP)LYrdCj1aZT&TeuKgoQY1^|SfvdH2dVtUhw&0gFU- z`h;bh_x8>i-GBvi=XNNrzQ>su^b)d8bC19}W6mG6>80Iv7&D@jYyykNP5}Uz8BXBA z-MzlG)NGEdNVfj~#GNC%b4NY_s6g@l>ayvo7A7|4Cni|Kkj z>4Hh<2OGkMXk;Rp*Zt{MijSh{cZQAGs%nxaM$Vz@+3(o z2zHalX$NO|7ES`M#=hq2%ND?Cb>E9ddp(e}iR}(2m;=O~3G@BimvqnQbVGI>Qmu4g zy1xIE-F~BC#@>_{uK!iAD5*$zoUreLX-2^7Fy_<)u6}^u>N_D8%tR+@FZqFH3jTe7 zM$j6npAgcg@LI{R@y%1#J+1A$%N{hGscMd5*9E-xz-+4c2WuB`n^SgciI>aNf^BMG1{*uwhP&Za& zhf;Opt?m=Y-$@%EERSx5h;@M$R#>u@0~A{r%`k2dy>Lq`{&(HB?cGpiaWY-?bT*RN5j3h13xs?+w`^YXW238+`Pd{s**?E!$qU0(b1`oSG)$t~YM(j(6{Nq=gW5{+1YSYp*3g_``ZLVB4<$bn zCx3a;g5;n6`+8HMjd>7H_r>bdTsbD*ej$q#KFs=jZ1FRFyC>zZ{Xxrre}C{eJpaVZ z(U?x$jvBwhwzgLLfx40qw--AH^uOy4RdppF?d&C-pQaby5AZ&qt<@!J#%~Vi59gng zg+~p%xz#yZMcKDD2$$lSW0?7eX^|UaMtGU&zRkm_Ee zPM+xRNW8GayjiEW?k4ZdMDJX&$ixwK40~>%TqY-80T4o$F98iP@Fj3XBKg)?X8=C!(Bn0{>j8q-R2;_1vQ za*R!8^POylyc+GoH7I@)*+Np)W&>&Sw=D9`EHd7#c7PhqjVSH+Tl@)jDz0ovl+EAz zy$J56Mtv*l3N>f!`X-Ib2=h3TV*(0)uP+ZG%{`zuWVN9=-Tr;};Jz;WLs|9okyx&= zxSzTLum%;>mlg*HydMi(*`WQ^U}DUS*?(_+KIi`a z@w-3w_5EJg{bzESXuMy`^Z9(d0)9DSl-QBOH4g=4D3=5DUHSgSdBKrosJIuQ6pdr{ z_(`m+|NYY>UwoW+psLFSqldxqc2TB*mfB~f6fIi+XD;}gFQPGMT8joRkT=B(FR*ngF@13uIe075 zl<#`LD0|soTCZ|VxL|1x%=*qo94bEmS~cuyY^Pbl%+|qN`K27@SoTtl9d7hG9;*9c 
z{Ex#Vms5VaA7mlBr2QlGsfl1sDN#5et(*>RDCUyWeFm&AEnuP{tS3NYD~W#$I2Jx@`#nAm#~#5zYQ>4 zZ4J{Nll7bW_QaKUcqxdiA{py-KU|7_;7aZFH_kyA8ZTOZle+rbPZn;PIZIdeG1%Rug=zoX-| zzJtAR^*BHNd>UBx5k^0jdhyz|%Xc&%aCg6~weNAd0Ye-!>!Dc|V0RK~tYY#M9TJiS*KPM9uPEVh6Gz^LxD)SrhUc9O^$jL>X z?znXIkV2w@h5@C|@M`AvM`+LDvX2mEhEhU)dj+a@0<<@C&i1fP)b<&$#u@7I!l|>- z_mjd3eF$r-SO?|W1@OlwXneLfwdaN%G3ZeB{&T8Y@Ph$qBNw`;FI7_!Zfg+OQqnv2 zR0epbSRoiUR3S2)rO06vwg?M5iMFkJcRB7=<9?vMiUlu3GvvD>LPrRnzq)0*4EEqG zrVqFZGj3iQ=<^IuUL9K~ZlR*Hm<589TDVCaOOc8yj2grgN1-e+=;moC7+ggV8OO{G zI02EE&CcjVT;5uc$M6ohTu#+Bh*?hGfMLNvf!`1)2%C^@N2RPZTgOnY}ec- z_m$i+h6imF0}cA`e-JD`;q5gxN*swU{hMW;8RrnDuaAEuM+vX|BBrRCmy?6; znyJDw+EyK$tk!6&5eaPpj`4@@Adjb&$(u_sL>j4*NB(+x5&1`i3Jtll&O=xy#B#*T zXweWQWMf&kz69_L1Vt_Ad$pY$oJdx=UmI1*8se_yW<>p6KUFzup2|w0`u#J`Bp$RS z|F0LGU%6lbYc$oHk{}wb>zfDBf?!-&Hu&c!AVkY7mG&Pd8F0kmAHNxY9NhJdkI-Eu z4S${oW2Qu*?`%zeJ^*YOA)jp+6al|6Ph;Q(CfbcYj1qbzY}7ADkB`;rByZsSAGNp} zLrw{Hpd=^f18T1zxDYJ2?Ah1gj?#2lTJvtv{jGp5Xd2lBP((uZCcqhsR01C08(5nG z&~^Ex>efE9lVO3fHh9D?#Q>o3kUJ^YN8IJaphKH(+7Uq>OV@OgK8lZ_5x%Uvx@;eC zdn)|$_CpwQ$0k5(WZYfzAiWpiCMY^sF@axGxRSU1!yOWG(pDt?P3+u!jdV42 znmqMl2t~ANBYeJ_D;V_q>#oO1W+=AaDiLc%P)(WAgGXK0d`2rnTY3-!ow9On!!Ao{ zsjTH>EJ#==^c$|VO4v`!zZlwQ799w=?kGRMbh%Y~^I%bw;N@>dQB(J(l3;U^uiD%` z+rN_1zNv4n$=8Bu#vJS~Vccl%f-qr_lHF*nEx-^b*P-0HX(XyY5qHh}vv1Jh< z2(%p*zV$NR90fV@@Y^?J5o3t&)4KTa;FAhsVxS>uM;YR|CFENYH@KEmL~p1Ns~gV4 z%lcr>3!EyJS%sGHn&sJi*CsAlmujb6jiu)TqG%z?uqU=T@tEgxUso~1puBcQ9=w!p z+0MI=>8D3D9xY34&!KJesX00E!n|5Lz%~a-{Nve#FqC#->$`yGU zlM(Sp@i&=0_Hi})O4F$UdsVs} zMz9kIDZul?-%yKsv5gYGW0%dH4&{y=lYGYcTSZIlRf^$PM?zcQM}yS;0+X`a(?^(= z*eH|1ZC8tlYy|YM-$UPG?t+2mtKmq3P2UQ8^GT5&X?#EM^Y5mK&A^b$tTXsaI=W;H zgcMax90Hz+1d;%-TyHkTY!s6e0|@@h?DF~>v!2C_!`@Sgu64$tvtob|s|S>pb{Vd3 zy-oM7l!kn*s|}pYF>~HWbQLQ1@%so7pot`yM-<^Rst5}D!aPJ(T{?ok4#%iagL{yF z!^hZqw~I$aG03x|mO82{#sY4MDH_v{hcmU{F@yZfXfs90VH_o9gbk{|mYJ&t?Bt`k zP&^W-P<}ZBA8F9|+EL+XwCvyZ2xwHRdo@l$4yJAbM%|THe)t(YrM6K^!W6`S({@~N zg_@)w+6O%AFsssA(g=dj>(1!!ioCb>@X^JV`59b?p^y`~iYk4vYaG^EkrAj-wgWqz z+~uJ&d2NZ=1B6hV5QTFcyy)fcZB|MjQ`(l z-c;&Yf~TRu@9zqq36S{Me53FW*Z`lJ?pgXPg}Uo4o)Je@FnKMiMMw604+t`4UYdY^ zj5B2C4_=!H4omg(nIg_yKPi&0;6)qN?gKuuCn03$b=yA1DP#Dd$Rich-ihig>cMrt8j0XRwtS#T6K;qIWmEkvU3nUaSR2Y`k#c8qlZ$KPh-myt&tg zaiCiaD73A)O+BF}T#zPi2FCo#+GxgBwyGR_<(IyUrqzj$Ext>YUv?%4zu#yzx@$Q< z<*X{7dt)P!JiPCtZr|ivra-P@nFM6s$9dmEDc-DM zC$RspcT4qF5V8=;a(DqM@m5^N%w%i!TXbiyM>#V=O(bh{J#$Mg*yK@kp=;NfTU1QI7AF?BtCD}wMZ+*1n?@IfP(6=kt zg?Pd}xYE85ZWIZ_>{*=hvRDZ%Ir*3CHDMuePvW(F+GxCk+r@sf2(r(}A64cam`66w z==l*7*c8F*XwQ~EZ)kZu*@{|Lcv&91<*Z0M*<2HFRWLQV`eFN?ytFsvl*IT~mawaX zb2a@^b4&U;xVQ2!he{8BH(!Xq@Aw=#G53=yGtfOs|5Po(hX^N@#K+i>a`$tzcy&X}3pHUo6FN>UzmR*eb5N#&s4DtHs2h1+Vas#Vn+3P z&{cDB0-lo{OeH zuHT7FkeC-m7xO`FCSRra7A7xS_$~@X#?Pdv&+IXcUYmpN4`5AXEaiEEgBGJ5yD~CG;jYU_zeRQivvy)hQXw! 
z-iY#!pF8sS0p4uN^O3O5is7P9A}Z40e*3 za4+z26#fe5$$lMFkwX7c@i>fsesU+g$L5Yaq}li6?;n zr^7HoS*Wkvco>sC4g+X<=wS{rI3;<1xv65q5^jU?;%0w{zW?kr^A1( zFqa8D4?sq=t16JP@Pl3VYof&1=A++pURa8r;ZQ8laQ1zxIsl9X(HmmpMGmpqc~oMH z=MEs3>>7o0#TXB41iC@^ydL;mDkuhBIg?U20H~kbj|YQ@?U->}{y_Cx^cyEo(@d-W z9e9$-jg?N5HFmu(zv}PZRMS=z+>-{{h&=Wp39FW;?&%fghod`pW~L_X3Nnld6n8_l zDCA>)9(BLFmRAVAnWXNk^-=Fti2>C#(18Y6QTX7n>h^R8@R6KS1B6&DGr*9w35zcQ zG^T8t54iarn=Cw*uD}ePyQMVyyj}J67}2=%O7Mv_&WS9&A*7B`IQpzNvKwDh5IcCmuV4<6- z3JGMIZ9WBq78XjUCg?O!?eryG&+N53VgC&-elamTl+`qd?C$kU^|OYHP!xxe3+~Xa zv0X_9zZpX6VbKbnp$ziGYJI$V>4zsY&m{0h$M4w_nKzGe6_M>R!EU-&H%*r}b4E z*WC;Zd(u9JD4V5hdmTH$~hW#Z^|xrEaIr=)C`hkpx=v5XL3pi(u=PQ?fG6M zeej0M3s8m#47^lR2PND1bDz5yy@}ur$t;D83|b23;736kWxZ)La+KtQ8uzbSqb0+M zrc;A)UXg)?R7+%bFV5pA&5hJB&3wL!XPV$=m_7|DB5vmu;iTyj6O0}{ByVa&!c;C-=SNR|L z-@g@ZGV5sLAZH^W`m9LhSE*q`&ofmRV^8z#`eu`vo1EoE^Rc31$QBa=}r(rbV7sSMo}JVy4U8??TYM z({QdvK5+dxc$qx@bi817C^i#&*00^TqIy#Dt5Gl&h5&;W>}*9JRJC6zdBD(ZjKUM^ z!yA%h8{D19_kS?zXmaUvle^b*5 z&*zJS_&^Qdff?=kK|FzULTfpSe{V3$M{!rz+qUZ0*C;AJ$MFQ_Uh8Na?=y5WU?gw} zr^ds<6#ki0XLUa0laIElWxH6lDtI$MWJp&ClOsMpNcQ|__=c10HqeIVB$9%!uBs2( zSq(=^IOEC!Rf!I>WG~Hl;!RA?WQ~-$?xz}foH5hD`X4{(L3aYVgESz-ql)$}SqC8xJm^UW1Kt}($t$=1?Zf}?_MMIqpTICUNr)X_qku+m z&fETPuLdZU0i~zKfHCB3>PApJm3J3MhW)C+=Mb2W3#8$dA%|#i?48lX4WH)9v}%Mf zG3t*6xcBb?;PS1h^$ZrVa$ieTAh|b@cpj6M zQ6#WsaWlY^*DZk=#Y;}%r}9r0+NC=iZ_YG{%Gp}=YO?tICr^I%B z5QKdl_;%3T67vXxr|l?|@7a2k%Nmx%W|7^*vOnlEUV?976G?=(=Ha8!7SpkxD-4Lp zNf(I1Tk82Hr9#q9lbKeS%8bfe-?7_uM3Ek*Kp^Y0U%8w^j=HK(l6s^p?V=OZ(Eodt z-_hdprCS;E@Iuc)?_!no0-Edj==qSk37HE{4~I)n=X_*1VP^a6MBXR@#nU1h^jhBG ziRu%dE!EHACd zz$0J?{P^g#lxHHbbl4%u(6mKrOt)Lo4TC|!I>G>ZdmsdN?2`yPus8qFR<#)zD88S;5$jWcC)DLUHe0A!lc8(_QUcrO&r-YvgemEH zU8R=eM)j-h^n&^+?)trso`S%#Y}(Zwck+ZZX(E4X_@yel6J8Qm2y$H!{jNLXh;1rM z`L^27zw=Yeylx!}Oq{D9)_((uX__SlZUVlyp`sIz9{v{H&)Z?#I=`j+C89EL(OLF5 zvdo@403}EFc4&JftOPSDBB)CaZC<}%KER34(sEqhWgskzxU&W=%rV*yLP)K?^7gr& zU(}viubyLkYy4QN=^kfwX?mXvGREe>HKKkJBB`u`AJr2EWwC(}MGsx`MSCk&&!_Cg z3?~mdY}~M~87|!bS^p-MWm`0*)GJ<*GDVx8ylUN)>6Ah{^aW0EbKb;$4!H{ zeEyFi+NTR_|E5}N6FkYQon%uFdRg4*hM-aIQw|}U+3xMz<(4EsM4DilYS-MF@m6j1 zs>A;Gr<^I)7K}dCE@|xgAmT3XC|5(j*1RPHIb=U!&@Z^nY+jv6TrR8O%9vHqXK+*D zGkS`!X*JgAJ3&?B-4S5akMIM$d@36I>8(1ehmPP_rOlAhJlbdqr<9W`xoghoD77nS z7mE=Gzqit~*H~>p-DcL0GSF4mttSQ!`-zAIq4rTbQZGf#%PxVSI9QrM?;Y`hUq@B& zT(Y?b7r3vKer{E0nlZr#s@gobvK4p8w*ycughyQ*T0{Jq2IaqYpS>hZ9zk_9y%$t@2jS4o`>n2xu{k8SddQC<&!FN3ajT_zh%?&9 zy89`=x5x0)Wb@>#tk1U#3(D2#cO^CTZ18LRs!F{Ioj3OWQ=k63>hOR2vA+M~_h9gs z{fS~gKg}07Zyn!_{QKCy&4eJTb|ad-J^DZTJf3;GXy%svbzO(;3-W7P3Qpsu0d!T|JY@a^UEKWLDYlZ@&dSEJMe5WTp8SS z=u&qWt0haMDGOXK%&7Kqc0AG}m5)6;HT&Bdedg>zqC#Z&*qJTBAHQEgGxii*ytdO# z=4h)PXj!(iiH;<2MU^HSu4s+{#q#Al#Iv^&=Og4hut&(A*HA%u za;4L@CgdGgRvQ}+KpB4lx#|PHYp)6;E-%t95B1aJRR5@>72UA*Sm)Qo2NBpfHz?N~ zlkZiDPYXsNmSiui)_ftPCz z1@6b?*S{eZz?#EbX6NgjC{IqSR9PieG-MuUW1f+Nu}mG{W41~p4zJ_yp_EgWtw`PzYo8Be9qgl9C4crlKW zX{HN1eaE|Ai*EuH1ap)Kem4r=UNiHDX-7f%s9jfwR;r;X@eA|j_>Ot@FBY86*iWe% z>|hErF&F)1XP?P@>wFGzKe(uqUp2UNC3D3Tn=gN@=6TE8GI=rhnnq7`8XK+ojM@@P z05g^v7#bK&qMGY&0-iNjsqfdTXpu!NH|yoiQ5yWg$5u6}c?rBp0k1XCi7~o|qpw-u zF00oj&e6n;ei|iK?MbAFYK6+Kl26CqLrUjavTRiggT0)yxGIVt=$)=-yn@ipw5S0t`tDQk9p#SKgZ?Zl2O3dn2#ixy7cVJxgf zC_cl87xs}+mAei0y9UFbpi<*!Huc*lDLka9?wAhDW-V98PHX|Hr%Wi~z?-PTy9U}b ziw0fUod|>dK>@!ceNgrzpIsiJTm9(@8yO+>#@xvcd_VOv8+rvxO|2+}{>|{hl!8 zO7FPHge`d`(t=$$_O+Kse|N=pC$yDm7_)l%L^Jz(L~cSGL4Ig=&~mvOvdh=aQGvD~ zw87c<&orqwxm^DcS36PbIvB*eSIomLyl_Sy?wVbyq7Z*S zYStxah2TORUo;;Pvi{kM~AE3+NT}S{m3IfXAmifku$yH@kjtX3*M} zQpUdUeNhLb^MawU{$+$3D;(ZtTOTbG!cy1=aQWQmd?*O8E 
zH^e{hVrT1ZVDWK3xS^Z5RwUi=$R|pST67W%2b`&S0|;sX7f&Wyl?g#G0k}fp&l!V* zXYgt)ZVRwT;4OFS7Oa+e7m!9Y@-gbp7Q9;x_Na*k_w-Z7deKW-`D5&*;AH}U#SETfy5B&r9`7@;^@Cq~kEm5VYfZ)6N&XZg3Q+)_t z?jGk`lM~WhYh;^l@Bc#cm%oDHUtGHS_DO@-a-d2UMsWo9&~%RZT)0TE3g@Z#Wu7s} z@@wf%W3_<>ie0ZDziG}>^eqb%#yBD5OqwO)avZ1J(>dLmYK1Z$ET}8=wgcgJ81Y4I ztPA@4Jfl)*wKcuxN#K(^Y{HM;V1y?B{nh4!z}WPw9ID(lNWQX(c@0xAy*cah*x!jYFb zlq4w?`1gxI(&k z<6h(K+i)-q(SA(2e`iiFd=UczOP@jI+no> zH<}RIteE%(u1H+onOwT7&(EYPD3Vs%7|v+Nrsu=+g;$d1>Iu#q zcO#A*s=9hVxj(3XC9FfR>DEWTQ&g4-tm|#F&b+8S*n-;a8k{s6F6D2JX@Y&h-&bDs zXz?w&Hl6sc)-aPFpfueL5j{N^J(3vJPMXjxiVjlkt~ztvbIb!@YFI?P{%FN>@e4Jk zMYD#B#2c)Wfslmy)#E_OC|W4KMC?y-gWTF@)VH&BP(%g6Kzo!GvX}CW0zAg(Oz0>h@dtxGA&c zGX{EkjOpCtbuald;Xu?F8)`a0xhtxT`@)tqDhkw}dZW`<-LZPWv*Da7Hz`{#ayGMp zZtE33LJp1Un8NCMNoZkuY)>d6X(D87F~xM&u64Kb(|(5n%l&MPfuB$c7JM;eLmJe&V>8n+<=i*w&lshNG75y~A zcseOJ^|eDqD3J49PH0QGwPr7b`s8Xpu_KKOR_4%wN+UpV{i+1-Vrm%cjcuGR*W z32K#P*CgRf0%xgxeY2fTlJbI&1;)}mYyB(2Nbs9A#+zntxMa1OaG;gf5P5$3;8eH% z_Vj#vUhIs6uT7Z34Gu~Y1up40&CqYYcLsg)yJ%>u1vGeq3HAhgmC-V%zUgkDsxt8* z+Bgr5ddj*Y*Hf=ZEt=ig3aMC$uX&?cxK6a6Z;`rioUYKnut2|Oujw;`$Vsj0L)G_d zC5@EbW6+a~SIR{(#V@pJj_?uJZbLI_Xsv*=U=E9xd!63+)p=VIh4H}8VqYHZYrupc ztsQDN=SN|9GERbiWgjf0$e5z&_*l}lmiU#KSrzSH&YxO=75kmOODxO{Rxa;~8U+E+Fwh~~$KL?H<%t3`cSm}*1mY?Td zx)WS=Urzns#*?>DvvsTR|J%{-fBL*Ot^Yd5Vydt)xV4x16ib7C|DPy|Uezk-MImjY z+n@{;E_NpUqyDocq2j-5mR%rLDl6UmaL_6WlgNz<{5hz}9vH&a*W~e= zpyy^te)@7b;Dt>9Bvyj9@0BkgDG}V$g01YYeRsDJ??tf9jGQ>jJ%Ol-8cr|0Cd&sSL1mK9lRnh;92q4w_!1pPs&F zr58U(r>mR5mbM$_2n(gpw5O%!O@x(f{kwO`^oCN}T8>};iWjlwW ze9t21n@sa%5kJgo?wN_R-L{8JWykE3QF4F$?NMw8rU@85=3|^ZWE|%vWR)9Bgqs(K zyj;jpSnu{aU17Ih@ph+I{B-O{>hV_D7tRM#?2^H4g^orVt@^!%=<#uf*w;0uHCxe2 za@H^>$TNx1=Vc$&I4fpAh-aNb-WDmFkY_lPcIW&-%`J~)LJ#E7&i{5ZK2|~JuE!l( z^0Lr^-qcjomIdiz%C~&RJw0}IwY@6bH<=MR*U|F%tyj}D^-<(&UzPE0 z#|dxP+9yVt^9=>;vMl;~p9ToIh+IcvINOo*pR{CzG$)WNuPEKn?F)JL-1NIYu}&8F z*<%%avp?vOMf`=ix)qz|J!eF%{FtM#6y2H=ndl&RX)uRf4#mnKDxEfgcOtdIj-ip{ z0{i0vmk#dluFJI*fvUw=wfvQAP6JE6ZJ9@y7Q2iEuTx2Pua}wU{YYAO?rQl^TQ-S6 z5UHRwy@8E>*j$oy&@hq$PRYLkjp`onR<_vp#YJ=n#dp->?F_)2-Qo{pkn@+IHt>kE zjd8LefGABe;fQLc??xINZSA~Q56>U}%@V^Ny0b`Et6N-{5;H{pQkdhe6)7mn+9#F`L%9=98^!dlvpzQM5(8|j_}j?vAmT$| z`DpY6coWi5qIt2Qxr0U#uj5q>(;}O_leyzrOC7(?Qvv@%Khg$@lC{i}wcX;&h(kfE z^HEf+H{Y6L@0(AzaHo`^dMBr1VF#ci1e^CaiphdfZe8U+)j{}W((`r5fAeJj=M(;~ z%^QhE7ytc*-p;o&>aUXujHx$5Qk{Uz)7njd?0wU68s19Jrft`rKP96`9{1t z6&|l}tHc42QNMr}f~!F$8APm&ES&=mM>({@3I`BOOFXH69rSZ+67Cmm1Ll16H!9BZ zI)2bT2sE53Lpi^LtlK`AIFRDie)5+x*YAM#`ukW^a3LhtJI)ot8hflrye;6=u91ZL z>OItS2g+vPj{=?PKVKe91>4x;FAdq(>~L><4euh(9cfL%DyPFFaI#WJ1wZhH;T3gM zVkh-G_-rd7yMZYJ&JXq`K&NOuRm;uuRk#0-4$s2AUY5+MbyMDv**XP=63__6dV&1i zLWk%rz~%c2BLgrw_$k_hWBUc4zJ2H{`|9rf`wOqQpeIre+;@4{>A+44ikHnNeGzH_ zcaAZcDJ}_xb zjs$>-wf2OzHMcTf%Gx*YFONJ=uc0Xg_`bUw<6^#VgJSGTXg;n)_2bc-kW*YKVt0I&Juw&9Z>4IPjxjj~kk%El;s9C!;U*o8T*J5_zP7C=coy5=I+}AcAv5{x7*U8ju zBONlQr5I6&HEUR=8SRNcUGHf1MUuz@$2dLxzg2%rvuwV={jUY$vA-o* z2BpyQ*AMqlPM6Gj4~CCDc5Be^iw_<6%#7aFDa!T9egC=Btll|2x{wVGjl@*lL@7hs-3%enm>gZ3*}zwTzN-K~fO)}ObZ3(h5^_>BB+KJj@~`y8B=QsK->EiV&P%`Y?*im+do5j`wpPH-8f@KMqDa4<%@6+n-4BI#CEcife$cTtphl z^#v%Cwa$NEgQ`3=d|vGAqW+4j`Q6Tr5+RKM*OM9zx2V{$@SugHgt>*lTArl80e7IU zpfts2yxRx+#JhRGtdMs6^@2W8t~Kh&0fC(STV%jEjV%ts!Ln%^*f198)`IgtC-g(x z%Z>7?#%~)u@hfrjyReqq^Ug1#e&)-#He5FAscpB2k^DEQcFIlkJD|&a6MIb3FMleF zZ{163Ie&*XuS;D)^+c#CMe}e5WHx`o$I!jz1`(LC3+FNzMDSXm9!q0Y z8~=eeUN9u^b-S8fA@DHpU+r*FVjcdvH^;8;60r)uAe=H=K7%LG5JNvI1csK-oT_M_ zaa3kDkGnaQ7*ipcm(q#dW*!jdW<**Yg#>AJJmv%uxB+0Y1bqUr))RhBhfkk95r?f`~g?~ zUskMop;vB!>kiDSNZOxSo0{XY{nyZ;a}Tp|F>RvfU=RI$=$F9l72cE_E-?k%5`D%?mx*w*WZH;N)h@ob#JW3+9&6A*G{ 
zM=FiQw#rHI+73oK+=@N1(IBhwn@3eDcZoZ*%C?yijp%mami+3>_3EH!&U}BbxZlQ-kL6m zM*jZM_WfJ0z|fd%K5b7N^oE>Ox|c#PAt{C1tDoOgTK7cN|e& zn)&KXQ~=|3Vv?JI<#OhoYy8*QSY=f*E=tzfCjEY2ha;B?)u=}`Goubq;dJ{;)ng|>L z;SS^<;5vvZR+hOrPWh_kB@1-!pe=-!FEl<2GssW1xmU2JtQF)eVbjg{9AC5RLlq`Z zR_tU#WYiO$m}{FQfgo=D<0)jp+ldslubYY8l(#)<_u0f3dK{#+$9+yDxh#)lW@&`E zA63C|UH28K&*qPxAv%KB>qeEsp(~TpuYL@TNb@SZQrBO=y7n4SXW0?Mf5iFCkmUD4 zvw3fmR5W~glxCn1P*qD_ForGbRC?D;l%rG+?uU{!4NiGkCltn_`OV-6**0%r; zCiF{068aGpldm&AOg6u_qg)a9WA76Sbwd>N*%-kz_Q~W`jToKlMX$Vz2ibJ8`=Ns~ zQ}2^SnQSp;S!sw;y25F-z%I0U!uq4)bs0CDeF5W$S2XPze{K2wi8&aWF9kIA7!&sZ zXRwh4LVQchh{nrGiH z(X4ow=X{ZxjfG~jK2LUwDn2%<_>5sSg=Z5O6$Q<^4EaAZ&dRF~%x-S2zfbtk&nzLS z*ejtMxU=zA??3%^cM-=O?65zv!ga6Wo43*ytao3HLavXiV^p}_Ll4uf9_!?sJxyr0 zWIk%9FYo5*X^n;kj9Nz<&`pjyyUJ$U z@PqVw#bKWFg|V|H`ywXX>l|Z>z2~h)wDt1FfA>lC$rAMVN<~3j&LtKY4h-V{iVyJ(dE#rJ_X96Yc@?O!3$pZ6LfshgWW*y@Edc&>Pt*k!F=I^fPtCb@>qmLgmvOer-XOQ~MOS1qI z4OLp1O{$=!_&Tz3?Gdi_=G2$JC*nbZV&gClgZU#z8VKg@C&n1kf*_MWY zjdSseY@>5|IKqVJ`^2T+hD$XJM1}01Sv1_|2&Ia_$5uJ7Gu$!N>yr`93WDAQrE$qZ zd0JRQPBDvAY%`|Uw^GMcpLnIQbH@Mw)u%|mQ`lgg!47<+yKdkC7k|lHAaSw(q%(yv0jhG{H4p^Tir6>p|1U4P zW`w{lPOb}4uy@OIZvqIa7Xw&h-Y}pM!X5)}-dEf==j7vDgt!WwJ42LYClKHCpE74;9`+mhcVVB^|kF_--Wv)%++i9tj&xW4>XsCluBv~A-)nBMBgK(~sdJ}gst zN7MCL7w;8Z#Q7G;%{D`Iy%|*?q+}-Kz&8%_xY{2lr(Z>}FlVZgroQdvRI1+x7j(RH z9*_m(yH~+CU-0VT)r5zDZ)w!(Xe-cAm z?Z`Uh-olWuqm;-f7ZA$tKa$>-#B{lTe&iNeVu#ik<6?B)8(JYoXw@@#apqy+90~1r z)P+7kp=<^ISjv^P$tK09kSgCOcqhzlSwLEC*YeVPHi>?~U;so05h{3%Y51ga34rtm+*ICeC31DS6*_T#ms}pqD zHpWgSZLhZfgkqiPBQ{S(2|%p&gYq+o>i_xmnvLYcfm_IR~|{rP+_?>^*1UD zdL(t16KV|pK-PE=^A63x>sY1Ll!~0(q2WibX{6H8JMS2$n1)swad-NyEOpw}`A0{; zqd%gtH%8vLWZrel4|bBb_ix*nQ|s1v>3%MA>w>gmC}^^uT1B9x!OdKZ@eN1~q3SmX zQ@nXoTZidr&uka8?`Po?6P_CuG*^wW82h6z&K?nR8P^s+tErEf4-EER8L>~NY52BV z*QqubSq2Mj^I8~6IZO^<_qc|W(JRb|HY%Q8B1D#bb2`j?T=NdGf6@v(-q3w$c3U?? zGql^gK+y{W0=;(t>*GZ#KHS&Wsm3sBY@O&?f;YiPO?+c~$FNauDx3uz7OVGuGyl*|pK1pkB?5u|2GUw|7=d4)KD`ZtW=W|j06)%Kq zUx@MQ)+LW#4&V&9PnNchMC*CaTuUK#o$IA@@7`AP9Q4mo_7~Wt-=3*@%IhZFPKRz) zvw+WZ+kdyk6O2`FM;eG9!(J~ZW@IRqcAYyEhfL%*EYHM4lUCNk+CTO2;{@_?U;9?Q z?Yh*#xCVBAjBMG(c2(PBTv0I_y9>~r?gydUgK*v*pfO9@ZZlBLFkjJ8oC|(@oAfhe zM!Q_Wp0sm-S?KASA@5pu8|_F2w+nD1hNxW#9j5jP-|%rZ$N405gdDuMev0XBX`V>@ z6wtwHTF_7S7!cXp&U7X)iEEB>OAT!7Li7_`>3QLqCSfa85|YMH6}T)IEg@?%q#M|~ z^VSnCF}9IKMS6^^=RNBIZQba}R5ET(oAkw506%^^xYZ6^5hzN)@k)|Mo8k%%MPy_^oNC}~Z zC;Ie^B3No=Gsr ziQ}GGwhC#&*_Kp+DY8Te|rJU(}}ddJ~wZ;tS(fAQmg=dfFVtR+yY00O)e;~yEf1kGAr?uD~9CvFsAz>Q=$sLeH& zrM8!I_saCf9md>eojXm+*m$5MENl%}m?mb2zk5YIa@{D$1%(aIFz{VAE(%D;NWJ>f z2AgugJ!-G<{FLc$wtCJA9YyKs1?*GY`U(Ik%_%7@`1upq1cl`tNE`f8eJnARF?Tya zrE_LfF5NXBg^%kCn&s8)4ssfS>=_LXo$`8;=V(5=V@7b%kLyI))M1z!NXtcuAJ6c} zp%^>%cJssc1*l{?Bv!1*)8_*L5y2Jjq)=bm)O6bPjVJ7hx=)Uhd5wLp%W`@uGcRH^ zGyPKH^OWZKIY$8S^jUs>2;{&^n#lb!=SWtt+|cJ)q^G#57tm9nx`0So2>-AVrs+V{tE+z*%)!S^TW4sh8Wm3q%! 
zhGWp|kc#{$0I1$uDEaKQ`}lG-hL;&J<%fu1LD!@HR7j2ts(DWq@*rE4+v9SU?y6J(S5!m!LPwV)D3Y@$k4gY^u2a@iQ5!bXnS zP}mc^SC*t_Ut^`0xCCd=WAP<82viw_EVt=fF(J9=Zh?OsvU}(=o$CZ2J272fB!59q zuWb3}+opk)$*`OK5UphuT=L{{@^+?J>2fH^b_g1Zab2 z>Q|p9+{@2(t{t>ZeKdPNVANANHd&|R1+@!1T9@i!aAn6BYa)&S3G99PElljnB&h^bzj|=OK-VTRaRQd_KeIJf21B( z#ae1f3dzCQiFK~>s?hqn+RiG)cE$|nX`&Vg z^yq6W zn?X&uQwtw4s?lVwH<3c6L>+9xps41DX{Vcq$B=pyREF4(-;LlTKw^Gh02?Yh3sm*-T&M9~?>!h**T| zo`+|q=_LFZ!>fuuxy78w^VM&b(z@GbdQL|veEP}tIbDbZu5@xh7ut1r#7$q-Nehb4 z;1Loy8?0dNuCpb)Nrq-_o@C#ok;Us3PPnx`UO4#6(0H@9vBY4aWNM<8=+xt~ula7(ZSU@Bl+YhhOyNy1LSA zdA0=h;`(F?XjEgpE^}+gaQ@b8Z1MDIc-M`89qov`Wv&0$Iiu^Ml=ZGHJ-_50@yh^@ zii&E^BCqI$Qff%m_krcLk%bo1+cWZ?2PMAtIUKt=gz&GB5hbN2ABOcJk_~>hiTyPI zc~|N`B`*JUoACY5Vfy$DC17_U6bGr{g-jwu0sw&GhgmNT>PhVvl;qko#oV_`SH!FL zo=4`^I`l2j$7_MdCE$B#^hIC}u6y`)%ql9b?`!S^1p$Z@03Hey0{wmpOfdamQ52|+ z&tYV*07c3Rwm2vpg$QM1jC9}lx3Xa5p3R6$Gp|iVgN-$_5#vduXL6YVA^;I71`O*9 zPM}N1v*4l?!{a3Rg-;+*4Fzx)YYkz!kd-oU~VtYKUeSq@{`h>YFYi_*J2YD;pvALKC z<<+LC6HW!tAd4u+A-XBEwoq_rXeBnK_kpDDir<3(@5_th3*ta(qywXWvrsX8`F_*c zKnOeEvY$He*c|e!EVuew7UvuVZY1BAFbV-}j!h{2v~6TtDkFV~c?CbkT08^Wd?n{O z#~()ugrOo}B@8ndJh`G&p!~HO%vsLvNci+MhXCEjQ%^HE`alwJZV+sp=SQUU%6UNe zCI@BJT+B-WL#3mDp1pNwX4M6p|3T?7aE$V7vyEhV)2{|(_dQzTIJ0ny?p?})OJ2;C zEFoa55_Nmv6rv*Xv#OpdH4iqBC>Fi!O)s5Lq89591WaFv zqen+S*Z^X|2(6ZW<-=ho0;i4RjLY>)0T<6HL6=B6Ns&c?p!=K)|^iL_(Gug)Dw#h8g(;n)l?gv3E{GtIho@hEEV#^izW@0ocI zy(CYL)+t>1ne4R_w_q}kx|Ht~WRu3@RV?Wy=Wl^P*IyF6=%*zPQ{v(<)rQKHvth^y z=I*}yGA^aAzBvt9$&QyXW4d@KkW;~!`XB=uJ za2$fuif^Vw-L6*+DU5S{Ry+H*TO*Dr(XGjtg?ZaI$KTaUKN@i?-5n_}sWF_rn?tXh z*ag_VTcG@+&|IEIpM(pr)A{|U2m~sNxK}$2{=mKUwL~)hM(f~6#^}PS6kg(_a`>p4 zkeUtxk>RW86{n1cjlvbpgK()Pms`;Z6T>xz6uXO=eK#FD*%_Dl zE9a1vDI&T`gxwBE!h{W#}7QcUEx)D zlcDNi%e9G{-Z_^W*CW(8FqB%~&loO;urvR7E#y7VQUijI&J|6$ox<-F!sw`-kz8XS z0Ge(xK?wjiUbhZ~0>2;fou$o?@J19-0wGwwa8o55lSEn^t66QDGk)4lOkUft&l9L^ zG;My!`ZV^s;SV}GhCPh&)|#!rxUdkBiQXZEl})$%6Xp;Z7zY^Ek7o?LwiiRxrp|%w zd+pJg@uJ(DGqn`5JZyC!xieq-DY5v!1{5iS|MxdXSdQib6A-_6I0jG~={-orMWmkr zlo^0Ygn*I5&+LMoIs>!VpS9_~3ts=aPpUia5Qo;83D<}Ax7A=ufxVc|2GHf*T5MW4 z&xH+yi8q1(DaxQfxV@Eh;;%oX{*)6q*EJT717865^XrTG4Xz2=K(|gK7PhO6i=m$v z4dzTZ34Kd-e5H}jnv2}FHejmhJx^|pR|m}BRFOd+5U1^D{v8fNg`xqR;n@B34WJ|O zogbOE1@pxyQI&^Wa5ewfkrda#H(Gfp?f}jeMY61l;*sQmESOrha-tW^0^*w|4}6RC z;64cl_v3|fw`OGL17&?MF8jiHRXy8|BCX{ifgIbK6nKYHXI@H3bJ8#-s0-Na_~mdz zKyA&KwauWmcTK2d%P#zW^re#mqf+HAyRPgeZcb#TiHsX{bV%sIk8#32wd>t{@((m| zgaZsgt>wouLo>HR@z4*nuO$jTH0Wf?(k#$3dCbLE!Z6|DY4-4n-v&^f(yBI;H1Z`h znnn#C+Bbi$GzecJ`Ne~4l5tD-2kqU5AHg4Y+GC_6U{`y0jz`?!Ew;U`Lo&m3u&w=( zwzCdi?-YW1u&nzoBeqJS+_(TswX@UXR0 z@i=vx$Pg5jhnJr;OoQEI(QTO4eaVZ|<{7Y*x*lt1`TGLt&)!;T3bil`nsMRmQKqC` z4VA==nnGsvT{@9U^-&LtkSU&o7Qo}if2~?{$>n>%)A;6Mon@llPm{E1Nh#f8eT*$z z4TjPudMNfokpT_n5S8w9ti`|vZIz-X&<*3C+F5UABk3A%7lz6RKuf>nCl((m^_E0BU6TC zG#v#vt?DDWQuhl5uLDS=JZ$Z|Z;{@ni#Lns9z49x?*Y1#zhcl@x-U*bMTk|O8zk11 zRrj#4utB|;`1X}Ub+G-;sd3!Ql9N{b@=7Zj+N`n|N_uVb$Bby`(fSiN)3gNJC`=Q^ zVO&eJY-EJBc_Jb9Xtpr{ZYp0UtXygRkuY_k1Cs_R?y17J&ThVt2*Q-jpvRBoG&A-a zEzu?HTaqHqgP$ywhUbD+k5Gip6!>e~l}&|;`&5^Yz3a2r$@zE`;`fxHI~1PRH)??k zPS>Uq7Hz%qX6M8A4`(q`gS=(GF-=MLz^{Vj&Udwb%>LfZn4M~{!hYy#xttIq2hhps z(n5gS_#As0~+zx;?mWIU`Vj; zt^FC9nz+CkD8aWxX#R5)-?y1sAAf8W2S1Qpk-JcxNu{OFo0Np7U1cI~={K&e-xsYx zpKaY*n+6jdtnvI*W<8~jyCmF&u;jf>C%tp=n$9>7egrZ@sduA5ankQZ=KI{y-lhgr z8g_NAJZ_5qd7){z#q190HqVb(Vb@&Muilfv#3-IT=>#@mblgwUUbzr??t)+27(U9` z-A43vDHCh~leb`s=cxJ1Xz@zN%!cBMTcx)5iGcDVRSh#r=jjJ(s`9Uk1PRlZ75h00 zfS(k=sPU%AqXJ6gLh*E~XVgR1_+kSGcx6+{S$>72?b29lqUxciPmf~q6LfmU5=)53 zyeaJBSIB=&3NsLCc;7!aI{bI3Xnk!h8jBp#>MdEB64fKsZES3@JI-&~bou0gtA=eq 
zw`u`f#H~(&t^v#=zP@=$%3H*LUn4z(n+kvFY*E<0Wz(@2hpfNG?(TS^^?t`j!Ye|M zZ7BXxw7w9khYcLB<;s*SD3w!|i0nQTnl(|Y=+Le}IKVe?^ zgjPZ}^B#E&j&=#{hn_OUjIupip^Kzk_lPd`Qy;xDIbE!1iU!Qie`k%vKv_Y9CSqOq zx{z!UjF5oMT~*xS)*3NRYii=yJY*vUiH4g#*T5mFuarB^>Z-$3VmTKVz1_(jFbA4T@Nb!{(v5PMHzl%(fu1wbab zLX+Ow+>Tefh7YRZ{eN(bQx^Q2YZenzx_@pFqGCS@itCVjo8wR1bkoHRz&*f0grh^k z&BbzfZV(|KU%UB%@(n7U)HvzxQGFOxIrhCCr&}^nH%b&JC*@vL^?^bb{rC37t7(xg zo(H?5Z_Hum6sS$Kl8^cp7cWZ25ob4XJ9FU5?%6;lSfAN&tK2O1R1v)OIBN-Ou3)@!Mq<($iS!^Y$RA8T+&O^X?>EGuB5iJ`0T&~cOCxo zDde8p7lhB=9x)C6@T>$o-DQoS>d4^sA*Gv8>VcQGDBH!6mBM&aO{Lb*dzMrjub>Hq z^L;BzpP)sL*0!Gm3ZWs)UN`3+W8IZzv75c>MxvR}M%HPeemSCM3Nb00SWR1?Pg3Vb z`SX6>E1li@CsyxZkq&;*(5}-A{)Qp^l!gWPLrXi&{ZV2hjF?UQNg11$e#+A|jX!8O zqmtW~v$G0s_!THd;%v~Thn@X8>7~2UaPCo-nQ8N{kAW9ZL9m%m@{DN%8|OaF_@arU zEBrU(-M4xH5WM9NucMGF~t6fA4Fnl{mYiS1Q0dtOTzGk!IXz;Q7F3DF` zKLb=8B>cs&qhz3>OW7Y0B~r1E(Tm^rF{ALJQ@w3TxtB<9PHEN~Cm46|zd^wLs)l6v z6OQ3Hlp31W4VxH}Rr7Pl?@XvbB9Yn8?QA;u+yPzh){lhpTO&SJ0t?v(`!hb(TkPXd z?mH{)qHo@tTc^GNb-mhodI^_Rd3w7Ow(%G!Ta(?majnU-r{V(mvwJqdM8&!j*Oq&Ml?;yGJ~&s2G zt=wzk+7Zc)DhiY-y`86Ddx`mxLcKz>s(+}K5cAGuHL$&9J$%YwM3cN>PV8Trtr;#cImJo#ey z6J4UlQmg*i$cM8Gn)qAHkz&30gt}pD{{ z#)R3h&|6hFkC%*^f12n86|f)j8}2yjF9};qpi2`;9V@~(qMV4+g!a3`iE-Db+M9zB zj{fHWe2=pVlKLpUp`}R7s zKmmOhzRVt2y5~Lo$szaxmj}I-x2pw`Ltc-sYOVd_U+q?pEKzdD3_JvZ(p2m>g5G9j zZ2|z|8}3je8v#GJw4E<~fy-PGI0K)9_A+MTH!Vgsk<=yHq}zKQbP4SuUde3UEi#~m zNP`{$Exw??YB97YbeDi;(#pH-TR@JOW9M{TX}@X7IKLgu7^c@-{bO#SUq(#c74}Cs z)OjBU-C@l2^>fN^_u6;WEIiLH5EVkcLp0E?0*1gzm-u({lUmqk2^p9xv5&*9;zpr) zR~Miap59(QmOqBV%kh+wq5^&jUQn#_JL0b6s^*(_-mc}v-}?rDo=x&a5yh#_?(AuA zcb%mX;U${kNz!j9RIm|vG)q8_b){bjx>6f`i)@d9Tpbt!l@u^DF54fM#frHLEr#e9 zG<`OLRN{}(621khCHdQ>#sW2NMcLjOHU^(C4-Af#+hOVHiSaMsZ%0;cObiVALv(dq z&ffyJ2!Uk6i=3&1$zL+H6ZcP!rXEX_O>I9Fq}WpTgH$0fEO2ym&pz!VdU4E_=l2c} zAnbJtXPMecl^$aW5B1UU4k+5;q;Q&foSaBNTE`?UIO_dRQkx0k?24 z_APZy{1Fqnk!D=XzyuY)V9Z(1-c&S5{HbJcm>7fe+r$c;Q~bm%sqeMvccnK!xDul1 z5?)l6?*%W8z?4Rm{s_L97olXMFXkN6@e$dmuYx*q5NaP@X@qYc$sD_4!%NcZuD$7R zyYq#&h$y}b;GLV`$H6K0Q3l%}Ke!bGG>bUbqb2q$MYsq*aL~>oD$A!C zT*e*aHz-ob9C%VOJ4%;Ms;?jKytAY5kR^1!wsib6Jc+p75g~bX#KVkKK5$}MT#s&q z6$jwhUiB{!Z-y@#czI=|dO0b{^-5vZjFtm)5Gkbk86(QPY_Dyw;w-m9#KT`jZ_1-T zi13Y?@HEM)jH&>9??9{5J;ejZ?6>wMMXLh=D+s|THn6bpfg69Xb}OJEX!(1AIt%IOuK+=?g<-z6ysqQE+7tzX2c{uCznaUPq^}E4>%??w;R~_;)qVmQn(4T_D!BCVNmaCy_~C z@Uec%tnb5c`tqQRRzxk-~L--a-0z*$cdrjEMkTxY|%GHv)1ZzquE3wrY?X8e2&rQd?<^O}*849LZPz_dA;*Q9&7L8h(|^>E zGFM?oL;?$9f_-b})Kgj6RC3aKkt*ot7#6Mp0@zYf3ZS3wpY8$**Cy1Jb7^Q>jDGvL zhn@Z_67cR{+{?-`{I~n}9}mI*%`beHKNs_NK;?C#mCXUrA6x&V>0t2G4WN+b|U2QQ}sUUPAS0t0PF>{wHIXS*}?Q?x&cy7q)#}z zL;D*rBuLx|`BF*O1s30R>t{O9(4NJJ1K2hI90$pFBytaeel`;YXFxv_beH}{gqp*E zt03gtV*J=)-KF}W9-1&jSV_OJ*Z`HJ9UjYG#`~?`2!ECHY4}dgr%dS7GHBdvw5U+i zRFr}77SPsXD8T44Z{!LL9SQO@ygTWBSNT-qn*-<7Oit0Zd+Qu!61_^G$^(ISt$OHF z=kv+k!9|)j3Z9t3I`3X!RWimQ#|j}fh?8+79=fN~0v9Gjl^ex_^5C9Flf9<%N{x6kQ%(xe$nzNbXQ*OByF#C#xoNzPw0*^4N5M2pISc zJ{W~Ad*HaQ-Kk!vR=z*LsDOdCz=FBY)qqH5WfUN*QcGii!7(#N2?_8{OMiJ?oE3oa6}JCy8VI@ zrSMFVIYNOa5&2uF;aFS$S;@JG>hrwk!HtWvZ~999a3jEJ<%%0XyS@M(UD%0;>9(uP zvBI052*wYCJvqtHp1R22cOo^3oAmu;0{oy|y7LXJj|iu({FUk9_cSH0&wi@+%TV23 zfuK!U{=x1m*ox5sZmGiTp@19=vz>SEy0-mp73bu)7^MFLXVROR{)qTdW<`|MSJhBb z?K>SkdnL}tOblEdbmv}sCJ3a=dZVRbi&3!@S04^r?1ge)lGYrYG<2G-5;W{Fj)hl6 zEz5m~rJ3Bp5{Dnl>Z0Q6wu&PGPLg)5z00S_VDFgbhJ~jtk+pE;r%O6*2}D;Yd-t;0qZ(gKA`N=%&gbb|~I87sf1z z9VVU)VEj=TWa!4K3Ru}k#LV)5KORXYih(*E&SUH$#L}$@FocLshFl?QU1=%D53I$hug(E8o z!pOU{Iz1$>8v+NMU;r%zpbrCVg21VsRONLFc<}yp9O?{Axp#<1^mt2H6>P?U;ZE+f z8Y*xezjofxI&%3x3Nrun;{Au;{QcPfcOLKC^to;2tPCI12nb|<`<6;d_KNEtltKfE 
zinhN#GGNJjx_&(){y*tAVk{vF9plA=H=_NT*;A2XAi(X1dO{`w8)Q&jtB8sR!sBlE zw#7deI6ziUrhesG{;x$IF=jGo2#6@(R6_&xQ*luKM$qfy+tzCo$Pt)HBDQ)*+X!$7 z!&wC2)hjmPMNA$Ihl2$nj*(l(vhj%eiQtj0nVF9ycCCXd=u!oKqJV#yidon<6sRHB z`R&__{B&?+U=o88v+O>3SUVdNoz9Mlj#ktPoZMctH5d_TC>xoECHYvt#$ zZy@@^va{Y&+vZy~j#pf&FOZWwiWENjuvu$3oO0jyY2l1+U(vCE$qO8bU`OjZkQ@Ka zt*)Vh?FC=UigTi?md|nWRq;|!66kMEoPX7{>()LXmslqc^r5&|X_s3XpHbGw9K@kH z$)t;3w|>FYj;Q|LF&rc~pssUA=g(9Y_-3%PiR4}%p`BfueS@x&D2a0}W2$xOyVOf3 z=Cgia&3Vtm_M^}y?Pb>2i#Xdf%^#jCKz!Du&nM(U{V(M}bmA`8W2o?eLrn~)h1g=& z*^v!6Dhk5{*NXfGZOT?qG&A+onj=171@UOdVfg$NRH;jeL(?T@*n9Yu_f*wd=KMm% zU7oP60u!td9a^k!Umksp`M_*qNE*kO6+(7)w02~EY)!IQ#{STDWG=~8`{+CKmSH~Z zJq~m^hgSa_**8ENsq*h0O(&dJ8{U}~ zt2V5*HqHc$GA(luh;3x97Lz?p6n!CMitZ-PCoa8Ovrt!8Pe9oCnF!PjXhwN1S0tVt z^pcU;>2&sq&Xv;J<*$XFdf<}f^4Y#J+YD|+V)?|+uQlPg0q()N^n`mXIQnQ;~=;+|WeuRV(4g=BlYy z4|c~Kr@VEHWaPcY^Bm>w2L%nEuII;bXd#H+iZaJ~^UlVpM+uGwU+3zF^=`gjxr;ge zu7In1_460ur-!9UVJj3C9t)9FR+LDASCMBP?g8b14jq)aQm!(>fUVQa4eO(aUm)RkyEh?uES6*Gb zKCYS-^?@VX*yp7r6uLxyi##ONA_B}^t_ewe%GyeSrD*sX&T%kD&r(npkXG_^q6~kb z;@8})G9$+N$z1jg8t%oAd@KH(=64jEA-rogTlBkAJBXiL&JlGi^Fl|2>qvaX63n8u zx5Zpm(3P!+e;V7#Tpah*;h}x81H6Ocom_suNMJpL327Ax<3w5E?gguSl^XCtTZoyN zwxFJ&VlX+z1K_xBcS&+JYH8vu4C1WrMHP9npQ_|A_GSsJo=el$sd1OoU)}w<98&>j z^l1Vnb_4E1V8u6eBK%KWsjY5^-zXi)6aFd6Swu~P*CmCvqha!Y)r$RZ9=QLL1~HcU z;@|QGpLM!-FX%NOl;5F^*Mc6Glx)fIn+1%TiGj}R>1rU0_CLY0vMbr1G?>FgeK|VO zahBe$)torUGxKm1o@I>9NF5DXl#XBszw z0KiM44tXq8Zrv72Vd|{2^mTaAV^SjBJ?*W>szU14&ryKRh^jBIZLh%A#|UA-N&6e` zRe=`+d5>0pwineUcoWuyKl@)++Zz=~~X~U>qPJ z<0y45(hPd&6H-xv_JF;-bEODrt(<%C?QCL${krL zMAg(SDdu~I-Y|yj*s4OH-P&BpRsX>=rX&vAcGu;HsM=MK9+TpA4MhUO7zfOeL-gb|M24(5`~LQj5i%Q}fJd{Ur(n#MPt5aYghnZdYVr=aQ^7BB7uL)4 zji58Unb`onWU(r)WC2PuuD&&_U4~1nWLf*w@%duiEG)O-o4r z877$e)fJHbZkuGKln`5Je>#r(%S7m%h3)eM-_{EMQ~1)6iKgA?QuhKlT_+cm9OQ<+II20tTrq0z;qXl2dg0GKZFkS< zfHU&lRh&INbp{*8zL3*foE9fAaF#?ui>(g!B+^7BBDAl9PX3AQqV(S1c)o`8WNuGz ziy56QNWL{UJ68d-$t&nOnH_cGRtKiB?A%bs2tRi8DVjYY0s39sYp?+{N`0^Y39fB` zOKm6|F_qz*N1WPvU+CIL4H`U*%d~Vc@)b6H$L;dq!Nai?Hi@1tiuX3UnXe4J$bQ_u z2=NS`ZT*twKUL|Ue>K>Ot9Mt${s${2brx&nJ5F1=gcw|eZ#a2Ul<=i%F%gX&_tp>}_reG>4#!FWq=rCrwmUsPL;g2OUUUfbXxsw{jAgou zXF+WoaxTy-Kl9pfOC5Fzk0qziwuP+sTQzU|e?-V-T>q}{cO1-Z$=YT!XzQPMelDL) z5e+z(0)7FnkurbO%{QAsxAs4`!Y1deetg0-w%D{zLYf}!3ZngU{&<<#T+0q*Pxyh! 
zdXX5-_3Fv>GqMbdkpoqF;!2({B_pA4fr6I)Cky^HQub*ZM!C+>W6w)bO@RIcKuP}-kgsLruz@`)Asyq}Q$2Zt>&NX~%~?=W&VBbT zU^NENiNXL}q6A0_NP~-WhlHVig;M1B>zG_+!{GT~sXRZbiJ92OvB{O+PgjifPk!Wp+kkJ+j{llmedu&Ei-Wssc*v+fyb+arYxeVG?xhaJ zy5*4wC4HB-qJs@$a(J=_->^Qqq?B8$z-1j4m-!=N{zLru9$CDDY{D1iPrsOZ+(+x@ z3~!L57{(;6ynUBKpw}*9(g`29H}HlZsU6>4lC&dsR8ctRnTwR&(Xx^bO&3K*BWiVe zWi-1&+8Y|sbx|_9rwsV2=!yrZJEb>m)bwoVY%6WGHxZt{x*doF(Bq6N)1672BV9^t z{e6V~F!ER(&6h_SVD-2Er2abGc9)E$Tx#_EgrQA{6nL{L?96gmTu2VsF?^=BYd<3N zE!qN+$J~Mv7+p%~y_y!=X$Ej?o1nPawO(`G&7j{4iHK{s*R$DfU+PWGbeAcHY=e!U zHD<*O`^GN4Ajv(6X=~lKFxE{a<0=<-iOVDYURPHwHB*_9+)~Is*xo`4#6*|$htnwE z*;?P%#m|wG9-;<4y$nt3W#p87%hpmFY^b(G=nwHH4JEr9O==|qjGinbVYmiM}^OdXP^1vJ1R2>5Mss}#%%viKxuL8%LiKidDo=mqo6atw-W_Vss;OlL8-P@z} z+8pPmttfNO%rtS7{-KdW9I%M@+q*1%hTA0xI7`8gpaY?AM|dN$=m+$>cC9B92!P*w zVMa|U_?bkX)jRafVFTZ>qi|E5Vh$}Vwvf=Y|4;2oU|=qRCHX_tL-mr23gI8WSzJ68 zP~~wC@F&fV_88%Ejpf3>E{m&F9NJWnkI8!xu72g4Zw}^`{zLz)j(nCAfiDpUR$0;l zxNks#^qPEylqhd3h*C=)2x%P)WJr9Me9Iv=JEcR5;ebkE zZ?~>!eg#uUOyKtc?XrK_EK|c|aw?4+q^`c#7QFVp#r8hOs(rB*4S9f7{H}IZcdpAS zB>S$g9jL7YWP&mG+?{u*^s?O0DF9LoUV93DL}4>zzAFRv#<2q*F8;Hk`+xDgo=bZB zFFpF3U*dYZ#@%^g;u6qpA^8_M_I-)cU)71>K)x7b#J9s$U7$M=5l7bX71>N5DzKGD)Ln$RM)dPthg@$9=QJ^ZYhuLSU7I-; zUb9esm)NK8A0!tMK)gkFyh7=#$*FT(J@2s7=4*9qWMdZoRr%cjhd+2bo6X*n_6z2m zrE(r+(s%nvD593h1xf`(I;RxaiA=JjMAEU6-t zu-C`knQBt-Co(G>SSkFfa8aO9;xg*m<_6B-!!HC7&a9~vfuCA9pWde=f73gAp=CJ8 z`QTvwsUHXSfA=|IqXBU+D`?LpH}*8Yiu9JqTTyxxgNsIEE;gQCy@s~r+8Pb!jyuIj zKJUp4dY?3$m5?n0V}W8XO12c`8TFNvPXCACqJWWwS7{0FTz)K{4r-Zf+@q86WN=Y(EA~NMZ^au>xh#PnbO45eVE-uVN-2 z(eN`m&WTStHEnKbEid&bssg3j>@4?U=m~E1n1j;bkh2|U8DV?BE^Ki6W7WBViu`<4 z%|zSA@FsZ9dj;0d{#@c|Ax~k2cgDu&?y<{p^pl$1Q35_0Y>TO#TMZp$0psCqkv(a) zCtjC}3A8FSck*~S|I={SBUa3;LV3=oV=L#+RoTx(a^~}1=LRRD)@MR31u8^8G7i~W z%`LAZc|an^+gjpi#uhh8uP0-@=uMrX?MFaZ{;?W_G*I7{96K=-Ba6{6x%V0Cu@oU@2A zw(m%oTJ*?iTiK2cA zhKA2LKuKRH1hxfg5d(2qRN(pfTK4XOWj8r7F%i%$w5DI!PoKcM*gLtg;Pd@xymcO3nP^ZLrSC7{s)^!T_*7A&%2q$mD>@@?RiJ5_wy z4~9JB---Cn|K(dR0ea!eS%?L!p1{Mtm5r7JRR+xVJmG%j1Ma%<@Iv|Z zC7_`|)WqdHpVWN%9il$6VybpMvZMiUyQZZM61%v9*W9#);JY5tS9o_BJ_N@*!JdQ? 
[... remainder of the base85-encoded GIT binary patch data for the architecture image assets (assets/architecture.jpg deleted, assets/architecture.png added) omitted; not human-readable ...]
zBd3&`avheT8rg7~A@mhoEx$1R#>rF&hy%J1vwYqc3b~8M`xVndDtYcYG!;6Mk!!Aq z@v479m3gO4ccl?gBfMV~vlxlZN-ob)zM8~hj_2hd$(MBL5Ww5Bavop^L*T#f=VFU} z>9#f@8vEB%mC@Ucqp`lMsqQ4P)!sU#Nm{vAcan8qm3ZLTpv}9sL5qZt5p8f7z#2rAL|Bk_5b>9V6d@M zy0Ud>*uOP$fBxoK?SuK!aCADT)#X%T@o;_0e9^{lB zb~P+HMyamJ5dL$O%ue78Ok+tvM^a@;sG9c-m_e-ZT2zU)l!;hW`z%s;B^RBVBu%hJ zRcK{cZDd+ar9qyrlO0fKkV5m(AP+HHc}KFp22_wgfnZhUO+!=C`uHf=$($TEWUSVq zt2-YaPhUqaFmE<%4h7jV8?B?E&y9TZTyxjl(!R=26R03N>=OvE7Nn;|yz44mFb@od zTjiV7YbDHhBZUf}5};ZjuCWG!aqe(Uk8rmyY#D0GNRd2QO6WeFxrym$^YYcJP2;$s zMc9vEKywTPnuZDMyBXp9BajXNh5}?=AADh9aeZW$3rB}60Ub`$`Kr5``q0wFt{R0@ z^IRYKk3pbotbHAG|Ni~7h_47NTj4gyGN=oJ5#i}V1Z(J!IsLPXu-zD4Bik}TGLIiW z1Z79{oP~gNwTtsGaBt^^pC!oqUZ4$3xS?tj66;RcSnzDN-YzJ;`%^g;OwHVaLZrMc-OXE+TTz}1!nA`gD z<;Kx9tZS;8t3sqqO4SIcX?$m2v6S!TO)^C*nwq4jk`|z3rZ?|)!{VhQyP8ef){MqU z8Vnaq0!lHvrVvdZs6(~gS&uYd&DJ(6%|SjpB*clhNz8I${8Jq8Ra`9a3ZXu@F9RT? zfJX{@g+%EtJ5TBRD#U&F@X-oS0N-(pJ zvym0|-a5^+tK6Hf>W|L&{#=8)bVvQJNpfuj<&DrzWN;pMuJJ5kSBmxPTH-t8=aP%+ zrK3OryNeedeh+8NyLpaNa=6jS&l}{5QV&+3;Y@R5(Nt)^ov~SpmW1;iojqOC*Q-1P z?CWRbq|#)#XH5v_JgWpU)H)F?IQ|Izl|VNKtA|L_-S|0H1!QcRxA11&%q}xr2H;1y^OZ|*iGs9 zjoQNIcdk_0VM92)L^s~C69YRwV^cr*R}K+;u-HcHYjEP@MdY@6**i`Q1E&k_^c0;Y zg`hi~^%Nchw|M^K$<1TijkO%(;Ha z=#QBl$!zh**z(G~oM&Q|oE2e&sQAOb-NVW?p<3a0Av&RnqR1P5l3PNvtn?-9&q`3W zvW`D{RkaCONv$71zcD>*l6TlwDknSNCYT73cez>8xkt?0BVGL3lKZH1Q+e=$ydgp5 zztB=E=eA44gA-5DKIS-`|0;$A&aVG0{?~eS6b#85K(nM7rN7}iPa;;dft-e~0oP#< zbQ!$1m{Z&UT9jsjE(=!=9n$(I&}R;`ynOk`6oB7$EjX&Ea5Wk_8p>GPX2V!m54l*e zGj=j+YHD@@b!2 zU)44=ILKg_eMY6amIC)ftsrvx&+F+iOnSIrB8^$OM>DT5*yV=DN1YBv}4!%C+uCJ8?nq zJA{&F)hDsIJb+|&Vj8Y?eglEb9DOLPYflMu5QM`_HJh|<%AeL0)j;{u9{*i4Xa%*Nj`YR6NREjCvy1YlB@1JkKM;vfm7({WK!S{g?3Vgcj zTj%`mRIqw-t-eiMK45@5W`C2atK?}EW?AvQ5nKeP))=}2LN#xLMGnTK)(*~Sb>O3 zGgf}EMYo&rDdnxin9?!2Pd*-g1705~#JVtC56tTBF*q$;{4COGUYu|hYUjoL`SQ$X z{|?dW68-A8R(q{oe4nz~Q6jVA(}YKh@}bb2^M=P81|#Z)!m<%=^QmnHvvmGnC9B~u z-O_8yPfwM?0HeOw6S&f|+!o5m93+yh?Z>H^zWTyfd4k}YPPmlOOpzbtpvzxL|=Bd1;S*_;* zCZwMT^}Vx z)-Ol4a=P3}ut!AGF;}5zN%3SG^Vv2-JO@UI)qNw0?4FcbIxI-hHZG#2X1g6Otk~kJ z(}!ALB|MX>PRH5fB=Fmut-MFLwBDZoc*NMLw>|{;+yCM*dcY(4n?II6kT>dF^bMq~ zt=*}3^X4ls)Ym1NQEk6X)KJfz0+*873%O3UqhCGoJz&f+v$tOlTrjR5E0*q0U#Rj* zTkk`@^m--40Q!3Ed7u&hHcksaaBKmRO5|3wVSHw7dW zbMec`w+#TlWOjLRa>=XS*Ew)N|KLEh%bI06#c=wTCMhpen!(h68uv-j7&5WCEtV|I zyNWY(mBTQL{SbQo7VK0iaXzXQ;AoF}ObFa&lBg>5bl0TDZDW->aHTz25Ap?d-jMAw ziC)4jm@zT#)OLy^kt9 zZ-hV~P4Y6<`&6S&JN^7K@TCL3-l)rewbM*78)Y*II?6A9Q{9!Rkl{cu#);~ZzLS&F z^q{6x;^OdvdWN71NMehCK~1L(3C5@~=h?jG_cfTh71u34J*%!pv~FFd^aGNZpv)So z^lkbkaaqd@W`M|$)+O%^gmIMpB)e=P4J(zQQ3v2~k!!LnM~ZdV%+W^KDzlO#u@uyS zrG>Ar=jxw=xl9k|6Asz#pYbk*mxu;ez82RO52uS}`BO?fI=7By`eY&&{d~8b0PTJ= zR(^n{a-3Y0T`KC^F-94}yj$(}d!7xp;QCgC>Ez{?3vGj!RSazIFTV;;vFhzeDvr%? 
z+)aMmgrKj;{m9zS4>Q&5qAFmSl{SYa`l^=q=L{am%Mvx6-Z6y89&#ouTl8FcZf zPvyth`ESiVRDssR3y!S&_rKe};sd~^PQBfqwg1ASQ=kt$H{)bWw@}~AECC1~Oou|x zD9hqcQF$Pmn7T{35+oHm3B)lf-=A2EW3Hl$_K5vs2vK3aa37LvkT~kF<3xg=XzaoJ z{1KF%9ib{JPp7BHD3zqnlz0n7@h;^vBliwYd{;6HtG>r_k9WQL^r1c9i(-RI&4*`U z-^Dpq>eS`Q9x}n7`7b>FjvKZdjpR0w$A9<EQk|ic!><3hDV#4lI8?&u;6g&7D5kmi|V30NuETS%x8dKUA&%s{b zW9G!Xj=h}0zsG$o6s_?}lAJ`=?k4G@;kCJ|{&RK)XDtvn=KPnJ{Xclh`^nSzm+Wl2 zGc6M0u%%4QkFv(#`2w1ZjSn=Bl4`Ep z!s%Y^FS2EzCqF56R}vN88PwtRyT|4x8E}-`VPODOn)jl1a%y2P;!c86MAHs?yTu=x z2ax2(L~z$`ht}%qN?Ku=`624hhz!$ASAl}ag(p6oLG?#9 z<#CwcsvDjpm1VfQXk7$hx0uGkaE*Qlgr0|@H!(Vm32WOx{?omkes$`<{`v<5j&&XA zzP>5rlGPJdfKqcR0CEMvlvVWKfHd<_Yu*XB=+uc~FL;ym;Jq9P9psa0S#m3Dp(Rbz zqy?r<4c27`2q7ED+BI_!Ot9M(ak~+#UAcO?c{qlyCcbIZ0yeEQX+J5LuP5{ghlyBo z2)Xo1U-lNnU+il#YOY@02;3|*H`=wax_~cI`4RohMvbB%ZLzbv^_O3M*{QHK6vzeU zMASOS+c(^iDL6lg1>`!U!K-K2%L~R#i7)}0z=5~N_CIxmfMVPVTDyL|n5Uk#93z%e za*;?wSB^Qzq?mfGAP=Gu)tiQctX!R4u6a=vU{*=cSbXIY@+`$KgCDI|>NrVux)5;n@pXOE}6fg8_8G}XNp`H%w!iwohD%k8V75DEyQ zn``DC(7-i)@7Eh}>8{klXT#Y&eI>LP%uAG?yK~Otn#bL6+i!cD1dt9?@@;pM>I{8m zi)r;f?mm0WQr!2cbdb$W;I=KU(!;$$MO+lR=Fe;;7P4HuNGCk+qB+q$RaXdZl5t`Mk03`0|5l% zxl$=fq7!pc@7KjQ`jOP#O0M4NdP*8GvjqF&$TprAGbX&qcVaT`)+3@{%htr<3jRt> ziqYo_#4@Jk<_`7|Mhj@@)jHP@yam#Mp$|d;1l|le8H9fYMRNLh+Xx|L>NuKsp{x#B zvCxcAI{T!GXCDSD4!AOH2{&?`SdPj5_I;ucOWE1HQI@?lvJ_3ShK2G)Yn%gGeTa7i zQo4q`)gVXbD7WLUpXEs^nP(TtC*0&eFJybK;(gwc1zXh??gZ0r)2c!EY$dSvUwCn3 z$f5SS`@0WE%xdkz6bE3T-GtbQ1SUpvt{z^Lk5p=Xpiw|300F6t1aU-WB=3RBQl=g^ zDe!Y>`+Uda0<~pjc=dZ^#jwuDm*+}3%S2mZC?ae-WQzI|wqpK~1;uI+r)y_<4_9@T zXX8?eGIUZtLFXQZ;d+=(ZtAzx(7$JDaO_C+UCDvbtpr`&+maW5l(tKk$o&_6?YHKu z8Or8)zt1;NW>9+OK^)S2twJ{?iT`|VhpiLhNYi(TDSY35w0(9BSu+{UlAxrKQz5Y( z-9@#vay=dl=J6@Iu(Ku#W2I~SKrKN?D%N$}&i!q2oM2S4iQ!+grB8BG%{K;1_pOek zUZwK)|D58ypRQ>}_5bEETJ5gckVmyWLBfS*ZnGn-vYj_n2X2SQG;*hl7r)|n?pegwZEvB-%N;_sBabAYVcm-(ug`we5!UWcvyl>Vs;xcFKdp7 zR--{|fdmB-8rILM$utUmQ!+*)AH9cv z)y>y0qKMsX%{^s-Dv&u2QW=1s26|r?;Qj(JS>;o|ca?Xyc`x35h|(YL2fI@e5e3kL z^Z7~XS?SVnVH`L9!1))2q0(yJCI-O5AvmX&ry7GWh3cS+tCBvw9$Ci=kPL%!GE(`zM@RiUP)l&`!HbDfMu~4S0 z-6%&~E4U@7@2Vx`Q~5Tvd7ZkB5){H^ikDjU3f#ag9T+X}tvI3aytj#W`sUznXJL z#JnW8@aNI}a^|w|#+e^cb@8uVY7`YCvJqsFbEW>} za8h_=Tz;c?HVAq-*Nj}``3H{Q_>**IJ@ACOK?adMhoF79SCbGhygfY8qlOXA5O?j3;bwTBOS*3WVeh+}RG#SsazK&3P>13Sg z)%Kep7>{LwUEQXOm3<3&Rh?hg7G_R{Z;->R5fy_jv;BWg7EHx)kc&5=11~?>K(?&& z)UUpn*)$tJIG>64l%(p^^pr+6>u$M@CuN!+djm1~<+i>&%6Km2^#{qPcc9hnbxTIq z(d3?KOR^uHddFm;mEaW@;>W+u!v`htE9R;vHoU4zk{nO+nZ#7zD>z8DA9*l&gcZql z7W+B$$8MU)8xJX08daBl)FWJZ(i_h=zsAg-L7m)V3om^aw(AIxe*V`-t>0VRaOZ!( z5dY0LgZ0a;SM~H%63&)=_0J{)RdFyd#}xY@fWnClkhX-TGQk<428M|7frDWRUA3xm2J?LRqU zk|&G;T;G*6p<{J>I^hlu4j8V* zO;f3o$P>tTpPu1Yeh6?)@QG054jiIdSf@_wxlSORVLT%0CQ+O>CwwZ{OU;rln0unq z8bJ$MZuTih_T5kRU3-j@as3$SIXBy7eHdbz1_P4UP>W62W4ZzIAbhu^oH8WzF znAkM+1eR$xrrYc)l%JKf&Xqis|0#BHWM6b^IyXc{0unM!_a@aLL}h}e?9ta%aAElGq+EAYA$xCZov$bWxM8@Xwz(v-7sW$qkP@mC zWLfM~rf4h!V#3HS`OHgeLSSgQ>VhpZFqoNnS>BQ-PFkrmC}^gs_@#Cm<$s1Z1~wJv zdn`2?7W?Jl%5oO@C8u*%wiVnLYhUb4`>_DFS)L;Nu5V4Z{=O9#X9kL(o_ssIn~fRP z7`fR*%wv^v*47#f{qciIkHMwu%8ZDzT>n4n)LxzW7T1+sr2HebOv2M}6~pojNw;Dv zb@pxeHa&c}&pr0e8WQ(S*z9QdSc;|nX1TY6=uCdkA2w?eF`T>M_gQ#bQmDG(Ut=M2 zzc(aVXgHIu;aFqVuR*lC5$e?Fpfr?-YSFG7aZo>2A6fDMe&M{p0c^_?kF3d5_>wnII^uIX_6U?h}zYpeHAgk7Ty z0IFJTEoxaFq!5q1bIm@Z{(fZ5Iy=gzn6&U8G6zLIyCo(yd6>U`nY4Nbvr~T!kv@%1 zS`DY&xqlh6ts}u_0p;|Wa)cF;^0Thj66`bFNV@d$a? 
z>3w_=;@D^ZQrPj@ey$YS_t;K9(W=9KRbolUj9e{YLSe5%y&w%uuq{!D3p;TZ?($|1 z|28FFsgq*n<$xJV6~dSLbwTJ$5*+%VM~rhU^=674^uSe{PWLgg4+HYJLzS)06qQs;&e~;vQl3Y zU4>{Fug7MXW*|YF?F;P1i%_CjpQUypD2H=6fdN8Q*>q67?hzt7yK$TNh&6Ho*NlwD zYPcgvv&4WT=ni;(;j=lXgkWJy^%cYc6E&Dls}ez(+JPnp%4P}J^H0A5kVi?x1xc3R zB%+#{ZKji`(Mf?w&-9o&0TrlM)8wx5pw`rinGYYnhcvL;$AX#Z-kfwrg;EfsBB6w7 zB5U%r9`eq!sG;zBKdc(aPVG+s&tYL$UF$1abkE?0P5hK?7AwMr+?+Itr{z}@?trUw zd{7I?7EI=ulErtZeF)OaP=#=5ntj8*m+Iavl)+7j0`4Psv=ChnY*bUSPw0%f!Q#`} z7u8CMYjwKV8=a9ajqc{IY4?@~w0ynhyPX}h7?d>w;L;F);A~eJyCywVa9xHRJAr5;{kHr23_S$#c`YfmsoPITlAhaeDFCG1;sug}J- zC2wB~L|eTp+9Qf4oRjg+=BY5z-i+sDdNSL0y!zDc#^xG>J!J+DCaDCyF45PKsnCZv z&j`O8Q2M>priO6F;f-%kj#OXJ1pQ59o}yJzbP}24@fXfv{Wq8RP$pGzTep+0KZt!c z>f9F_Q;FZ2a?kLQKPrLZV|ZMiSA(7XW+15d@dgmA16YiSljN)iY5<`aCOWLQ-keyH z8}$REX+?r8!GW5_DzhJXnf$7s^s!3%bfJr+^0u23(0jxlTVX>>kt))^lxwPR%hp05 z@P=`R&yhRg3%=u~)Vyq!9ebnt7f{%DKP(6v_g2lQS2VyIZRh@a<`8#id^)ZaY0c_A zM|c}w?d~$$`)6Pk&gT1sd?21~R=dT?ms;5EoY$8T45r&Lz*G|gW?A1CJ_;#}N401i_2sUNs zFcrJLsIoQOGqm&?9(Rl>-0eNc`N3JY6;}}cR)P6GWrxh})Zcq1-7xdQWXq3>`Nel@ z6YyUz8dzeD0a~1HQTIAFBr@>QI2vP4@;AqQkEJ$PRU&P8UeI*rYvJtNKhaF?TpTG- ztaR^>izh1z`rrd$$*%^rSKB$wOi?>+W>i)4iVv!kNxSlg(rEvm{{EU@p3$!VA?9E0-D=&gI_B>JMEA^n?oM^5OeK}@Npb9zp z`6156d4xG)eCztQn~f7I+ca9RaXi3;jcBg4lTvtShw76Pe8okLG8Z&S8PhN~Zu}!r zQ)c)j_e)1d{)qON==J(t7l>9^Ub0{LHAn4%fG9omn`$w#DdO6T);XuVq=s%jyxoEr zy1euOI%^ViSFHfTYTNBr1QL381gJ#*k@BVYO%ob8bVt58L9^P zGFQvls00%}kf!%`zIVO(cHtM7_eA>*UG)AcdRl~+y#8_edE5_}16~1d1=(RnfGX4l zz~u&+zRtc;SmHIE&xFM*>+Pm~Ke*n_m34A$;}-mdlnHWRRc>Wy${j4a%s}iOR9Lt$ z={!!}^l><#giRWWz0z3aTmF_kBjVLrpIFlU$MyguM|t~))^I_Df&T6yj$@B*mp+OT z>yN*3-QA>_l03rUb-EBqpLPwdR!RmxhU~+c)F^mwuwE_0sq9N9HYNj0&C&eL9nqI< zUS{B1uJNWq{)}wW$_%c_E&T3Po<5xu`RIVwMJZN(H!fdLw`keAbZ4GVCv8lmhw8E} zG)`a>K1-6q8*@UGH~(Nnxo?J*W?Y7wWl-zS@q=M_=Z^60BhSJq5vQovgAP|YeMIfa z^Y3|_ovU6)k(xt|<1r=>(2=Vj4JC9K#u`Az1oUCZI6v+O2>1u0213tRRbfxtuP=%k zuzdQq$rqb5n(sm#6kwCOEbL4*e^sk(m!#*7kK*W2=@Hm7)STu^Ek*Yoa34+k;`JaZ zP8@Jaabr5S&~IYvHrL04qF>@||7qj$6Xxue@(%2=3b5jjo+6Z#B0ahqOj-=URM?@{ zDtU;n-6UGGjkjp`YlQJS;?TCxWGZ!7Hz+B-$cUSjo@%=?(O(_;2|3YlnA9OKC4g)e zzHdx`0nE9h=;w}Ck+jLlclaS8Qe?5Ldm^L$ytB-5uMy2)3y>D}Tk`R)IY-yMSCM?F zk-~6iP&4EEj*t6)ZMzdGe7zZ~a06QLCZ>K>Qh0d78}`DTp@sN3LiANkT)y2|UFM)F zB7QRy9&dTOv2Gh9$xokYYPiMy()e*FrMN#1AdXhLwt3z6)A3&@?E zIpT8k!ClU#lN?&!{~vT5?>NX91#}#AW^bkjP~v`Zc5XH>_zHw$TKD$^6Nf-pR>WD* zP<10hGk@g;1qFrU2AelSV8pIS1nMyRQP;`cil;0(BTwDMj$opP!88_?)66Ls|6Boq zYrfVe5Xt@0(VeG`>%=uvGwiKRC$g-W8n*!miLKw+`nl#I$J@`?tw{AbK7TSpv;+ zy6}ujGpfGPZ9hp*NlDR~2fY#0&WO{|Brwgj!`>a!`EhbL9PF)(0jVa-yAxGvzYRzp z8rCZ|M&sU^bxh<5LpY+A*bYlrD-|HRc()ddWu3A?zBPsKs`M(*_qFcYA-a-KFn@UR0pQyB$VK=+6| zSoKqZyYw6lDb+ahJHs^?!;}<`B*%*aA&u^u7nVS8nF5)xs=a}Pz;dPjexHoG?)5gG z4??N7g5%J7yTx~fhUqkiy*NQk5a8}2hF#0?y*)61736we^%c-Ut!GPsi)>JQ%$Nt?jY#%y{f2wb>jp8FUPIW_%{^OGxE^7R zFyA0YT2ue%%d=z#yX@+y@MmG^^!+u=GHlJ5&|nsB*M*wXRAU zvgh`4ZEaQ88@e~8!1P3sV?31vX$-^fk-&^^oIY|{>Y2h$XbrXU*k8o@F6zq5ty#1C zU0LVLyh^Mm&po^oQ90q_46zL#G&)5TH`dB)?TefN@@@{6% zobF(ziPpKA%Z{J@Ra?Q{8}%&&Xa?7Tmca*u$sf0Hnb)mdJcznKvJ%RP39Qca6f#Y^ z?EC97A)b2Wu8bKtVXQR|d^@njbXWT=iB^%*i#*&7l82~X29w5BW{>9oL>*krJ;5Gb zwiv2F?+*Z6{FP8(f$ctX2h8Ek$YJ}a&chI6G1*%?Ojbr+&%QBE_JM8xu5jz*Dt3^C zz5ab(#AU1TPXKnd9obg3*mT-r#J`o}SVQnUl79_=j1695#SgeX&?iTg|59IRM-*es zU2+zz>cmunJI#5aDc&~ZN@N^^5~aEf*^9}#TJs~c)>VmWTOeWWqs*^>j%GBziQoSw zDCjfi2KrQoRK*688lYNQG2f5RUDLLQieC9#i9!`!%-x3zHFh=rd&8`qI1zggt?y{# z!iux4TJmk$#__)^$$r17Az>7LJABEcZxxHv>vc{wb$TITG-7Fe#l!=?bnV@lr__mU zuW7N~?r3;ACHCmB!lN5jr`mWoi$WgiPY7ACLY3w9bx0^%d5_Hqe^2u-&v+3kbECgs z#Q)@3tzYTv&Vy#PIo!3nZfni(O3+Ca;HH( 
z!P&lYg(nKcKBtG(BiKYSHA^!}GjubU8Spxg=nC3nw7^9EoP(@yEq3Gz6|E)3Oi;{x z7>JyPEd2N_d@_0FGCsvt&1O%AVujn5>Fb>oi>>RHQ|3$ZVtkm~s)*K4md*I9fP5R} zgZiBH5U}E7&-Ml3+Bv|jUZuW~WfvhC-^cxyu=EwAk(ke}$|E#r)Ldz| z%{-g;5==T!vF8v|Ge0;J6lj6!IN5hlGxiLH-0GzO+-V^%xy$UdFg|W> zya|(~Uxai+J??Ot$sxG9DUJ}iHlFa)lNu2khE*279q)=Tnr+rZ8J^%gjp~)Bqf|81 z4Fo+1$Ih`uZYsKv-{(V|&p?3H%=HC=zXit9RtG>*5EN~6T1(z?6Yz) zLKaZ~CSvHyHQu!=P3YcKARJf_#{Ahg?ZC_8P&&rILOLkHddPN9WE{vjdUM6at5>fk z@x*yW3gRM_nBLW4+X^$~mDu@VnR^LSY!3u^H*-@;)+pmrIhR1&;V61GhS^K4Zkm2v zm{*Q&1mLxH*!xRKT46nhI-syO8=asp2TgfihD1G%J5=lEsnQd956!tr#mh(BvceYV z*W#0SCqbCGIPF!he4$m0+ZR$%e-77prR9t$=EpbkHjD#q?ynvAwSzkzX@@OMCKv3t^%1{0MP5F<3Ktf3 zT<^?-*mKOACCVpfEArl4+1tw9^g19*-gxD{tYx@*7M>Qx9d>dQowZ;8dp)%$n3nR) z{u6fJIe6P92{MZ;mvp74b;)0n(^I|KEIm3m``r5IW*c*c*AcS!5B5$Kcf(AEEnP-a zSmrh4GtxYXwhims6N*oI$OS9HTkns_hbMNP&+2__I40`o`RcSA=AkJ^^MJ; znlQBG3Hb*_`5hGUa*_sBBI>P&A{T7;UK>sd_uc=tV)%yGkkc)1EjJc~rvlN)=0rZ4 zV^e_5HD8m+!o!XJkKJ;aWd(JZIt_4tw>U>u$@S|kHknD=EJD8e0Q&#*AP^uEss(O` zF7&GN@RIShZ2p5kpHGa2qHG|4Wk2C|gpK$@;IPjCaq|Qkx7+Co{AZC@)7Fmc3kz0S zGm)DN8#i+67GC2H|^2L!1gPn*ppguawG`^m{Am z?bt`MLlP!zwihlJdTY-E8@JU^3$RoL8}$Uw)yYZx=2_$|9M^gymGLUkjyWMVM(SJ zFgu-R5-M6HZv;Sf+spqPqZ>=nX?GP?McLaEo$z0OObBe%k7dcv>8W@Ae*|Dkqb1tV zf0NJp&v%O{C{J$#Ja4J*dYnT8NDepym@N@VaXpXNo0-Ob47Pmw6uY=<(5qrNRt{oJ zPSsCnWK)}g(|<&F{rVLLH3}{uAvil1XhXLFU3g@`k2oU06&B>+Tfiz1095Eyp+aiBXzq0j*uTb|)PGrN$>r6N2|JO^~zAH)QDq#m-TzMz0i&>2l8>kYM{UPg$U3 zW~aP@M2qQi^&u}y@PRsO#XNxV!;=vo|}+3Sy^i64{CC zzLw#2GB3ZpCTkS>6xAuq7{0*wf1h*yq`YKZkwdUkIehpe<)trOw-mc)k76@3ME=YD zk`P)D=0R+QVYPd!ubb0P`g@C!#@T!ky)JLo(T`tg-z7(388;fZpX@(>_uzi!3#kr| zIdh%;)4kvhGVk(D@3PEvL5%$6Xdn4zfqJ6L;n1t1!f{W`nH2U5XSK^`{}}UXV7s^2@GXlryHhA+Eww9*jda=u zQ|)$*z?wHqTIO`Tj=Zl&X2$kXQ8v;}=03&)URT63YkyRC-*3mL1JB4)#ofNMbnbE>RXI z%ffx^H#TtJVx8^8HrKs>vXiP?)!Gn9s*uhp)X+YIxd>8iXU_$2LU8kRLrbhf_ zj}BZdrD`pKQsJD%Ab~_nfW`mUp;|in8{gcsBq?AAnR+DL)z|24@1FLVGjVf04f6&U zZ_kfklp3>yoUVy*wI%$>=B&`QgKn+H<@#AK2)nwOX53bZc{|_0cAR-Eu5eP!2u&G0 zYYS`jFIFb->)-ON_t*&k{%T*IjUM1Mp8d}e`QLo|-;*?1qZH7HY<UV? z)^sztumj@i?%i1PBduU7(txBc4VvZ$jcaRi{rdI0=)+u5p>0UL&jEZ@7A}J+K#$Hu zr3DZjHVfIBG9pcu>JdPaEcWJTFGGdHW!wXXR+D2xnF%)-aa+9D}f_pZaCUYnr~_v2s43$h796Y z7XhRQq1AFGKL=k=`6vMvtQVm#t@Ha%ijEhJE}hFfNwCwwV?nO)4vGY12f9RC;=tZ|aP{5ro^^3tJyYXM+Df7^#rxuMF?l_+`(o9*YO zOryFE6S}xxv)!w_Ro&e)71hnF6HSPu!vX5nk9tHv*L>>=3>IOYd`*^Vw>hF;?GPHYu>S3Pc^I-^iS6kHL@SD8-G0*Nwct;B9%Od+$+wN$9C+8Mz! zcBE&9<^B>892>2tK9~Pi*>%YNR*ii%?X}dNdz+OWF*n6>u#tXnSihD{yI4v)S=I2( z4?W&jD1}u4Zdx#UW9#jnZ;#%)Nw`fbR9m_g*|euRc1$o5CoZWYy26{!UMpEIe6sFi z)RPQDN~vE(Trl`_q=hv(aMf;3+F5l3rU7k2H7hZ>q*dJiCobMRk*BM?6zw`74K*_2O&~aF4j{{|AoCFmy1b#__ut91438YxG)s@j zJ<3|RkmB`%JUn38ab^I!B=z)8Qg!MTAO$M55aa+!6bH$oS#QeLM2*4vb5XppCCtH&ac4|tw#P{XaF41zvG)_i=xx5N^ zt2%Xojs1-DysULJ_TU3=buW^vod`$u-nBHuktJ$2+#Q^m?5=?Xf2Qdq-+P zgLeYEZtGl(jOL&R=NvUlu-$X|Q2yB?WaJ{+7KYmwulq!Tex`r%n2qg7qYpiSxV?|I z%R%8{HQfm*sI*sddwx9b6Vg}qtlME;bcUaL^l}S)q?;XwjX$7%rBB}Q-afzSYG>M% zt$;Hs73L_UzkO;=AFcJ*rW0zHl&Al`QOhx#=Wa_by^q2D(Ko)S_=5i%c6edyH%#J| zH7v%^tAwAg@a7Sx`Hsbw2N(%?ff4&u{@)*<5_WK?`pFE|-gAg9=C*EH5UK)anfL2L zY?Wy{XG`9%9A*Rvn%Q)ED^|$%{i)4(- z&XwPE)|`rsC+^2oXW-PeCAgo@9ca8#vvDx!U?FYls-n+2pLvUd&wntJh2@to20W_e z>t6n8Mt;&ym!kH^A1_f$B(0q38Z!40D`AAEqL>z-eahAr%Kh2L`eVn$2iwIC6*-Tn zMJ{5$Q~OHdnHMs-;qCcK<(2+exm2d}V-6!S^cscW9wO>3*;_*IJp%KtU8Il3ZNkJ7 zk_OA`43V*49{v_kX(H{sZ1R^Bl8Psx{%UKJr_!QE&le~D>aA;xzQ22dp9-a z$45H~*>075deNfXx3zJ?L_Jo6-uy1%YF-^?%S=d)kGsrn?q>s1?CE%T>Ft70Mx?Ejkp(q9((vkl#7daf-0ViZy65! 
zQR?bG%vmBJ5wO)y>8Z|n|a_VTazTF5hp5oALpF@J=JKfR0gRC!pTH6~nIIEW4CjOo$=&9H&MZRhAFecXuWimWwi;S=aO zO2Z)bOD3aY{&hOKP_Z|e_==`V#y@few*%4Z@uo5Z|` z7>%bJh~FDYMWL#iR>1A$q(%6!`2eC~mI<=E^oMI{h=lzRz_L#Y{1C`eBM(bd)8h*@ zm%oNMxsmIkW#bkoO|gRf^HIB?ZAXJ4fXM8$O6{uAs42Ex7k}#RFdeEsD}54^GP_4a zzfR3JlvSk{jF)CX z_g=-jEy@hB#23U{;vF9Gbid@dN4nar(@f%{w=EjXd9mKCh0FE6sgtoDt`*qOw=v~( zIk zG}QpND3AWQuCJthjJ;AdpQl}PJ|Iz6 zS!q$7hY$4^N@KqoO=xyE<$sRD7!k`B8P5XwmPU^9QAAO>#qs!2(_U(C42jZp=~fJy z(rSNXgAf;o7?S(pePpe3OL6C_IgYxjxnQbleaT=aIp^jZX6Wt?4r)P3UWG9>8Z60C zV!N)QsYC;&$&f^+mH&=ADWy-vneEx&th)BEgs(DS3C!9G-_6HySx-ja#WM1CGDFmU zkIq$Hrx>T zFS?iuJK<{o;AY@+JznaNhlbt=v zzlB>z{g1i)-+c4`j_-T-RzWLubkf-%H~TcNit?gMi;SK?B*Xyr>z`nBW#Ct4e_g7(QIOdTo)^ z91C3y0%+gF*o)K4FP35o(V0y67()7ksRS6A5Rg5%;YsYOQ-LtP19~1d`D)5`Qy<7F z)M@+Jem`BR1TdQhiFpI6?nHK`c^w{-rUrL(hFa6uJ|C)SByWwzqU=DqV%=3W$*5b> zT{4CM-ri;-XJbRBg<%u`@Momt-FX>EbD|;~WV(t^ThyC2giJi%M-zEA2ia370!0xU z1$U#Jp4^k)rMuT3C?LdcZWz&xK}_+Ii5;jsVW9le^1JXTrDD0+z}?l({De*(A@v#e zA+r$x)XvrS2X3;vGBJa$?DV&L_8DvQm|J?Kh_LzGX?@7~6wpV_Ta#oF>pc)gJJ^~`)cMj4##%&^ z1#$T`uz}-rzdRB$Lu59gI&4Le7>D)qW3Uz&7;%eO!@KOCmg{|W}VX7bcqnU6AgeE~V|#+&eV=NnEL zSKu-W3VBtc1zEMOO&rc!UQ}|!-)c;JhrZ$R!`c+6)VB%djAC)>$ASRWct?-eDD&(m z-QdwWt36I*y-{dZZzyVwM#**JEcS@;73r3ijSiWCpG$5lsj}A~A&{?<F|yNzp%$O3L}s0yJo5_{8VMR3&58MH^Q?E|@(Nys~h{x9T?E zET<;V;~v|!MYgmQF#MFM?4<8O*?)fYTJcxi4CsM9`d#;o^VvQzHml@2anKt{p9%Xc zla*1jH>)R>vlm;trVAOI$~UqfRR}1>=~38%edx@~H~haU08MjF*L>H< z@XL`2a!D;#Js9jdeB*&XZ#aw+`*vH6fivaxmeMydH-hIISh3H|bXffD(Qnh^DH9=u zJlaxTNZwMyK{5~xY91|LWAu*<^Iy$Kn1XQT)9{0M?VRk<>M6>u%U|B%fBJ938qYlN zxyApKtzm=J8AH=uyA}d92o8>CqdhNQUkP*`vIE|z@(43iSJ2WW4`BkaM#On`)wHH- zE6h4C)$~_xDDd7L_9S--hc#9=P3xz8`pRj&>SAe(RL~()1cB;mcEpy%g@uKy_7pr& zvZl$MfL6HAR(J|R0Gr#2gSNJ7HDN?LZ5n+-RDH+YYC^HbVE5tfwh&G4>MmM6C}Np^ z;BUL*JQF}d+q9wjA|OG*xHn*1_C>+M6e0hh638JRa5(^Szv(Y4ixijNd#EY z6gtY+&~+vc8QL!zQdTSu0XRJ))rUaqPKcKhI85JoDKdRPGwO-6w0d@`WOPMi%)0koMhSO{UwvA2w7JbVN}p8OKpn z1gsz>$rcMLB4R@Y60m?sl@4i)ir6!XfYLIGh)4;&CqWSr5h5Ujnt%ZVgb+dqA^pB# z_CDwAd;dE7{^O%Q64CE_*IMuTHS1lwo_0hZT3vSdD==9l1@C(giM1&_`225JtJl5wVx8bH%f6J}(eP7ABw$t{ZG%=xS_ z#97SYrqZ+?%j?G1F$r26+yq)fEly9HX|Nu|!`3AyZs)J)*+0K93UP9|q9AzdvsJu% zCrM_hNF7kHg%_aI;3+`Hl^kujGy8Gs9Rok{mayhWzyGHj$-&zr7G1(b?|R5S(}Dcl z>vElZmA4~lZnSQaWm5!q&a;yCQgd(Yf5iVJrJi~$3LO`3OMO3hk{IE-A@$Rh5t2a# ziwY!5olP41gkN_U2FCI5(qfWE(5RYCz$4?A)Q;`t+EHAW59%!cD46ZaKZ(h-JmIRP zNWd`Slr3>cxhJGvPePOB98hszm@Ck=GlFm)NlX{j99CdT(f2>CZuT=@n{}_Qqx2Y- zoY(I7C7vNav{tVYP@|sScqS;Et=W-yxx@~6c}u>E08?y%=(eNTFRSggc=rhQB}8vQ zyBev7V$A37=vJ9qvJ29COJ{l0PxE;LDQKA!$eC9g2{LGy{Ufkt#NQS3ME28tU>pK6 zesy-SMx@{YI(6FkyV+-!dFzy1BA%F1rvCSq7>@AIyUpn<;p?-5NH>>tG&6kvzG)q@C5+u~ zn#p1C5Jj_HMwx9gqaAo}*Y=3NzA#5+S9O$BH~whc@S{%$-O`o$fBe*p<~KI<78H6D z8ge<|y#&GrstShA**Z$8KR;g5I|vl@%!uX_M`(XU%hTQ+KF&A=N;Yis!-lC`v28wE ztQvpPtE~QQ1I%4iX987BeetzJ=o2NSx+I4N>0z-7MzgiG@8th}Msr)E^i+2IZ4v=F z!TC%qkUvH~ik^t8!_?=^hL|^b2Y&p|(OCY^1B4AF;8WJrq;ZhE3|7r!(>B-^C zyM8u-rK8lz{M`SIs$d^Y_QT{A(A zOl_1~)djlG!l840x>OqP6Falld&}@FPF7KUNvh3 zuWfugaQCmNZNI+#OXJ{zgO{%C`2$+gnATFm(kbn{ced}q9sMO6Xq+Z0hSYPm6wuNx zmR_$&*n(g%F13SkAW0`Fr-T-6MNxxHYL3+J_C`+I7(Ujc{83@ndR2>B5x!{AqE{Rr z8wcKMm11tUuC7@c&((r#bq4~5cq7$Zk+l7X?2sE#9Vl~~x1pmQ#QAuf0icqYfhUpXz$^n`GW4V`O3Fk=RvFOzr|JFmcW&lUZYOh&>}0ZHLNpemgN zVuTZ1dkBcrJ$Xr4N%q%uD%@ni>yl0!>lK0Mzz1I>zG&HcNKIkXcy8f;@ppPa)@u=G zc+7>6{v{!cFA+|+-?#>{r${taXgg9#Nv8w*AMsI9ETI92RW|!+GWn5FS<>1%Ss1t$ zILoSlR_GNAPC^C7o7yL71_m0TrD^Hu1Y&vn8CtHX5gkzZAT>Ol5$TS{%X(RVt5FK* z2-)#1-bU!_5n0N|XUH?}RG|icmT7hC=&vEGO42FJHhkUiYZsciMYF(#d?TKf#R$dn 
zrxJUtvJE-WIDiYxQ`<;Anr7DPp$$eW&k!@iUo~(Pjlh6VyLQgcvzn!t*uqLDeVEffz4q*9d&In2oK7FCzTMDr)wEmnN@7OS$MEAFKXBbzs0Foy zf$(K9Nz@$=^&J-KGfHpuKVKcvI9osdu51)3Gvs>NyvX#XaU5Y+5oUW7FUB|}q(tT~ zc8d#lwi_CERqXK=BTzwYA_K(Zdd2$8eax@pi@07}lioQ;^1YinVM!r_5-ti4L7Ee= zPn#~YH^3~-7USP)nMtE39r}YqK4(!}@)Da#XNp7pP&pkBr}|rB@xbzbIs2IdRpJlL zjXdFfHyV~SIA@b=N^jj>kz$l@jIAlbN`fI~^la}Oqc&r5f@Wl3Fs-o!0waLtzz3Ba z2BnvFSiC1+X21D#evAQ6~IHpLF63f{$FWpT$O!1&JzUs4MN)+bitU zpEvizj~Dg#KO_zk<-0H|_iy-eDY+Ak{u3HA%bJs)*A7TQrDhAqqO)~8@X3Hif6}$IOri(x9P>plXX*DWO z1@f8ZRq{8oq57&;zdKVh=|WG#8-=UJvd=thD(xs!ypWu1HKwQ{+jr}Wm8`w}njOBR zXqCqZ;au)A&Jur%O}V!W0%Mig7N;Ia;pn6Zt`R0}_cP%}-{y&9#iN7CA357*qnkn% zhq5knL|s)=r*;PNyn}}$BoX8V1D4^`f7HwL<$dd1RY(6!VHi!XhrHXE=7D3=0p$sHx_TWHAl&Zs0dz8a;)trfEftiQCk;U}JSms%(%_9$vfgIEQu z()-8%`JmD-MoSvxZlxV*vJsAVzOOZScptBP^C|o9C;6ZM>|^xnx4(hcVP)8|`fqzn zE}D9D5Vkw1LMmCng=uy23Of*w#yKj=W@SZ}bonHEdeeRBF$BqlwW^0|OqCUDX3B7yuXs@_QL~$NE=~fO&=Ul=qZR+fyDEf*HaXr@75@2w&*=5;tnd9M@bl4 zlnT!eZe_7x{nZBUI8lc>ef!X$8$KKqpCd}S;&8^w*brH-Y>5u(r%3`E)Za`Jp47k> z8<7a*S+Z1)t!51Pz-2MRO-c3w8D?(d%t5NGAaZ2A38)Jezj6tGeo}E zLaj)s?q&E;-4PUoyfUdgi#*%lCtvvK8g|P)sKp^8Q z85FKYx$u;YnPY?KXpgGWT{cIb%ydYa7c`m)eo%)KflHi0Jy(&y-OG`&REWPFuI4uM zKpL-KA9?bwqXt7i0JsW~zQcaZH2P$vEvjAZ;^E8%V zV$a8v@*qPPZO@0uR~`z?COuRaa9xKc?<>UfJX$|sG%6;i`190ZgvK=t3!9JQcf7pV zyvMT9TkgBAE0gD76oNF1FmK@QMj&Sd4q*3qYh}t%#F52?qitz%zmAk|Bu3l6cBv8u zywS{Ze6jQ&xKP~~f3LtLwNYs4F385u@aJmb8?)Z#NM)|SGV%J>+0orYs@34&b|$`} zlg`t|6k#2oHqFZYr~~ug1Qm)^lCZ!99tFCemcSH4#?V^29j*V`o{SsMdbNzxLQm}p zhz1T_Yafx?!fZ#SYnn;_Rn^tCU=~v(n>B^DM|1wz{PN_#xBz9)abC*2+Mo@yG+$nv z#`9BJoRxb%Oc9FS@Q_WbJGZ2v@Drh|_hTQj=;f;K^C`bP})=tMPEJkJ45n@r&Auqy_n}Ieff*X^1{!l z3-_4ft2MkgBFm_=@1=h|iw!pWS*M&njEkDaWctaBidW{$^lko)S!rBK8@v1xnS$8U zFSU&4pIGs=S<})|VKh`3pkQruFwz(pw1;U8yDs6q7bl%~@k@_mDS%%R(nFj_&7An> zWs4byJC(gZk0M~?TY(nbPWrUtJM8_2XLf`bo z^6d`wMWY#i=`3yjR**BS;ma>mm3Yc#{jmdz3etm98EZx_u*f~TaEW}~X4IBLGco^c z(syaXS^6g*iu@1!iR)xBR=Hb2TTl7Xdq(~kJmvI@d(yW2&wl=&c7a^```hM$p@8r} z6*qNh=frOp^Uh6It2iOF!17*5cj>~_uU1@&{|J&Ex*twoGslfZe`ooEYXXQsTq4R* zF&PZa+ODp(kV2tQtK83i*nAv}v_oHp-;>Q;7;YiU)h>9ChuZ6|Ua)xSKEOcH0MVSU}RqUL>?XK#279@ zk?nb1Lbl?TSWk%swoUixLf&guT4D+(LM`Z28(KqdSV6tBvTYK$(S)DTHpdGdWXLn6 z+ZI%#xW&&h+-z+lEK9Gi?!HYXd?iCky0B{>c>p7{lU>+JNFVaRCEuTs_+R`tom zvkKs4qNz5$M-J}m8J<+H$2@?q^xYpfBqF{@3YudoyboBiX{n>A^h}VGt}GC+h@wKZf(DpQ?X#_? 
zUj>I%3a3sCw+JFB)HbPj+Kmupou37-=_+_VQyf3T$ab4+(~`xD`LL8J5}jBOGrmcAOUpriWFZIMOUcz|6f;P=!BQhV6K$LWE9fe%SY8 zFZtp0$#Pg8<2U%Joibze5=O4k^qHN3k_m0(Wy0zF_-+c$d(96zXWS~N>UC4L)75+00g!8Mc(;nJC;8C zwwaJ@_{4bT07{)cCI|bzyA#0dW!)oUAGNDXL9@FXmUT6H;%&Lg1Omw=!rScZeB`H} zhtd_fX*ZEW!uNCp_~UNg)juZfrJ2>uBRr#aXw4}c?i$^Qlh;T|zM@iBL`Y(E7Z zPP@|bX(!ogV0G=gsAs2J_8G=(BEr{y(!VuWNf%qgX8$pHODZi=i{!@m?P1}g(ZBs` z+MlZqKltyz%p^k7I(MQ*KYcy`>1uvJWH$Sy`J;_zDKP&y3ID#NS*tla{uqrB-5-j) z?qus0?e1PNEg75e$F;9w)AKIiqsPgs^{uFiO%KRa1M-X5us|YWdLN{RQ=|Za3l-VR z7~aHfwJytlw8FV?ZMuu>z|BA$*VV3q`Uvn$`iIRInh=9rHjE#L3~Idvxe4lEaZ|Z* z!PocLRVE6V%BgetWu&6D>6VUx&8cw17)t}`9pergyWHje*#2CyaH%-fZD*UzkVRy3 z8;?89QawmIGXZZHlI;@gVn#qT_9?sZ&#Sy6ut=+{3 z^Mr+Z@9SepxxEm0uy~zXQ`&Ow*1)RK#1YD?ZpYY5^-|i&^t|l|`;YXq-B{te+IL}z z?OB>QswjNWKm%#%$w?!@s_Ng7dN0eoWx<`u+w$`B$QhRibY$^r_d>be(*20GkULWg zU3G4DlC%m7pO!1jyO(hru`pb#boBAM3acN&O?_YKVT=rk)uA=aaTms^07keUW%eD; zWWD{;zaQDhb`2!WR7A?;Xy;C#=DPYTl>NW;`Tud5``;A*^F-aV3&0mB*Qj(jm2TPq z@Kg`+FLq$nG|`h=-cH_L(MAH+CKoeUDa~H5k*DE3Ch6yLR>z7lj9##-8Gd<0 zd;1Jbl73PS7>}370P^CtV~o0f0O3Mv>m+EriklA|f3a)RQwJxc!pw;wL`F=wL7)_- zjNpN7t8%>BdgNuR*cPO*weqv^R_uqnp{_uhVlMa&0_Mw7NIU{QNQNfLyoF$net^fhD^E%p56Fw5jYyhCBFY=(n#R~>L=?6X> zvoZwv87ieYG}i^^PTYDh8}JQ@9bukV_<{t9?JOJwA^NKtS#Dt4|GMb@YUGyLH}@!XvOd;`q*vGOy7 z$cnIfcA8}%YT7nIGbu++u^J<~(~hq(=VUy#-nPw&OgckQgA>pIkk`+L_|c5&-=c(A z3@!?u8Qn|HSK);ztb2}(@oErc50kHwr9>b>=Lyz}O=e_J_G&rI=F2jfM zjo5E!+yO;?#RvMMNmQXhTl1!$7%xQ-1K;dg;bzNh9QKo+3%1$QSzW(Nl+gxKmp{31bX}S+?SCy2gN9rG!CV zNpR6m4*i3m6_L%HehA?!7eZhc^f{|ZZP z5x?OE{M+z&SJb#Mvw&J2k1>d$9($e?FzgTKKj#t5Vij3?Zz8K-#NOiud;U;FIK4nu zGnBW|=`KQyWh&04^Tj^-*aX9Zb}8fceqy{avpha9E(}Jromva&rABzJnD2^bmMr`= z)~$xW-JQi`SJ~ThPue8QzO&DnL&EbwIl;xb)Hd%Ea5}kK5rX3K>=A7V2NR{)yX~ zCWMpwgLIi~5zS@0rsBzjo>+mq;KF~4e6g(3u100Kn^HKob~BP*+f;2x5g$ih+}n|p zcEw%7Ft1n}qh&6)hKLAVX{GX5SD>}Uu7Kfn(-L%(=p;U4Jb<4wi?&(L zi~1{Y$HCskQ_g{1QCb(;rGgtxD%MZb;zH*c*rE0~(e@&HhfX@oUcc+BYYdB_soX+6LxtCr z_wIoinI+gKwV%q?#nCv7I#^Jx22>+y1|Bj>xx`uSBZ)##3>3_E2{7Bzt_2^ipUI(N zYy{lB47d5U9WLnQwQ9}T+;k71hwx`i;7zcswk^{^<9tozP6$vuoh!4XjqMJi1LUa_ z+?|^`kd$dg&kxD;)Un|=Nz`fIcYzp@jt^F;1mR^yeD^G0SmI<}M0TtSA^ zz6&1Qyz)gJ%$lfHq594+3490VO@fQYyn;*h_l^C{QAR$>cKK_C(>A^jZe|-XY*U_2 zLp#ftOo3)W^WkrC>p^3pA}c{^ArrC%mjmK-su-=l#|(hzhx=c;X-NO9>>BY??isVw z6gHg#mqb+I9k|T`K1IEd-v|6^Ag~leY19I!;T5)`rVsFMp5O&RTTy0eVCDA)7gW^M zX~XQA$V!r$4TA;I1{xl~wo%gqJa-^rZx>AAM_$Igl1@fX;WsDPs1V&cPIN(UPfxa; z?F)IX|Lop`obLfgd3hy-qii_v_R3HW1Nq&R*R*JnYK~qHnn=Lwq<)k>3`RJmI`obZ zyCQzwKc3TWSCFOz9W+CNo1YFx>t6r*9?wmBJ>0L?*OH7vW(Pe}XWv+A>5 z`T+HkAaeIa^6sB5J4+MyDl~mnl2dZ#wnHOJcT7b1*b6Gx3$Yv?=j=Iuv#Q+th@(5q zTUXjG7JM}%OiziA?^vPi4Jh+<8ERAZ$viPkchk0AsuZzMiR#fQkJsMkjnB+Sf&{_y zOWBXb2ADmNO8)58yi-lEnVXk_o7Yk%H1#}i#l3@~amV2Q}Xc*5|no#bSeSqZXb`J=ditjtqOF@Cdi_UJ%oL;W-kOY&ze=%VI%opS1zT)dyIm{feIT}0Pj&51gO^vtZQS9Ztc%(`0Z`_;2jtO^tl|>;S&E+N=idre-g+}lJzJZKk6uSNWb9$Wu#rsneKP@Upj z)8hia3sq?eGcq>^m8Xot%70)=-MR)JQmrFgtBb$vBD0D(b~k_ZOh0_z)%%+)4P%bM z$60HCOVyb?x%IQ4i27^N{N{omzQq@h5OQs)N6S^$ZMF|xlLT8L`E_ELp6fZ8Mf$bM zirXuXEp#RQ;0^>!j-V)EtGA>)za2@QZ5ak87h^Vd6?S(kPA!?*Yw?p~(1i(K;E}_Y zb-AZnj@w0=lMnmud27|{eRSgxH%BES?58FAv9e6Zxh33l$Lrr3${5f5{%~wbc7N&5 zmE^)155ijxywON~z6?Q3Np-n|zv-55(CA%W-SlO1(gtklpAV!ut|uG{*G~I4OdpPb z-FSKEbyvN$bGhQk!uIM+>p{N%c+>2c3$y7Le{wfFF7m-*2@)UeLX>yPpc>Q{JC%o+4=ERq_vZ<@Z;p2Tz!<&Z~oxZx;HgoXW2S-R3 zn`h;BLJ7VRlnClI$+8*YRHi3Vr9+i$1QCOeVCSPZxY{dR3&@Xc1kXLhiUn zf8kS-+wZFBx@9){lq)(w#!rs&|HrCJ88DS4jz$q}#4GRT9WYP4g}N5OxYxjLxYJ!2 zTSW0*gU4=O2dR>o1!PgN~T%$ z_`KRVSXFJ93p~!9Bk5$8Ws2jH*27B1>0zlVb<{kTWO%XH1=pF ze~KrFjPAI;_ZV=G^np^N$FiTKzTws1sGJD3cD9+E91f1MlQ{NX?r8zeEf>{sw$Tt1 
z80$jn%^rGoD#-+lw!@fCrW%CAvB$X`$Q%=TZNGE52zPenS>EGch9W%;jJTtPHg>RZ`tR|8^EE$CtT9U0}uL`Hi^6tFnl0I5xKbyZ_MFPg|n#Z8rQPP952 zgI32D%Ok6NqA~|N^?YcZ!@pSVX4H+c!K(m1Gs!@AO2eEe4ltDy1mUos@7uG(unOIy4)Q?j~ zbgI*Z_-5V$Mv~Eq#f1|UiXXgbCoTTtandEL654gElJW!-m%}`B*3$5=TBr(r2rYG6 z+u)eDs5$Y6ap5GBg*N%;bi&VULZ95Eezh;<9Z_s>M_%l-kZsm`bn@!I&h_(R!7zx-`;UCJuB`w}#!5NdQqU!7E$nFp+ycg2&? zzcCY|@%*$_cw^fi1ry=xPJ;w+ow{h5Dln?!lD3%sC2-vjSfx&k?9Mpnv7+^&k@s6h zO$m~IZq?(xa@?c#rcNHE5s_6IAaN4*bETm#cbi9fOtq)`NJOGDo+>nTvMDIVby001 z+Y*{&^oi}V&)!UvpWpEL>PFgv7}LYIuJ(`L=H7pgoOkm2*Mwi5FF>%q`z@mUIl*x& zg<0J8{eH_0L_0=+)PvU>p|#s?i;W1jBe(we<1g67yvJI{bP07o^MD%PNg?jFTDhQ7 zAEv{q9wB1N-ttjM>X1oQ8t<}73g;iwkVlOWXfpM21|4pdv95fR+>%FiiJq) zGIa9{rNebF{pJjL$z1)*dOR&JMsZ$eat>zlGdbQ?0Xm#&VYh$}&ZAzzSf2$^TuH~7 zK{mXS)4DWOdJhXc){k!vMuVDBt{w>|KMK!NSjXu^UG-S;``Dv@yUzZH<@&DZi1VR; zu_W{@dExpoYWXx^RC%Z)59BlDw1vMWHEDbUwvwNVwnrl_*rxBHs@GnTPRLAvG0>by zz>E|ezd5Y@`L3%=Az&~ngVYZ9>IhKh$}>3~Bh?BC@LycJR&Vbf=JQJFSdPZEJ?ouu z)dt{Np&0{kXvq`oPoYHgl-iV?#ZtrEb2O#7L^(CE0QgA=tysJ7C|gvLxP&6(Gbh?- zhNdQ58k)Q#BiF{Lxv`Kf_X2(1+>J&yImt9rNYv;%HHn`@lV4TmFdIMtRLRdREF~O} zgZfcOWgmB|B0z^fXbZsaI5F!q?pb0^L&_VDiNUn4)jQ@k7`6BzyhW6A@Q6fZx*znWwm+b%=Ptr7Y0$8p2Zn{ z@ufUY%m?`#IdLIY)}ycEp)OG2HDSe{1LRpo>qaqrQVnfJjF5HH=M~gx+dy;FglID!5Dy&VFWDKj zG!whW48y5@ynMzPz4vc?8u^!Q6MNcl5X21maqm51`*X#3_>=C4*i!{rDT&&W z)TLylU#yWrPKkdRM`#)St(7vmb%N@NRFz% zN%6*K%q>_0-GX;QL=e|{m*Da1P}4Z+d*EJc>-F{RP2_F2XJzqH&wexz9WKb7a;jFz zfag%4u9?dczFwiE#BMTzZm=|ZKwkU-+gbCX*sj{V2OBFvb~0@LR2Rc^>$WJbauv>~ z+hs|Opq4SO02{dwB3TKM(3a`d88>3U*decWg=Xm?!2ATjO0Z8m(<0%Ajkr<_zy@NS@L$pa^{-AAdDicSf_tgl6^GmC&Wy z>E|T~zr26qdSwCP-T57uio&!^OS4TomL!8&cM&OK>W@Q-68!Kx_c}5_Xr}9Kq7{o` zDB`Fk(1GC{gpMLf<}0#Gc5hWxcn#^sEdmWkCbQR-2)fE?H!f^R+CEf#Sr&z3IXC3T zK5Yq9U*%q&SrXmBI1r7De-wxRAxl!NjoE!db!8T=*J8O2mUl>6?lvP_JKh)ThHd#% z)?URq^gQZ}Sx=?yi!}4WbE%5fjQyYT{>23-w6sw=Bo^b_JJHdqYYOjHL;P)-U26;c z`U0EbL*}@ZP{;Mp5&rPB`RZZ~3h(-EmRI>3nNhp&O`CA9f->D_i*6XFWpN@vY5;A) z01NQTH6no{RmL=ykU_d!0i3rWH7Rm_PN${1tzSBe99Qs6%lm#2H9elu0>2e${c8B@ zKi`nQ|Cax-g8yH0wVRpdoGm*Dr&m)tH0R(A1Wf5N2XDBU*^9aN8&ZBqjtIN8=tM%? 
zZct6&$e}+E{Jr#7^P8?*So9Ax8V2Q^Gx-w7a>?mfA~d!*0nDftK|l)eyJpMW7=~AS zS*lkwK)-Ne8!o9A@PkUTQUa;#Vu<+Zj3@HyzNm^zJ1e|E9tB6E0HAX7clh`0f$peD zqFKQ_UN6m8-BsqmT9C+lcr*%=tnL^Y85wg9K~O4grrSAMSzTTX+$ZemaG282HK1}6 zL;~Ty&@70B*Qvdl7lVVUegqC26q$#j%EWd zLmi1bmoa}sR`eo?>g zU`qX}Y8Ta25jrM>6Imfhr#0hGa}DLD?Z@7erZNdS%AY#;sbvr~qT}qK;!?rsKGqLq z8aH3I2||M;S7;xQpdY}0CB{o5u<{=OzIJUSZ2Wk~Smau*>R03$rv}Y~bdd6(J&5(A z=P~>Rgy8p|9K7+?Bk>&mWbOcY2mE&$xB@RO6tIOxxW=l2;MUIeLGHp2!i~&nkbXzy zoq)_JGY#*}3U1t@BdZOk_$1kOh#cLK19gEM?^|h}c-Ma&U*AtLME0&;Cr|dfqDPFk zz0zmz9&x71P}UK1Y6OohEz`6F*Oy3~#NDOMxu5A~|bk(kh}fWD4n zv>~zHhpIm>Z1jY6ogdr}-D&bfa=26}1QCo1!_062-d)s;&ZWDLj~9~Te0rpM!ri+) zT&n*Z$MBfOMAC|Yu<3^p%h?3GSwG%Xan+<1t08G9D;^$&^)i1tyg7-4Vs&#P90I~* zgN)={&qbqYn~^#fto&BFeo6D^MH6GIZQS%d-9Hu|{y;75eGsvrlBY8x^f1lET->$d z=h3qD+QC(?P9^?wUgxz8{j z5Ub0&BMYG0fBt-b^?nff8Rv56hQaE8kYTT3;zGLA6nK~H(q)_Q|Ub@ zt#%F67&rji2Ok-%Nz3`ROmU>N2PQHJ>(yfw;j{dd79r(3LmmVfy`P~oHDW45IE!p9 zJt$y(rx%{Dns)Z+kZb}rcr7O9i>@qz2)G??z0Kc9nq}~1bVN( zHx%B#*Uh(kDgTe()sQfF@wXcH(}reo(Wn?vVrBmcIbVA zgsB5S1V8HMa?qTCYcvo}Yp5f%ptUd<@k{op?1KVK4XVYcML0~SxB>Q$C3q%bF6!+K zq>e$-h@5V<&j@V~w+S0Nqt-t_U4F&Q&eGP8O+?97H#VzA?GM(O1x4?vX^d45Q6Q^5oJvuMSd=eWs7pM?8pJ8ZV!gO zbNOJvy*JcI!c>RA$3;JbEdVp+ElN>Ia{-tPHnMUyBC_T&3xgMTUDko{Q}LvOba!fv zTTP8^f8*0Hx0)U`Wpdh^e8AR!p?V#J{jQy8j@GVyy2;_yRG3wo8pQ@OV94WvY1!Qg zcOyAQA%j!YmeJed&g8d+4~nymL}GW?tndt;HF*bIf`RJTC`KI^Da{8-Pltwgsj>Vw z$6w5=1x~;jGZT_&zJutUD!t5AWklwe*9@CNa?qqqpg9c1A<68~*an`ln~^)-9ad?8 zfxnjau!Pgby?ijzT)CkbQoIyvA||W>{I9W3B^Mfhmt`B-01paC4eRKldsrtr1Aeiq zb+qi)Ev1Lrp5Ag^=672*A1zrAF=c`LWxN8f@K;2=ugV$c1HQI3E_-as{+vxY_W3b@ z4K{r28N{!|8Vm||Tb!5_9B5V8Owhw|d(&TH+wDS4O2CX+Ry=-oC za~6P=pSs;>gWHft0-;&qFhNAKgkE-c8`-?V9Q$P?N@ zSEGWrn%D%{1`fcW3R

ju0~?FcfCKIr77FM7&$1AC`+*|ovs#a;Z}J~3{t)!)nzJ$ zq*@x&WKJ`PdE#lK`Vd}lmPbiawJU*P`e3Stw9n`n%xP}t;kVDm7T(aNI6!H+5rTY7 z<11IP#G!M8c*{be&_!-*N}wx_D=}5HMmkRlxOq!z!B#T+*ecM28lTR@GYQNvhPyMZ zhg;*Q?gu`tr0m82;bi`AUk1_w|9iJWU$!k_DZ~O%*lM%ag;1=WMhqz0(nigDXpg>&xq6&muQkYBHkBF?CdBFaga~zrXi(4!AphD4 z9n1U$h!*TW2$$6ke;LjICGzR~qz3}Q4d{Noi2chbpHFO~On%VP05Ei`(5rY|EE3qn zsSo_j?Wr*qTQZFaG{!sjma;I`fUToBFcj~!XaJ1A9?x37B$#AOT~w!gxcZl#{f@9@ zmMElrWglC_2mH@gvCi`48U^hqY&0f;hoH?;PGq!F;bY?{92AX>M%##kCs~wK#n_-N zoS`);wo;;!l~<06GC0PjfDbf@N~cVd(QxcVsuGSuf-) zganJ+rF?V^DM-PWihz7tbD*p@V7@x3CB1+aq5#vZi-y`18SaCe2nW~0{>o`rR(ggQ%KburX$5z?RH7_m9hhuaW$VNj+Y~)qVLPvdx0xXP@T^%;G{|HqiEIB?2)b z_31YsQg-IEI^>)05w8ELm#+$$>4prZIsx~TTJK;NELnlhGm;~k!;h%u;&jxLalse# zkkS*@P~kqze(~M>_uG+UM>v^%utDVj5jTF>*+vxJo947`Ilen-yha}~Hq`4ObS@Tz zVo}ZV;r^&+ZWZ(ms=4X8a4_7iY5b;!tMNI3#9bf#%+H{$B&u)g{0)6pD#1cX8{fVG znohXozJvLgp{GXBA*`ecy=dTBJko-NSCgNCx4qZ1WDC}w6`ggWtDcCv+jAM6yVimed#OERm(jAA zmp_g}UKY1JcT&uAGZcONulOydyKikA=f7IjNjllzGybH3Cp}+rVsD+-S7y`l=Py@{ zuU^_{y|#hT5L$GPB(CBgB8#i&A--$SWLB}CX}bf=iHhW*#U6gTQbZY&~CZ}~2g#S zD`9gG@7a{o1HSo+PcAIV_|%TaCW~;4#)wz^=G=a1&4uifi7Nmn#rj*IjJCe?FBiVN zpZAhP^S<$omiLYD%;6=;T=!{+-Y(bjG%cy*GD-3~WKIx)Ot0W}X~v+3Uf`Ox9Lqb+ z$4h55Zy5!a+`N_Fx|p->*|9P@ZN-#jMbzg82eJAxjV{uJz+j%6b-v)&aVN^4ixyNd z)5C=#`}I+)q?GF;NLQxL7lQFz?Mt_76Bc68r^v#BHerRF0&e+gk9)js%%KNs7FPGk z{~T4;PuG-%A}=SWtCYaBc1=tYW&El$Ry{wwxhu|~(i`aP<8)+54eM+AyC^$#Np6(x zT>Xc-x&K>z{SSUwsNC}IU$y-b`_09?zl|C;CArO;TcH3;hCiEzH)F}I*N$()0rBYQ zK{4JRNV`f;ZaMy(Hs_WTf5LjnTzLWGw=tHCBG3QEU|4!q{4JJZ(=b&MEXuBAC3|Wh z*v%|yV^mOOs6;3YmwYYqJ2gst_z>Em#tH#OwAs%^ZG{h3s>{^D1_vcm*w+#VA)o%B z{>N?JR~oQg0{IzDOMr>3Ob0RhE2N%UdmMW3a2%1<8_%$p!i|_sn;y~YGDU-frxnBe zC`qJ^SPDjEBGXC89UcNgs}~&ej#WOk@tYKn&k$zX%6%k}%C@OcTO~@`Ldu#->g0mW z{26V$*^@3hOYk8K2{6i@eN0^PA5t5Hq_N^Ns8evzF>p3MxYxX2{gcx9D4rW2>@ug@T=PlpZviQ$yN9#_&TCfJeMk##lO4d6 zg`>7E#ALZ!(vA|=(!#%<*!X}qd~;NTSf2LKX6}9Je3l!BrX~nkx7^$P77-j8@#ftX z8SUCLN3z!CA1Zdav-CYb{ENVN-eilIYT0GF(1CoiNN}Qn0F364vD07a@hz}QS_46R z!+9)F&(1dNwQv>F%n2d60s;GwHIFX$p$C=Br~|+kLbjWvC3tj4;8b6CK?Z|G5W*&n zEt3)EnbvcotHl>D%V${JvHpg27Dj@o8^Q-wxIy+Ggy%FkMh*p%xd+SN-?PH>x7Os#)>xTtiE|@N1HbH zAVR<$-r(UW4B~ZFiEri=^S1Am)h)Z<_Asbaa&>A={@&~1M|KuPjC_=_Mt+y|U`eiI zMF0hlZdcsszTW2QF7;g>NM{w7%1#vRZBivC(xZFqD_X9U_rhc!rv}1rRI?`Hw#zqe zCh#iujoHL;Yah+njyt7yS}%-QEqQPtG;M{`CKnerT4z>T_{SfJRarX_QKL;W&_EJH zW&Nx=FJ6e-{UVQUp76oC+`(L-4X8-uz+F2FM5@dyX{mWEbGYN0eBB2dRD_zn<9gC| zUW8_p>&h12=N6kjyFREI?VV(z%=@)s^a%?6B~Mn^<<93-rOp4gL;)72^3zejd59M^V+ul!(N=CjGPlt{j(gu9bs9^aFJCmiM`2B zBHJrG#0HOp7h5uya`5A2G9lC@Sx8?v(8VSO*L98i^s-o;Pb%gf-j8f!xY!J?^NwK^ zMa`czK}(iGdhVgl)6@nIA2a*%<$#kw=x)H`baDIa%IPqWV7x@Q6}-cK$6j;ZF@x%s z*S=GH9+f1F?(O|1j02y!fO_%be^4g>-#W{UeRcmI|NJD20QEQuq#e8e-V0Q^O&lcy zS9wp5^KP^Qb=#X><(%H43ypBO^XGYtG+k^+xHnu2pMhT*wKav@t zz%epV#Pr7??{lLgy*CRPA!jy4V6nFE^oWeQOvx1fBipGr7PuFLRtm8BWT^w&k!$Sl zZ6@}io{kbm!c{)i$A%dMaj=l@1O`y#>u9jW6+|K6EJ~XoCyUK%Z5JrJxZP}rrk(zQ z=~PCI zo0-Su`~A!LeW=2jF`kW~`P{?KHMJ76pW|A?Z8CJzU9<$VAL@p2eV+KtuN=ac_pD-B zOZ6QYH3-SvsR}cmy)6A2P&L!zdjRnZ(s!&}Ab5<9U7@cnwy9A40E;4x0YLcIm^_Fq zmKkFEf;$P=^)Y!~^yR#s9~btL{FI!cW|I!z)<_d>$y_!2c=!&{IK5!vdU_AFc+6=i}yv zq92CPasF+EBXLHPz}KSLUfS>`t|_vJT+Hh1CTbFPr42Z}@!}o}wg`0JGe6@BUsiSu z9o{=yNbKgl48;YAqjY4MjA&*@p3t$Kn}8?Jc7_@X){`wyh+DmU6DN9@PGx39!HGSS@`P{V#V6lBc)K^ESGfz2#hZoj^syJ*bGnc6+511W_TK2DO}MB zmnsz(;a#N()NjP`ssPp8jbB6XI*;#_31bI>fnU8s%;5uoY<jynQMP!p$EYtr9{Z9U(H&^@gzba4AS;rU~ZIA$G zhW2rDiy^=*zEO}7FQl}EP_dM$$;llBW2rUMay=&&%Yn}A1>%V_KH#e@qS`_n$uy<( zEmExbL?wLYTn-I*3U=&SX9;iAYXASYp8aosuM>9Yza~&y_-9U?DwxdRLT7T$f{4x& 
zp{7Yqpt$h?E$6|9VZdM;qof?2mKp)dvs=~KBS{~vxw2LQ|Nb)N&+kC=^JjO+y|E_-I`a9 z68l7_I|Yif(~od7liYIi@ro2)`94)mNjRe8Z(<{%0q<2NM~ zqo(42)^V*AOcbl#gv51FsmDi0S_92eLG5U)Y^Z)Sl~VQqCF(JO<@_Jo-ZQGHwQCz) zpn{@;ZVMtMc2E#36seJI!3rwUOHi6f2^~U75L8r@Y(+q7P!vQ;r1yj(AT>w}5Fj8z zgb)IRB&45(`+eRq&iQx7^L>9D{xD!xx#yhMyb6`jPdB9+-qi6?O9ZPY>Y%LH1*P%jXTKKCiy6irj?T|NeiFtpXpG7(IoP7 z!N^rwR|)4)fnst%iSv4QS3D$f>#Or$s9VIM2SF#Zog}ANcdCY{v+SPO<%{ZYBsusQ zymam?@J>H601&Qx&u*M(zb{Hehl3`Ncwl{pX@ajMx6-~c4bca_t zw3V@-e=o~*&)0ZhT!v0mjy&T}Z zm0V2UKBIY3zAUWtnba(iz|s3yG8(lb?Oa9@r#b=&9hn_j+!*=e_&ajv>a8cCWUD+E z>Jjfu4GOY;QFtbyt^nc1_EGs8I5<0Lg?LofB4#_#`d0p>l1GtJP0amnV3e!I|4vRVO$-6T24YF*aEAytSOQTV{y z!=JG$F2s{J{_FWnuoj|I4f;g>uP14HiG~M5j|#i^OO}oC@<<~9HFElKnrz9PK?9|A}KkAgV;F)57*b+l0Owh zTD?JPOIhwur%+*?VP%Bv01Q{FG03v|6@jT6QLd2r@?$f%&f1jyT>B936CTyu5zy69 zxX*z~OKO47?X`U^RB^$X9g$^TK>UoX8y7St`g)fyq(2JYY^82>lKkq*z1`Q0=)ZYb z)nqROwZpj-pID{U(Eg$HTe{i|+6=d<)@|pP>`)d0!+d5x1t0V&BeY+VS#uxZEA^ zmWAve%Tp>Mo1yznkD{%wv+Y#y%ZbM1mxF=`@sRIYH`ISJHhP7&Eb{6!H|FpC;AcDW z3zO9Te9-F0>dn%#_h%h?&prth)zK5!!6VZV@ry{E>u`&JvOos|udyp33ROrJW@rrA z70%Dm(PIgoRLnFJODhfu(ykM3|MpXQ39F^3yvtnp^@g!=^xJYc=a;f0JyZ1?FUV6% zpZqCY$s{cPQTxqWpD=fSVeQ|=54lciKc?F`yV|RD=a4hX>1KV;iJgI++54y=#5m-{ zkaeVH5A2{3?2sY;!#^N$%T;kOd7H`ZT?sg|zTT$GuHT-PnqE}B_rq|XpwD*B0FX-EZ1`AC9av5VM#=p$MaA5goK5S&!4WprCHxg zgFa9eAhi}X3MQ@%_1Hdfy9>z_y(nax64A167#GK+gE&~?&z0av3g=*Pa+;;t%>9v7 zTm3!^0s`3TKeXX#>@J621l!K`kJP8(+?{{%8fY(D;$%1Y4%vWKt>ZyN!#u+`IML(f zWM)7VpVthIJ*-w>c7bSPP+}*7>a5^jcw~CZr{29FyPj~ho z*nDWa)aFCR=XV@=c0lUT+@&MWd$*5AHb1w>osgm0ygiGUCG~u;H3Aj9*)#m2ax-8`P*Rm-pa zHGEQ+hWR*@RsVOm3SE+Qr|=%@I+i5;GXVO%xmy!QW4+#SpEWImWFoCtabtKt(4Ja8 zEQLvgBdK-(Xm(oN#&zG_O8!JH8L_1TzyxQg`R>|!k7s@Sivompv$AXPidQ=Gw~W#j zAe01J(*yeohr?=2-GZ+)GK1P6C#_SeZuWZ^!fEof?L&*8gX8bD>2lE4MF;YH3FPw1t;g& zLjC+hqg3Z_$#X5p;urfAzy5s=Bf-UPABw?puj<}_HmzXp2hTE1#DHDx_w(@`21yAm z@>ciDBM)ZIs!j2En<>=DipXy1GAupe6feV{W013Zqa26)^j<0%!+ijw)Az1gYBdC`g8KGILP~` z`*j?NmlP=>P7B4}sHQ%HPO{k<8y%RlFk2QP8TWU!eiwN|Bkwq)`t#`~z+uZHn@L_x zC(kWHakNnqvJI$OIKzO=9vL#_+K0gMQpBmN1z@odZUPAzXwCs3t5u+lb8Ma7^v2>1 zk_lXOiRW>91_621AqqB|>MZ=S+E!cESr<99AE|Vu;##K$`ptO-liiw|3eqF+Hu+~; zg1xBPxw*8l=&%f)`MaXW$h9cq<-rcuu%;CTsB@GM6rS#L=XA^9k^>x4biPYb%PG^l zZ!9g1CCn{~g5j}cwl~;U&iOzeni){N3 zWLtp)>zEo&w>!vYvHz_Oi(lLZ+59Vt03%@NGbtNsUk1xpmKbhfWij?XM?6At7c0#~ zQ4~)#JIwXtNmpXKr1b74BwW9VTjFEfZs^{5LvgJ@Eyr1-?TI^{2!eMMV|pQspm&4& zX_HzJ@()gKVYf?5)V11I8eixp6|&o=YF_)q`M(sr;<27mPWbUl$Q3-dQ5R?>PkSbd z+AKwE2EBz#FbJ-{t>>7=@G2v9o}WqAe<`m2wPUShDlBxXRqmKvmp=ra?>dLkaa*~c zx^fTj!?w{RD${i17sHOdNAzX-hQH*ZXPDn(&=NBXbH4S7If>_?u4B-@ zWXsHSeQ31e)}_MdOJkZ?8HgErjJBF-am5eZ>ZIO4K%5ZcK8Q#kj=`Qv##74KULWhP zPGV866jn9a!U!K=k%6}|GVArLbpc)aHvnE=8znlsW7kKXW49+z3&{6qSR#n%CowV9 zxe}y0CS+DPTY~trFk@z0RR8~-fd4?{|DQj;o%s0SKlAmoTnU>bG2qP+n?-@sDFFCZ zEFiESmP>(aO;F7QjQi+>I_};C9Jtwkzz78;Fb+7%!1C@!Q*Q6xWVOCCU>gX3{J0G? 
z=#x37K!cD+heQyU>064Anfr+B;PqO-Bo4$6xN!jjCUFB`syBB8Xt0}DNt$8R`lB@1 z@^mzkPKK4ZTwTgoR|7m!0BKB(;eSdcDA^0FCNsm$U^YhoiZQh0D^(TQj1J_qV>oRS z=-2!!0X#}~?Rpe>&fMUvK>b_)>k^dJqQ+t_qH2A|-oT||2|^d4J2rWf@H1NSrm1PF zs1=!fcY$qO>r7PPBR~BB4wnTk&1-tjLRogS290%c7-&rPnyvQ;_PST-VRlmtc*vOD z0PIzF+&_G~{*A+Quq9`Hs#Ex47W>#bNN9#|^!$x}JKGYW)G0_q;$^^ilDFmy2r@Uw z`J)FN0bLy7(NUCIF~HMY-6i4=eg5b3J|EJ#2ifg@{f|cE!xvamwKx|mpy3m~W|aRr zY&+2VGn{j$^g0e)oooP%mLACrGy1p&l`#TjgfdF)>uRU9CF4#-7ndf0<8O+H!b5J^I9;x#mry)KJ=6GJ}_r3H`ywEQFsG zD0Qzoa$@^%eLuG#Q91k~75m4^Ea44Kw_b#iseZy=M{_b#K-PpSYv)h}BKk4wwiYaI z&PiWoALQ7D4aNKRcWvHibz4cqhS*MpqbxEV`EmQ;b3w4Zx7oatVt~Bq*fzjglP%|_ z#7g^1ymT4$lvk))u-8*Wi!VlXPzD{sGNsLRDSc@J>=9zFZTWt4g0utFy5CeEP zADLZe2Xb+^WAT=+W*KWwsex4*V~oTjF{kNf_4Nm%; zFN%8{4Lv%KCElx1#X&ndbUVF}Sj$rWjdCy4UBhzWnKSo=X5o2HJpG_8uZZKD!9}kv z2``F+4cARI#X`!%DBR01_P~`k&bSfYbIzluoNhqp5F~(wDh3OP$_O421D3`pBMgI* zRf~jY%uAd%vyZAeOML#JyHZ|s`6#+55TgSr)seX)UM2Qb@uhD5XQ%awe&n=wOU;l$ z_Z%^xYEB}-OxAJfEC6a+#!Qc2(|^PGgy>_fs9m`%ZF#>k6KdHULdvVnH zkzmA4ztB?tz3um#@#y%Tlgt25Wg2Vsga>Lo82Z~R##p%u?c7PZ8_Np<*Xu(MP$9LJ z!rMe7emWj}m!P~kR{$g$)&B)7^8W?GJ`z7>=QiH&Si!^b(l1$xU@bcPglEAkm!!zO^*R&A>1s zSutkSw6aTGF$HSM(U%)QVaRzouPn|)SQXmUro3~!5`JjQuRf>q0!=6VkLkaIXkncv z*{SJxre%U%r2xEhJTi%iX7k%S-Zw?Y349JdP4JIAaRpDm{-E5p0Jf3q{RJMD^{h$o1L1ftY&`yh{ihnenc7_I zs^dh}&8s0wb^`qJtJq$pecM+<@0PFiS0p+ge)u($CctEPav24sT}HLj>>QKaT#|8k z+&LsT@LFf;(k%K3Fu~Id=<0&3QJQ~UC(>A7yYP;Njlp;yZd;{>gVA=vVdq+tdvuM6 zE2Q=dEYAdB^m8dcc4 zenVKAnx<)gPn`b z-jCU!#spxIQ7GVF^QLvh^NtBTEFOCGFD>!n)}J&IX}PF&Q3`Z{_!$>NnJ`OV7d`;K zEqkoM53aLl|HyjRxxkX^A#GV0b;r7W>%l~S1AqKP0G%=44ZvioXOS3i!^}*Q%offr zht^brc2<+M@$B&At(R_Z^lzSKB2WHlMuxVHAp^pD1f>RqWNL13?RW!uY+ZT?{gaj;qLX#4wtdm$w)6f~q7-1H=B=+W{b(X}eS}CTK>s z!*4v?tL~1M#6!;3t(VAe$yK(=c&@ zXY2}XZQgRw&1{6QEVRnZWpPyT*dH1kE>S2CEP;N3V!-9r%VI$Ps#V0^ND3_k0P1`+ zFw7l+AAC7>ZlM<50C-m9v!-Le3|KoJyuvWIXh zk;LIbFzDqVL!43W!hhUQznKrA_P-OI;ad5;)Nna1`fLWxon(|fWSp~bdHL*cy?5=! z5C=C@VB0=LV!p53)}a!n=YwUAeY_-UUai(LXJPcT$WzU+)2CLur*84L3;@W7s42?C zkMde%klzE(DxStC_iCuAiO~jdq=vPJCq*7fl;2Sdhv$jYyQ96b8a;29hAnYQ@eM<= zg_zTjE_fix8;CkeqI)qD2W<#@MHgL!3+FV2V1t$T?5ECqKHIf%PfzD|sRXl2iPSfe z$8>&vM`CaL9L!bvyvDs+Ti6;TrT%o9(L~%ZaK|Fr+Cw5T4UXLBW%3+*ws_fQb++_9 z33>Bnv7+hCO|(5TK}V6$cT=2Q^{^3&nYJ)dAWe(HbH^Em)9gsHhe2&1)~kA3-j5;| z0r4FsP?}*96CjO(+tg`9JSvYXXy!z@oGxu)zagIqN3U?SZxBc8YTom5@6;6Yc`J<1 zE|W|*6;m16@eRN`C?a|U?u?m{xw+-h$kiRH6Ds!TSzhU3E|f=S?*|$@$XuL-UCRRP zkgB-J;ZI|_R5cO|N2aoV1|YqwV6~zC-4PNe_7vf#`2z*JGDO+Z3^`&Zf};y z@=o>}i{iJuaK7$)`s-890tLPa7!K@wB?g3y3)ak=QH5MS4gq?co>9Y}tV41gV0F(0coRJbX1rUk#NBX$-2ySNC)pFR_2TbHJNoEu1(BvI!ElT~$z4|+r z7iL#^{pkv7Pa)K=dW2?MU7q9L0ZBNFRxe()55#4J1kI-8vW6% zIm3jPWt$sr(JT78QZvclDh9wfKa&gVAX>aHHj1u1SMIdm{Pm7a9h8^Mq^|-);qXT4 z-~^Z-vH#WE;7#JZ?}ZoX%0-oW1CYscW*w}rfukL4ePtwgZq_H5-b2_#1hje8As^<= zJ$G|Xr^WCLt4!kt(I<$`LDlMf865$Xs-crQ!R2ltZv=bM>9`m!YN*1_c16#|DI^r? z(QCwF%}{cfVbyQrL0JwYtIn*QnM`V#VK8#e33;Hag(BcPfuM+4SKx@W_ba+7;P#Zx zb*Y$BN-Z6*^Fevxsz-D{TWp3uW4$yNw08wgRPeIn(b-%&SZyj^CHorqAFH1a8x@R~ zVAgTX0@M5dyD2HUQ6!&eDXk>nfQ3J1{VW%UlId;GsBD{gg6w|GG7b;?bOBB1^GY}h%<-s^^xNsUq}r26Ef)3d9rdF zZko)bhZ+fZnWhJY8j}K-b#q6cj$%j`1HiB(@)m9)n=&^ymjzP;rBHxx$MM{apk;km ziCPsmfNrLQWvilzylgy0;Fp^bEDl8Qr@=K=u$^i6w)1)(?~>6EW^He`DNxfo_?qvdJueNd_l&=jlRa!hl!@Cu{8>qps}LkSn9fR2D~26a`n=m z8vp8a^xC&i@Q7nx|CUNv3mBz@X>o0y(ViGp?)tP|yI1Ff9X!2jE4;?`O^@NfexHp! 
z)fA+LIrpO|V0JX9xW&%QI~`HtW~I3g2cJ?Zunt=}YkC%SvVwWKa2R(MdmW1Hb1~ZA zJGrE`R1JDVAUdq6&^j3 z(Y&J{LhTh|kBx==^LTq(rPjcSW3mLnk2#iOTi@;X_ZQg#shfhyb2d=jwSPA3i(j-7 z<(;ONFO0>j{UfV8V_p7Gc-#H?Ah8mUkOC%AIB#mwPsgT3OJ~7Z@=0>rJ>hLMBWgoy z+TFqf`?vQWwr@R`E~9p6vG&UVC)475+A^yqP7E-q zbXZlw6Gg}J%lHQ4JE=NXBajI*-~Bm@&y~dhoDe*^s|p%}?`aI>AAU4;?XM!LT2qjl z;r?fP!Wyo=?JFpgJn(~`H4fWV zkstPoBX>h`{^->z__LY#uF=kck8{u8QGHd*=mZB}X<#y383Q7Q%ks+Lv@jB42F?V> zJ#&LP@}9ByB@E{>w68@Kj4_*1Uoyn!08woa$G_I@%|t##qR1I+F`!~>MUEBjM*Db0 z$bLX55zg_3_{f{E`ddcspUB z$;7DDPe+w_?#mC@Cxwrh-C;)R9{xG~u>Ie~cJDO;BLEzdf~-+*W%p$A%6cb6UAabE zfi-ZGDGjWl38U4fR4=DF?;p}~MF|=-eS6MI>C-d1?Shx96gYkz*^KTdoZ$qvyK2I| zbZN9j^1M6nmnW5v_`TwyDQ0`TJEm_DLaQ%7pmd=9?Svn1iGCF^O9~lCEFppszADcxCP7)@J7vxxUq9u(o*DGM-Ltnm3o0$u zIh=mF-wv%2p|B1rS)y%zvJw)>?m`;42wT^r(+n#JwgydfL~T#=)PCvDyAvOCqk>H< z_iStMs+xFe9LcNkCL~*3ygdpxGW4EmHStO}gxgiy2d9)1rj7j4tNXVa8eNBXutvPy z2V!o7_NQ%O+QDD&hVxL!NWn=UsAy9SSB$f&(b%f)M41l>|TrEI2e>PXd5h0!9W1<&nT4 zHFO%xzxW1=uUs$Y;9}XAKE)N_PQVJ5L!r!In!p{*$CBpG9M4S`2R`3DVT{j6Pu~be zn*{;qQ*(f*MyDO$=z=b?28zf_hO+b7rLVL{Hv^y(s3iERfsrcb7u6hUG_NGHQ3N#G zGubNB5vBg;$_#Dm5Z)Up3LKLT$4elbMoAnzf%FPDMBt{H2WQCnG)} z%@LWVo0oqHF#aM2;*&o#O$V+7{Oil+^oHRnXlkNtvvLT|Y76fD$!Luv0m)VS)o1L2m#;8EGf^ z>te&X}Jr+$t-!^m*&%F{iJhd-SZsO-0+lybA8lU&qw`;pDfn9y>L_4dUS$X9g z{)X1`wlK}W))BRlC0_lEiGkAw4XlREkFbOkBy1$qa&=M?`#zf z_td;$@bk~F5fYYcw+&6l9N+TDLY#>Gfn!CI+Mn%Nb4wc$(kM{6E)YfX`DK(QMl$n& zAu5UDKzjM40?_apg1}nn@XQ}sl!|idkVS87xuq|8y(!qTEPEFr(x3IDF}!m~^lpMc ziyj@`3`|;Jk;hvuq3;JQz5QHnG{g24qFGXBR4MI8`++EDBk5syvJ%xQSZFe6TS9 zAPShT1OSa;7J`M;ajuyU#56OmS?&#Ku6>DjEDlQD!~JxLoCz}gDFue2NiPO;S3E8jR2*%ZaCvuYXOhKvjCm|Zi`EL>k{5zS=&7O1FHo}l4aZ_CMW%#t?%y0YIJ;#I7Ux2+r*leErzV#2wso?I0_NL9`mlg}hvY%csuW|(ddCkZwSCaFbr(!kQt z1@b*+_@a2iVdrD;Y`r#z+962(kA)$Oi-V_3cTIa<>|*419BOzfiLOqQRJxab*j~o| zQ^fNZkNsnLak{vg8)iY#yqD8rJ|^&_9d>1rDoHZDcouU#e7VxQpw8L z;w23LxN%FPj=BW4F^e@EZB@|;!gkMC%Ij)-voAj6-i1l6`{AiRH~WO8Z0&LX(yaO# z;Zi{BD9v$Fop^3>dd2W&sPvt-zJMtNxUq;A4Ew1Oz6$jVeQv=n7`Dm`$g>R6W`*dt zVJCFz$I=rdSQcPN#{j|D-ym|+%y6X{KeN3PnugPRORX#MfqpHmGZ3U7gUVh5(a%(G z9bS0^ayA=n^bq;e1PY$~V2;a%`gv{xIlyo#tc&@WdRkO}N$`tcwYRcuJd>S|)>!~? 
zflfsVqJ(#JT^XPIL73@*_d}$U=me-J0%({9{W^Z%BVvR&UM&vR)?AIl6bko($EoEe zTlBhl#(zYy>@OQ8Op!-_pyIaT+O6$|})i;wIa3j1C^s@t#p}oLGKsX|E z1Mu@4AG5s9jq0T`quDvRxnh5!(E!lUrZ*M<0%p6m{8Zc~VH`2|a1XFS(H0-tW+PMlh!?Zz-jHLgC;~8Yna@(cxl)l4%u1V%-my3H6 z?OTcHH*uXhwib(8lLQ*T6ohK~@B!LDiRk#Z{uF-{18Kg9nm`%Uu~lWrv=mXwP`8=q z$f>RKd*bD1hUh)@OCa9Q3#^nFI`nPB=Ai*>)VXUbQf*yV>kc@@hV zQ@Z*9-)lJRwM!lz$?Wtp9Iu9oQl3!G&96r;{V^7a0^^?F3H;Zl9x47}#bYrEqW@{( ztj$03APORHS6op`tB3q1V%*4*1~;e{AB#1lwC2;$w;7nFE06m(LueYeS{WC7MZ$Vhpl5n$E?}<}On-OUTM8 zi^b2NNAfHA%DTYB=V6FNPu$Gw!RDXehuE5>(ad{WGF?{M;QXUh^i1E__aeiI+HhOn zbWsRMT*#@2JPtTrbU6Pk*Km8h`*xWVFBWhFk~G%AF@I10hUrULMo(9s{@lii;t}n4 z$Q~nq8GI_`(3QH+UXN{Z_$RZL4j6i4A zlS-rX_7k>iKj(@pzwCzHYhI|ueadB1VdL9?&`C2?F?EpQGLXr3W!GnZqC3FBYUN8T46&(}d8npC^)0jv^*LYL?f=#Kkb-C2GSIzcRX5pftK%B9^6k095GUBs#I9fm_3FP zbb>2v80&s%|C*qEakpS~w4ei2f(QJay1f&nJ^pF9EHHhEg9RBJ5u(^6{BYCEs>$n6 zgdJiHariVrc{xBXLRgjzj@XmPGATB(>Rpb3TA1H0$5*xqAn2Jny3Ku=&pRywTlpE) z5O;jgvL|Ydqc#C$-2ZxQ6oYlon5^D@_;8X)y6yHqcA`3VQMecYH(Dx85yw5rns~PpBcP#VshlGPG?e z2}i^sAzPt$?uEl^l@A;}quL~J`Qx!LIqQk?#3!+GFKs3olKo@a8LH}hUBD^V>RtM0 z^w5UL$rS_Bgvs#F1tTW?HFuf**UT5EuQvzWo3}?Hm)EO|Da#IRKt`3JIkF1$0V zHEUdk7Hc!Qb8yP&UMImV&07Kp?J2Dg2gblRX5jkU3*HIlNTu#5?w1Sox*dL#FN6ED zzDF;&iJ>!WEj7m4(t}boww`KYr3Dz2k%DT!{2d9A6q2z;9FR2h5uOP?TGRSoFjqa> z?W?7g$zAO5z9klGns+=vIzw~_dB{6jY}_w6Uj!C zj0A{iBe0a`ER5l5znA;Q&E>CvsWsM~ANfVDzV{y<9QO^!x2&AB26UO09m|W){H2O# z;9(^#!Nkq0K9_6fqD#GD-anqdi+>gd-9C!O)nuiIC1A9VK&z!1fV@S#6!* zZwUx%?FH+N%U!=H2!kRGt3fV#g`*B;Hp5S;-T>8It^uH^_|1XNz;B|#qQM2KT z50f|*DFC!RX^c$GY2*pOWywDjT_6k8c>*=#K078*U_GICyX^veK`*EcfFrjrSz;N% zm56Ed@2Af#Eh+W%gL;IdG${DR>gvO5K@)FdXdaPj3}SARF<|A6q71obKNj z2fjY|ap+I{vlw7*ZfiX7{we6-nT)^#L><@(cpYdv41r%27ZnX@ioTMlV!$11Jk7sqMpYTP0q~_Vt-zbI z`bh&^#>Wqb*Ux!`qo@CFUOiQxHP-w4)p{QzoG{=o!2rO7pVn<9m}NnSW%iVsKRmp> z6wB4lR6P7){7rwRR9j;tt^+%>?9E2;LpZ1ZE-;wkz}Tn=rBO=%m{|>frBe(5jiR;- zKJ*!^oz)ouF(F=ONU4x~2KxE4e|k*jb}~)+A$a)SUI^v(7;bqP#)16%>^#$YQs&H1`P}jz zc0sfyQm8Z{fN&-PeCnP_59b7D{HjSgZ6}Hb`)a8otE*lLau}EsF_TvL;MEzS*U46| z>2K5qe$|t_!WiQ;s#827*8zelquwFqJ~}~$8<5`cIw962EUx=(8^3nHfrHtr!@8|o z8IW9wF&*u%yvNk%Qo}VL-H0n>rKxZtD(&rfPHL$J%DGVLC2$=YRjO^+i9 z=^iA0Ff&K{&fi!nJ{9W zag;UO^>?6&%w%}lsmyMgtw?c(7_gQ>Lg05bI}pNDD$#g{hzk)2^Fczn$1D!5yT8^ADY5kxp?0{RzLaimqzDy}-OqE~bC2vom`f5Psa`Wi-drh`nc zT09p5^5DE4a@{~m=cf^uO$Mj$34gJ3HrY z!`WVC)=G5sA9tImTw)Fs>C)PfZZwZrhmehcg72ge5LJl59|fF1%X?RZUEQwh-A4AS zUo+Z#`)?i#(ZKf(rHylI$)!-+7LKsu8FOB+N=bHTTx~I~UGitL776a&C|?}H<)czr z_G3ZwhPMmDmc$;@3dM)jV_hjII>YT%glhJJn2PdPzepSE&ewXLvH9Pla`e9FgV zK$m+3GT_`-&2t36c)sdAfUi)Oa=Il6_weUTYkO^v`5yu8-&?Z44z}Jyd zo%=$GN7Wi`xu_4rQvPY)6w$k6hXyhrCQ3c27sL1hGKoqs8V!6VUx15FaEYqfQEF=bT{Y3T!PZ}|u;HAwACTPiT>X{+-xs)c zl0k+4!+6UF5E=jsuZ$2j0$!`zR4Z@llv1CjdF}X4=7}BzbDK=|ZUB%GJ!S=UMKzgR zmmS|feR$g=)t0h#e;1j9p@R#4PV$8U#oay_<_IAaY56UZs8FeLPB_2JrTcuBA&kGT zQ_8ceOqch5QJqqh6BLR zcyEvVLrpdERtR!8umrI0jfG%1jU+hU;UD44V7nWL1oqa}{>?bMb{acYJhrP~ln3SrEoXi4*lhKvll54eG<4 z2Ne_cMHmV%v4isdEy2pKHX?UvQe<3A&FLzQPAKEgzgS152OTrvE!z?KT#4tW?czG4j*> zBD>qWfhd(106;(q8DfCWc;K$}q5Z(bIy4UYMyzAm0Pvg51ERoe#c$!0k0inSt@rx? 
z&>$i2wc{~?VXzjT+f3G=v>nf_RX_$!&-|SE;ZqsouL_E%!v&$ZKF#eNK<2d9iH1t}9}_GV%@m zBIj9Y)DhF%Qv1k-x3t^%q~_X4%ZldCF#Bg(ul=F#+EHo8>v*w*$i;wUDm*L{pESMm z=iEzb@=Dct8;x$w_%kZ!l%@qQ!cM`>#DM*EY<5^A&kMQ5F`%V!~MwWkDb-RrB|DAlsaM9Au)o+Kn(rNd^4@c1a z6S{}M|71a#KxDj|XZ&M(w=@U@%;x{X zZ$2H~A?R$AvCn+VBgwg^@9ZdiMrYqN9gt-457P~zktRy<@LtINK>bHxEDhMhEI+0t zYbuCtO#9Z~_~vH`5slq%z6tV~>sKwnEk2f4%`w?V{4mal)%TGE#)8^F)tYzp1pL%0 z`Zs2~ltuKvaE{pkQll{corfYSC508zRTlq+RE~JMf_sx2$srb|bHDoM?;X=<3_4an zz|VV2y;vb+4zf&woNF!5nWh)lg=?EV9^;=p(jP473$gLD|9NkKw<)(ld*q3>Hb*a#)SX(3m6gu)8b2PTcAb>I{3;a9EbYXKMx zwhxnTy3qLjz-%HfVc07TU;Rd7!G-9If0`A2rYE-=#eLq)NuL>F$i&;7778tahGqgd z;5iAJ*eSlkUUq52yp}VQ*?_3)YN}MV#B9%zZ(eK*JLx4{j@Dl+p`R=dbJR#J1CxFc z9hbi`@9g3AYLafh(M~3Tm@HSPyU-xBc;qsu3)CqOc5ptuJ%a3T_l=2>=v<(PW3s?d zbdb2t$#m; z`ed`yCg|?%WciJzx*w& zGgFl3ytDpi1Bj3)P1T{WO~RpR*#zA>b`WY0w&9jiI8=@$1JfAZ+!yp)*~r_f2zH(@ zU!Fo@lF3{G5XDw3LnmICefUAPjCfB_I=**MMFs8Xrj>YXFe_5c4*yxO!Sl>k&mpCX z#WZAXHX+zHZJS<$v)rBsPgV}kKMnqVCPuR^36p(rH-|vJNhJV=uuX-CYKvI{K zm?)_i6+;76Uhj9VgC~B2o!oukZOj#0)npVN;_`_SgI5${JB~}!S~R!Jn8a~%jiTA2 z{mnrhFzt8opSD@S(v}TepzIr1_(;Xt-WG5$(Z1o8j*3{Os_}!MAt9 zeW%G@(yI1Dx84@j_l5T#-DF-O+q##;PJUT!I3UCS{Ek|9KHTekb#imF&Eu=LBW*in zVC=nC56Hjnp&oq8sjL}@dBt(V@TZL{j6^GEQCE4r%q{HZqxQ+f5}}|U;vfYKM-r@T z*)Nl-Htbz1Jj*G^5xR-X)lh0!;dpLFE19>TYy=r{u7$F@Abmxt1lB;#e)t{x(GViy zsa8Q@1*iZqQ+?zRU4XyVfyxO&s_Nx7k@N3X%iQl<)WCsZ(G40J_3?N5*xOlhnbdvy zW!Ng=4*|i7Rw@Zhen$^;Kb7lb5>_gLe@t?(RmP1=?{|Q!15TxdT|SXNib&!>{MtX9 z45;u2cqn2-Xwe75p%03B$+S)uV&D$jJ%6_-3Ga*+f;8f*?#Sjhq>mYPwmN!Q$R)u$ zTzr1SV#AWbqwk$z zY6T`a`afI%AC33?WsVIiPF)~B8UrdLa;_s=@Ri^I_XNANkTO9O57#rw5Ms$&fu$eWMm}?h z-}h4YI4_9Qu#d{AALwjL(>5g!CTF}9=RpfhUzxmG)PEA)Z#dq5Cf;hOzyP+=ks$^U zNcZGH355>7XY>`Di2N+WjSR;+7B3f7Dz5r~>}?hN#M0~rYl1y}JS|7g82?Nd65ktt z{psKZ=m@*oFQS!{M#3};Mik82hvT18tFKeasti7Nu^!l05DMD(OA1;3`T@Gb1+gpo z*^PV5r3YJS+!lBmpX8T}`npOKL?mgq!PM*%XQl+f&4ojfz{20UAxoq#tdL{oozdETG<$U~t+rL_(DN2N38$Z` z1f~2fuWdB(t9$j0(Rlj1U}?7b9r)mhoMefd%YJic^$oxRd$vAiIa3`>rg!c)_lDyY zuEyyKCutS(HubAN=bX~jsU+TE1@;1C>`PUurH{zHx_@$18Ep z7#UEwyVj))1p(JZuKYr+Ap~2-{hsY>-I@8;@WuOO0Lp-z{0 zYaJ9vy*OD?1bvJ0J5a3SnnM@xzprpok@R2;S%&tbj}xY59I1t^?Sq2BdAh?bz~q{{OlZig^{xAZX;3(=sFF)5p-O5HNMs{&`_}MNZwoO z6T7zNWg}V-_s&nS{@;wD{|Zwo0E3+WATS^%WnI5cuekDNBXDZpwrJjZ!#M6E`^OU< zh*b>U2dqa@Zv=UeJM|(GJN(H3Lw}eHxkk+6K*O;#FHrcr&KAfb(rkvw%dPtRFWD4usUsYL8Jx@4aVbv-{0VLNANq6g^wtScB;}& zQ2+n1_FiF4t!>-xP(-E;rlO+KoYMjVf+$rWQJIR0fQU*b3IZxkdQBptqN2otbO_26 zL`vvA2~8q3DnjT1LI@B-fRKdrHSm4Eb^N>kx7H41XCwz>JmI;Y`@XL8D$ioz8^&lv z?{Z8R5$yX+ir-dHlbtt$Cpua-l~Y(wKJwThGI>soZlkYiF$LeliNg(}e+QUr00-FsN~K_E=|ir#YZ7>9WjNFntu4TOb?&$#xp8RTJdFOiNXJM2#0TKv zC;|03-VQ>%dII4V2|J-`4(|S?lT+2pwi@=|!%YJ|o(-RqALo?rkL( zj0?t^bV};Sxl5}=Q9}XJQ2P<{Q2sb(kjaIto&bBLc6b@Uh_w*B0Wcdr71*J2ai7h2 z4O5TcCoZ(X!{>f(sYp3ca3J>2LPznMFeFmgBVbQ`P1pc{rvye28Iyn1dq4#Zx`R3_ z-LAcb4lqaT?Dhs+uFIV?`F+VsEaQoxdpWYq?ESW(hA*g|UIT|}$a zfrB#8(}Z7ovUmBQ{fG4!6>tr|P-haBmcLF4E#~Ie>K~^KC=SFZy-nhZOkJPXYH7W~ zjxL;)yXkh1)M?B|Rui}ML~gJCRRYJQEID~%+YIEMP^@RnkgBT-r`ttW?JU+~>!v3qM?NHqZwrnnO%m3KAi~O?47ke4RH-Kg1}@ zoJGn`P}`=<20!Gw=4})J9yw>u}wV`Mbg(Q_ocxM|19L0`ZCC9hb zG&2z!fC%4_6zA0#>{bM7?C?y?_(lCKbolJ+ctqsxAwh99ocT*t{0Feq=Mzv(vptZP z!AkrVb++yyJ%2(lmA*j4I&#R57%0|Th|Sh;R_ z%x;3}J(uM`>4e%=!RCsT<;Y!XNdfO~DYZ1%=pQSN*6wIf>%ysJws73%{>g0)YZg={ zF_U<#Ctf@)E^)`+3fF5Y)4ma9Bu&hd8uPslgki#HWO8By4ygg$a`T$hxTN$<4JLBJ zIIVpn74vZC3`R3W#82liK8)hzfkzzW$kUn??^Jb1Vo~m)4!vx>EjYvNMB(){bJIWQ zP*jo2=7sS7mv~aKicd7Ycjbl$l0FinWE2qpDQTNN0{M%iSxzp?GT7RMG%46bjAz_Q zJ16&Lr`zK86fe7l_E4X&9hx+`tlL3T=K12L>77MpkO#3Hb%D%mp)bBOR358BZK!(Z z^iN$U=YU-{+d<0-Ug{Ohzaa+zi~Fu>|LdDC%_fv0+ioKn$S8 
[... base85-encoded GIT binary patch data for the image assets omitted ...]
zAyFY?oJ(NM)Y`$X;k;$QA54EGmd~8fvCDqn&2i-+XLyp{nNbi|KS&ehq|Rh;H>cbr z;MDwa$Hj#N#zw@0#;M@g zi(oS%Dt4lAfmn&ufMP>LdD+VI{reyPERLH*M9{0%Wks-Ij*dEJ>KhpBcHEk3pLs=N zE?po={6IB9csd(voeU2aXa-_`qVhDRmzJv5tI6xVlh<41_kKv^Sv~%x(zXg0k8R}B zK`-_>jeM^6%Q!8MD@tC{mnGpPU~Utgv;g?xYUzDoV+Z!Ze&L6Og@q4^po}Tz9V9hS z;|oT^0GVlPtKZ-b%BoEO91Rsw^2Lf)r4|3hix(f7o7kbWehmpEsRJwl-Z7In>jdVX z*)C6AFXl+a#K|IxnQsfQZrp)M0e_*C05_`iNrY^V1CT@K{o3wtwyfQp`k-me z+AUF?-+OdCuSosD8{wVc_uf8-odgzOu*otQhOqhgdhur9!ap?N4C5g~h zI202xnK+Gfo*d7@F)#US!5?Pq$NI-AKtware9O9rus(h~_Zpo}&%0&} zK8xLsY*ul!&oMY0&TiTV-^DOxao(DB1yBqU^dY(*@>(OdN88MgxXr`bUtbV^lT`%X z4>aFE_k$`3jO4&aPb#14y5d+zyM|qLkU>4rg)6T!D;QnN$pQN)v;g^6lFgF-aJcb8 zv0ru2czOx*;U#pBZZcnoYD{x9R#N&p!6l-w_p_cH;7ca?YJ>Js$2U zh-(#;Yp%N;y|Q`cJ`~n4>^9m|US2LkAHf7|(>K(bLNaZ(q|h)6`0}W8l{x9uZozr0 z@BbJCWvrA@4$W;_dKP8z|)Tw=O_`LhtseT98+6Ti|1LU1RbP=qFG~VO@xeHh~ zNsPX}zA$X1{^aDOry>zNTI)@J|KR|5dRp2Q?&VfX=SsKL7lMQTl2TK17!otFSm-zt z6$5Fe->rHxJZ#QR+L?g{vk2?hhpesd^FA$;?-ly^Z@N4WWA?Ln%V|w^(;sf8f@xp; zRUco$RFU1nF7aVTzE#W~DE;KFR2Iv6`l#4OG7+PtX&H2B&(*R0+>oLCr^bhi3=H(w zOaXop4oSdEH!{1wqHm{94i08DXvc8+)?VN>(AVXv81*Awp#eOHG+MFprU;EjkII6p zWioj=Zy>o+f7d?5BgDROWv##78Fu1Gfz^4{W2r&q+jt|mi>=^$Xse0GZ`WX=vVsI; z=K(*h_I#!y5R#p>%b%y5C8e?QqzAwaQ<~e`A^{i*hzjp0tI?+&n6Dpn9+gn_Tz2qJ zft`JS0YC@|6#060-WArd8 zAnLN15i1K~wk+n@!9SMae~2%i{^@^5nUM-l2=(zgY|RvCA*uDitz7F3ld03*(Xj?P z8q~LDO06WTgQh{SaZ1PGf&_mnAMS3$ipu>rv;*gHoyfqzV3Y$UDwzlPxQ6R;QWL*F z;9fVn=0R)gjue2CSG^e=T;2ZhW275=C+xCS$Kuh)7p=4WLyK==SmC1?;4&H0S-|S+0aVBK!0LST(pFQaSmkv7pu4_L? zV66Mr-JQ@;p0&7pNOCE~c=)*`aj z9lq;Es$${Z;ngX-adzw4K`2|_F2t)*(DAnyR&S14yQKbREFByMOv@1*satE3GE&(b?SV)4FhB(lh`%jDLw>UA={B$0u5=Uj0 z87mRR3sbn~r~e`beR`Dx&i@jLZr%C}5|0MKAjWc+#j20svgoHE`6S&GO*sY46NP=y zv3#eMfb^`mcvtE!Ya5$7k@vNREl?01eH;Sq@yDycx{CzoB?74ew7x|e4(F1Dshm(m zG-YcOot~mzN)~@k4b*T}rxgWlc&k4z8D?>8cND8m2=% zg*F!BZ6+4%TJVvU!hX!ajdgM!_6I|UjvZUm4lwp-?OWV^?sRfC!GH`C$hnnh-4%2( zEX-saJ`GU%i!iI6yN6%(4A$$4_RZKoDzaCm%C?F2cahjl$pN#x*|Ms-Z9#cF=L(oJ z9g#cs5T@dU3@v{ScM$o#yVG^OVWG&HHXMd2p4$<%wvR z{}+-#v9Npy3M$0xfsj14DKz2}QyDhl28a3V$J^xnaoa0B1U^YOIbBpqCul;n16qpJ zImdLz;g0ruUel$$3j@=3+Xu)ew(^l3wvo1ZXH8FQUao{(P#p5#hMr8f2Q7|&z#%6D zsIj4XuY=HtKNhW7LHK3O1_i$=Ux|5!c@-MxGF zcX8_SWsuv)fm0y}oU1Ax6-N;*Gu$$M=iFRRn20_NnSvI`Vd%Zo@3uSSO1F>aQEo+l ziO0$Y@zC2iy06yTkkUfP^23AcwaP~YT}ji{e*-m{8{<-IQBhGCI!S+a)MW0vplt5Y z#)K#01mbAV-1rrFVOzvwUU6G4<>0t7`I$==$B}Z4zOg>k_-Yio41L$uIn2KRkukWi z+~4;-c{4Qt%UUK3oMAsY(2##;;qtCs=aKgp?;Qt&wRWF|n!rg9DDmS9ZLP%l8p<

v z=hM0zYtXHjr}Nd*wn2q)TOn|8L|xQCW@7>-NEO14c)@l~dSXi&^K!voLqgE|Sk$MS z-s6IVA@XvY@i5`ean?{kCz&@^x+wPu-x`(REPp+R#$&%Ed2+1w9jEH zMipJHXd*u)0}M!ssNW8>S|djnG{>L77k~AiHZdf#$kemUB;wp+qUwe`EXGw{NrQ zghaummFN?#IO9vGKN456IC=Hhv&Evh?}ly&&)TV~WBRl%B!Zv}kY|I$751p@A0Dvr zgeN@8fb^1j*+h~mQkKDi`>54xfvO4^6H+1K4JCGPJNb~V)1OrFrOh1xxX3ePtTmZ9txf8XVa=~Pu_ z!|bApc4BwJTtG)V$gibyt0&x{?Vu)s)Y0IrvNPcL!E{69d`*RaYW%UZ67kM4{7rvqe9iog2(mIB z%7Dv)q3~8-=z=;PA~{n$Q(~U|SyUDr6H;Qu((P{e3crfvUDc-DLW=XS?ueQjmM$qFGFE%(isTF z3-zaF3~II}7K@FasP(bXOP_~d@=SI+#ZIm@d?M4QV!_BFt!0sj9>j*vogU&og*^_e zyJuiF7cxWx(_&KN;km4){=SZA6iiM@dbz_-oeaezDK^mCXzIao76siUDwTPkrf=ck zgmGJWXOnzuY<+WPEpo?t7tT3bpGBhNe5_9UPud1++BMYOi3lE&mtli#AiMmHk@Z4YhXzx(H0O){oYus2^>YOi$0rM?g8Ur3M^zfd-+ieO^Qi9 zeKT5Bay56(VGOj`a2%xlc(r$w&SK%2gzov z#hZC2cdAZvQsV=Uz)#JU`EEPS`x~k&4)-MfP=@*!GyS#gpBY!IZnx%EWEEO8rY_f< z!QHG&8+We=rt3(1g6b4^3;2#w#t*EzA0!_D(GP+!jBQz=AepzB(euV(nB{lvSsd-8mw`r{8vZB!CJevMz?UE5Y zt(5fRNf>iKZslX;uY;IjK627-R?_pvpb8jGRP6p>>PKucOJ4r(j;>`C>yRp9x=Viuj=b^)g@1Pen6@z9Z8&@ik29S(A;#F^54mgxxQ779BQ6FEdP)y&7px&bo2?SlRo8MIw zbX>QrbUSbWPxr^#LE~X{8^8)*rf9tdl^(PQCcDKtDNR=v6HPs9*Q9_LvHqu`hDSaL zPvToeSi@vFS!6XAF(d>PK?VH#RqAsP^f9CmI}QU!Bko)xOsyANOecl{q&P@le3te23>=qY#nhY`b(u8^9w>2t@{lopX zN3gIG@i$}FgM%I^Z(*7|muUT_eqdzTO2k^Np@#}<)oJl2eLHwli(NYX;awt%^btB; z$b0^3c1nU1OgLEqmxytKS#IJSkA$;vS&q3AOrQm2U-?t;n0D zP5n13?70kGYLa@JyAMXn9&Os$wVRD{@6qsiAdwZl=KN)lA0}v3 zZF11pGC{*1qS`J#I)9D1t)eRmCXW=!8TW)Pt1q0aOsdyY4SOtj;HY61 zG+$@;bFSIgl;0Xj^g$HS_Ay%xKU6bzZ8td0k2>~@r{b0HS4Hx2)T_Vsr=`u?3MTq~ z503l!`bO)&qyP#4+08wG*V)e;1bhB|csJ*9MAN%UIr9Onlp1#w;r4Z4noeaJHn^B4^+Pgyf~`}xo{D9nW$)m`H07L5Ny*e zi0?Lpf&U%WI2x!az5rZXHmohl=~M6h@zcBi|NMF2dj%(jtmH*Be|)*#NB=Frl`y}7;-9I)1*tI7=toAd&3D{`1iKYwD2QK&MS)&nxAY z6&J65YS;t?gW6ofKLcbr84u~Bm-?aDgoM>^!j5Y_rI+LPI-EQSErUr6nb#125i}qZ z2o~NW-TlMEYrie-haQxcYNvo=OgCkvez8y88Xz5KGVwe~tcGV|@5}HnAFf^8Lf_84 zOa}fCZ>S(?<{qRU({mWAAgg517l1N36gpB-;X_dk)OlvJf+?({-AP0F!T4ghz1({N zru=q@`_`-tC$^ru7zU^*-ia{(BI8C}{1s3~vE?t6MjwtRO&hyo5N{ zGN|Io%%^38Q7oA;8UIADjFi4~J66v{8V{!G2Jx^Z?)NYPtIJ(qKt*7bNku`YBtVLM zo$Bu+vT-#W6cu1(v!hYdT0L*cJp{nVR;X`A!*tS4Hm6~xI`hNt8<;JW5~~GEXiu`y zzAkg#7;YIpmL3e_H**Z34C}eQP&l~hCVuJS1L6a$`7P$*x48y8cI4Ok>wtI~Iuf2I zXf7#bV!lV?XgBkagNce0Fxi(-Xl0%Ad3CC6)HLHDE9ZzIE?K{OSzbdiq>h#HQ5+j+ zBAR+ahSbZm)-2gpc*~b5sb+24QV2x$K~L0Hx_idvZn+6Pv@;E`b=~iN z*pH7Hw0%>&UN?~SX;-?WX^$x%{$ra(a+OXr)2=-@nSU&ik#9Kw&XF}usb1T2#Rp!kdiSv9`I;| zPXH_j{-_^G+m)LkIev05NTfQcu)C4iMjovwD&HPrpBpp0;4z?d$HI zcdt5#l>7|$jn`n{e!gqOqoU5}hzNeR$S&7-&mIV%ZQ7i-zVS%jxP$U@0lIz%JF3{= z7KUVzpo{yRKG6@j-)Y%6 z1YFotF_Fy+9F6!Mb5iOBZcBMQs!t!v{bJ*)NMWzV>W?qek9RK~T;v!FIjaxZ+7?ve zmUm9yw8Sp0k3s{gO$!3rfG0K@@QtfJ5=A4_Zi89NET!juvnXFzQ>qI~H$-+@{AeCl zjJ_%A_bW`ED}zvKIg{3i6CW7s{oue_jv@v#ANl(*#HAC6D&sNXvc_dlhiQ*@@xq{8 znd2P~Q}ax;9+1i8XZSLeJrTa0Y*Or(CrCsgwyPhrzE^A&$SOSRpv2*FzqRc<7(i-p zhO3t6gf^w;qP#Xya3$geN7k(+DNHSZL7FTuta#IN?`$JJp!FQX!3zFn@aU-jW7QMa z9<^O#u&bMC0kBX4+Du@1ytgio)o_=5JE{0f)(uedBD)1XX*QS0^fqzBsAZErYz2I& zj}4L9)M5@0#gw+zG65?mT-86RQbf9w^zgnhY&8@)kw@OpuBDAx1Ax~|iSRsG zr*V(ooB&E8|x5^OW5z{VGlN)cTM zEG9S*LJP0!!0+m|ef%ZxNl8$e+V#OdV@gTD-0Z9tV1ojwyLRpRR`lDtZzCeiAZ^3M zzd~Sq+*&}eiv~4^2O-cYcAW-hM}nDkTF-%ZZZ&udMLdhchcE7_`2LOy5RpCGD6fvK z;N;b-S9@CBLizYeBUC_yLW18NTlHFFgIdLhW( zmt23u{4D8~%WLJKdUU~)MrueqA1{7qYjS@Cw?*lv zPyU)Rdxnj98Exd`-H-pibrbJw2NjIz*#$Z1N3DUtatj{mvqxAU#q(k4CAt7Timl$; z@8V#yL$aASk?S4)Sv#1McF)%ZRrP^;=gDRH;iBnBPu(|RJC-e{@7}~~czyQs3fVR$ zB`9ekSbGoa)wK1mXZ6*ZSyiEkHaUqOL`TGQrR@j(2aRAiO5`L_BLa#9ZZ_!o>pl63 zQ7&jBHYn-aQMv6-^_`;PGWF~&h3WJqjuTihC9~!Fd7X)eFlL}U#wAJKr>x%id)F_A 
zt8UWn@;|>}5dSutvZ4prnX~!fhgJ7c;rkE5fB_luNn+;!3_*1Y(RWu?(XT${6k9)I z8oMY}IFMUPliMyfon zSncaQ^Ua*1EVb2CfmKY=*Vo}-6B+<(MPTp!P7s9?f#tp(SgrQJ74LLnfVwLHj;jnI z4S|;mIWAKCI#WO5UCn?pA5bfyg!#-XAajJv2?0RAe_Q)smeC>iFiNh!um*=tE3ay~ z+RX=pG*r18o0YfxBGi%hr|_53Ew;3BE^x_t5J7Zzq~$bLCb5E;LWy zK2WrT*8{#b5)r6ftf}idGnZlVZD6kkJ}=qQ{jZh5z>x-^tpGYpYny>VZ?4AVv2FWy ztqXkxRuB>NWYnGlhmE#C4PF~G!rHtg7*(+m0#v6q6k6512V${UdsNJn#OHw;j*P)z zm>_mBH7p{e;*wPd#v_u_)6)YupvAPOlnd(A#%enEY`6u;w!XkNvaA3-ht{`)l44am z=t@dnbtq=$jHbArD?^8uB#+%28}2*jqG9G_n?(EOoa)v6;SmI0BQy`Bv*GwMzl>7e zbYcKHJ$d^hG*0*%d`B64aQg_^-#DSW*gN@`h zxW~6O(ULd$|ByVl?yUY0etI$R8Bj+=Ph_4Wbw}X0Y0<6mLX#{(8`7P!@-OK)P%XV)2v zLib@W`*@@Zy!Yz{*lKJ7v(08*{)Y|sa`Vz4TP)b_Q z$F7zKj8{LEu9V96O{XRJ=z@`jgwAEs$DyT($#F)T9VB50f3$W^_Dk5Qbf#B`XFVIM?M$|W#qIu3tRdm zk#C+mu2kmSEWb}(wmHrg;GNOsO45Mea*==-#zeZPa3Ty`bJh1|s?DH^IkhmoFwJ zZc72&+}9fa#9EE&at%gFUuLb8g+WD0dmxQTBk6K8vFeE5?Ck8lMWmhG9oil29i2N7 z??3{8E~pezPdG)ai3K$^H5J5cwsD{CPW5=st0UOUR((6DfMIf;PM(Hel6yyxwxkrs zkA0Q`Xh7W*jhA{f>ZF519d{qUl(^7TeY5ql+R_gNqr2uN%z`OKWkd%nIbsVyp%*=2 z-SH1h@SAxfrO_}^MMWt)zC_p+*QYVI9VASA^~o^+Iv+EksU>i6s>*HsN1YY#jS;W8 zQE3T-s(#bh$FVn+tmOc-Dc?e@!xoG;xL~8y`5E+6JkAyM-Gv0($NnZ~>j5aJzrD2Z z0|mB2?^*YWIfvqcp#e#aZFXa-l9kx65{X0s)QIsU&c5+i!4|g^`@1*WCu_V;QhiED zo{LSJsB&1`-*=Kcyayee|`a(EuC0>?B5%u+GzymDl=baf=y?SVXhD;;lfC|hJ!UR3=wX*{0w zRG0L@9wN>Mm{G^r@lnK*((q`Xdopsg?c^EW$x-L}9lC^2=Gd)d1MrN}l)xzTH`DFD z381S?TTFLCxoDqX@K#==zHybLs&!%26qwk?%4YpqJ842O3qZN|ZO&nRJmPFhb8n<( z2}1KNAN%+>QrTj`cBRvHM2^&B#-WvKO2BbVg_Xp%Ba=IsdYy?u$EMq}tM2HqMGagF zs$xG&ek9y)1l@2zI{@q7ch;#(YERnF^=*xczK3Tw)$1E~Ssv)xasaeOD-|9e(rJ-zc zL_GpI>2qJ)-Z65N^#3lq&Ds66!o zy`s~yW{f=I>dXHSZ0&-c~15<|=V3dyLx#*s6$B;0v?km7-?b%($8T=}ppP%)VvYj}y#XuV)L zEK;%%3#;3;RZ-;m7Gr+sc3K2Mj?{)zc7eE39?u6R|5j7p^wW5(*@H>>HQFBF@p``* zzq|%;gE}BSG3uu80|1i9KH&d)CO@JN$q*W9I4RTS_g}3imIQ@czx=O5q5maNQ*nM? zZ#o6oW8{d!_$ReHD1ai82lFkt6?Mxagtkt-(29#sP9>ila$5BXF^0&tYjin{(~!aN z(`a`P5ZhL4A{rm8ot@pN3iu{vA3l6ou46jrZ_-#qKJbB#41)FQDH*q#Hl4nlrqn&? zwFY==U>n=PG!l&i?U2T2uHq{ZFT6-Z=K_yL^_+x z^dMKrj7HVpIy?)4Ew^g}o5w_Y(1^ zMDezw1O1E~7B_k6XushTmhG^reo|aHg#V?lsVcVUgXEGt*r8i&R7DOW?67bBdiF~u zE{{6f1{l^SXZRj4jwf0)T*o&aJdDnGBBT&c+*^2fuWFY}a+emob=E+ttK$d8Bvr&W z<}L(1vc&q(f1kP#r@D8vj<0=I|L7rPd>muQr7MZ5s_e=0V4hN5b8jKv0r*9pu4*d3 zTY5`a@l;h};!tScJntlNY$@7X>I%Cb0&FC0p3SK~;cVd1+#GPWSnVpjOL= zzNm+@ySrWMf{k|>9^fFn?QLMxO?3O#tw3)ZFd3uVfMNbfSpaCkQh+DForuZ{nxX>Qcn`r=KLTtVP=Ntd8I>2~epZP8>I35H874aV;>O_f&c-WQ&q zMjrr!Ic_bbV~ncI#s+J48s1j2WYh3s15;u7q^{55(#m^=S8A-j% zCLCSXau(}Fz0ZuxLH0Nc_#zM4boNL2Mlq?OduJXGh`P|nZ_7sSRfWF3j0<4brMc`x zKSCj7AAq;%a9=9#5z0At@Lbi-$?2or4xUwc7OH*J@AMN&wmD?;PPy#IU$&Q;3y9@W%ikX2qsL;Lbd8`2&RP|j;{0>- z!N9_OOv-=~5kkB>uy4+P-o23vN`Syz1aGXm7L&*7c5Hpl8M3lG~l9RIO5&*E1hQ`F5N zUovbS>CZ{m&A%Li@}g33ydvYP5~6P}x#tfPja`t_nfLr}&eYg9E|p0B_8On6BOnUi z3u_IA6(J4>ndBV9RzA;>f3SppWyj@VL58&=80xc%!89m;XlNr4lFjNydpH4?i^RML ztPKPXIdN*`2RKpS=}We-KNRC;KwmKY9%dQxc>d9#jUcwcfQNB7i!q#4vgHM3#n1gm z)%yqL{RHR!kDv8tv<+gvZ1Cb)#UKDK3#;ke>LPP_0TAHF|3QEcH3$%}wzZ<(z|AQd z6zJ~{`%&p3z#?{ok?v=^v;t6H>X-uv7{t{ScFSN$`*P&et7rT!?#twSY0_Gw0c?|%$yD` zoUNNsg)p&K&8!Ec)~uFP*2&hd#&MJdrv zn5{ey;Ij~+tr@>(?)F3s91V5LDY}0B?2N-fo-XCUpaq=D=k(mmdVHca@$pd^A$HQW z=+WeYqoCt^YfOY!_+v4=dl>lchNC--?GbNGt3Ho15RBed#doFMZ7R#iA2{0Qf0`5W zgo`IuvrVvBI@4R}Huw9+vql{3ZZ1_mroB$yiVtahtvb|sz2OlfclM~FW3#7^Znq*s zXrFv$o7yRqH&*^yTUDj~ z)TZOFl561N)cB?}2{mfzN`YU;cECR|@+5T{&sKQzf;EHzMy^jRc_ffJ0;7684m10! 
z{BMh+7({buS*}T<_GZbOAWe6CEsd017n{nxjk=}S+o{Z%-7Jv;&S6y~D4l~SI4*MM zbk$HF1>GsovsY1+8j%)U^(58rl*-1j&r{6lJ~7?Vo8*!bZtzxSp76A9XW}>0A3J9; zmoRg>C&$`41-DI*F32C}l2P!XP z^#~xp^II5CIGGyMNEA6@+!t$#3#4R=s~8WrN2W}j#SA6 z(mzLMpB+_U#vvV_xS$Mc3j=#s`vTBa@@ox&24k2W^&-0MFCH`gR_d)BGcxRyz%R{W zXP+Fzf%WYxu5lpt=085|>9W&z8ud*hQjlt~_5!$kYo-Fske>Vf&m-U9yU^*EY<&&Q zW?mI5j@_1aMHe-=_UL5OKJ$s~xA#oC6I`1U$yvlhq>-uG!U3|BSq|aFiMk?z|ZB{ceG`KtrS~;5VahZh+ zV!b#ikPp1ne#TB%sMm$~lci%3c%3$#l+lK(D$AKWl&+HliAQ>wlAi9ayeKW9K9{2+${||J}56G z`G#0w`LjR}Wv^^9a}vdOAA3Z=C4wztg!EP+_CM7&|VKNLELFcz9#g zM{xdt1_3?($~dSQp%pvI3f2T?0Iz;Bml+zW>IA(S(jAcMfsD_;lmc5ZsMh(r5IdO` z6Okt%jV}fQWKod`)~nJjmk;9gsO(E%_#o7F14oM$ad|pbOT+wWST zs>+ewxfMI{Ix2Fk=p3$?{5h)z-I5^y;gc575Gqh6w){Vwy$4j2>Dn&*qNC1?ql^`m zlI=L7(nLjyltjnE2uhGDQlcQDNHIu@{_;)e|_H;xcul_QQo8 zk|#pjzQxL_Dic*xY7F6rVjsZzDjb=ts>M8!0(HfmVu!I{69>Khg^PS1@73t5M~`OX z+Jo;nzok1Uwhe7iow!HCCb6V<3elatPttW0T=Nh5m$;1pfrrl+#;Zx_=_qV0Y@}yK zso(Y!PCow*OSluhS=#5w8yFb!Dn538{rFY-?Zs;VFD^S%Jj>gada{BOVYpqXC{9PJO(k=%Xi*byqfqv5Ben>l76{<>SqU+dCf}LR=A0BKno+J# ze1=hHItZ^kyfhn=oy?k(I`MG_F)RrJd)$KE0PZ1ld?A1dUy^?vt|6zJ1=80bDp=6~ z^?{TNE#m5^ed3GNzE9?@CQ3B;&<}x-3I4HMweUN8{Oh)R!3FEucJPqMrN3~?%=r>K z%oCsB0~90vXy1Sl=r(mH?R#RE_pV@n5}p6ec6@%fQ9Ztnwh2B0H!?JDSu z&$N#C81a#i)evd7Lop6^aZu8Sr$yw`%xYiSd`FhEKvy0>d4=tY|>CW~7U zNvl(;4sntJA+Ba-8Zc;lFqx!IcC@ptpw^I^8>xilaHr&aBWB=(2-;x?)a-o_Ov=!T z1LdSzuxGm1r{L;yxt%~kUirC3qc(&POP?p87XCO$vGBuPO?r2l`)Wpdub57Vy2Lia zeP@ypv8POQIc-4?<8XkOuhFZHO7bybemni>OE>&vn$knbq+jofqdtmb*WE!)`_M?1 zNCQIfE`ovO6k13VAfZfaju=u-HL*+jif{K`PQaIVcDpSWOMm=*b$NVWX3tFgM`F^) zT|o4~6@iX=v<2acBmp1+l-0s`WXAu|*#6)D`9CT!pXFq>>OGuPS)!zYUF0)BKw9y6 z`OcxykjpBe62LG@zJ2?)AuRb?@g}6MTEMA+8GQhs*pY0%_-FTb0 z=ugG7V50^tYxNQ^_?pYAnnWA3CdEZ}<5K1nBANeQBiWo`MI(fqG^vQwSKj^rC>c}VhM{Ik$A9o+W@r?2Xe>$G?N%Rn zmQ;j;1t>Y?h}4)zIU|0li`C|myRqXyvAety!aZ@Q1thqEa|qrJCVdK~6N3AtBs4dI z&dr>qajdKKripki9cC>n=9%PJG2K0je<5cDr`*BuxG4%55w}jp1z}z!H8n zw~J!T+QX8Z(2ekAoTbLEgQ#%~V+fw;$^g^w7prr)^z@29mdNG;g7}P;)@QhAb#gnR zBl0QYpTHX1LtVmeG@l&3y{EuEn~G5xIWb@6#*3%4K64If*NxgvXte>5sW7*}-Jj`- z@}45tkKB&(J%c_=>9j5n)$Up{UrbBbCU^%98?81gy6g-DXHFZArbKG2)zzHW^EbQ| z_I&}qTcr|!<(n#yEQ>kwIa@&SI>V|#*CtNJS#avFIP99cEeW!{4vu$s=BZKQR3H*L>5<25hnK}aJ(4k+Z(6+Tclp$%|DFqIFaPJ7`uU)v4vSMi(#ows()K* z6+e4H%&W#^sQ8SfALeW+<$9Q?628x$Q@?g%(NCCvGYwY0?_wscN(QZn=Bo?9-)!ak zIx0>8pnFP@No|T&ghzdY_jSk!C=!8%1{BsKwk_ z|2F)D&AKO|K<~oNsBNbu zD29!Tg*p5Zph}9Nd;J2$^(dleCUrM~^ouI$@V;~bti&+Ega7O#zf3eo5kXLlLV%93 zr>nFFFY*!+?A^++3DFcw9NRm43{f+m(Vi z2es*_C%d40{D?QCm++$tn^O`;`Vxx!1G z=yBTCR%7y|Kedyif`8as`cR9dyLJ@uS_YKUA=li6o052G16Z&&&YiXC zn(phG#^*?%hb=$d!op#rf6Mv4JpcdoE30M@XnbG1c#)o-{$^;%0dTcG5|pN=jMM&` zTXJZfDNKuAUM-!kT)nI_3JeS!o|nM<3qx}LiZ%z7{OqQ)Kx$r+)OIl-U`4V>B$`#+ zLU0tzCyS)oOr?t+3t*I6)o2wCYMsAkr@quG+%tW@In%g8-C~6ex{LILu z6qHgc865cQl4D!A`p|3x8$YZ%{yaT$2a~vPJK-N^_=|f6Zg#wUnI*wqxpJiwU*?x+ z5!jJ#sHl0$b(yWftmy`ThuZDE$b!V2k^y`s&PVL%npd|EY>=J{$A>_SpHU>t@`;1e}E_7 z_k4jBiu1+jS@mpUH(b-eFy~$C33RWmb~4HD`q7xNcl23;znqo3;@PsI{qrT?O_OFE zQj|S*9GGBz4`DPNR@I7q*)q4qv^z8`f!?%u9>Np)&OrH5^RGNm&5W&(QvBr}h4iUi?;g$#2+q5f+5o0WlTi zi)Uxv1@b0t4oAQ0KhOUr*9-eELq7=iu(9&W9 zpo;-)K1*=1c4g3W6UC!yB^Z0_-He^arUB(y8vt3D#qH89QpAw%kQv-vlMm8TvDa$J zy9|+lUdJK6Q~z@_A!7^nVy*7Y6Lva+8Gsep1m&Nt=L;wP_O;xMh<+OwcLji@gU>MnfculM16p%+?pab1Q_NHi5Hbgw%N2UI$f1>vNrXPsMp!plbWq zlE%ZeDXN{6^htW?7$$xnV#6R*-wb%Ppo^qIs~uQ&Q!g(x^>A>bV2)S=x48O1Wn_d8-cF!?FWjS6dkMMi(zx!| z;2Qi1jg>EG#Sx=HR#R{e%HC{iV+TFxNF#mz#WfZ~n8dmUDny9|;)H zpJ622hX5L<&!1mt-x%}`mZ5qqYYuQVd*8sn1$>3tM*(`}$~O}e>yMz(+Ay^is;#eI z!8o=)d1~r1q*hDmwj%J2-zDF_zXJxf%P2&)m4!u+=tNkUDYQ&G0@S(V^%$@mgd`Hi 
zu5Q@YR01rlt^(rv@>rbWZZ9I=)%jonEHK^gXBZ{$S)T z+uDeyy1VAR6{U9VVus{e8Zgv?cI?;zZQHi(CCw||Enn3~Qg2QM^s8@^`8j2!5Chv3Ck zA>5lEav9?60#vHBQWaI$^oA}0;@Y8N{t$j9WuWI!374;EI;8ee6fHXQ9xa}k)+u%f zT@&qFoYF()zmXomEVP1^UgD%fXvzs`Y1Hi@Dz{Y6gY{GKi)WNw1rhs;9dPFj9N8oo zzgNEh=h|xi0C~=Ku}I!8*;`Oou`m=}ig`PqVKx3$+XGiU$zR$@P_jlk*kBasS*k8o zmfE>@-kv$(*t-O1q~O3NgAzTfmXs=7)TTc56(KEq0lMooi1uXt#7K-OMf9 z7SRzyp7%9otZOyw=~LwcIR#{rFe@_2yt*FyIqTJ3t#j2;R|n!#r~2ovdQ1Cqu483fDVaN?NHG8mWkA7hxYdAE@n&g(zLosF59nz;vf%fh>D#9V zqG*ft^|MVrt~Md4aVo300#mV;=W;KC_vxvPL7W21)Ve+0~5yX>JV@UUT8U{q4r5Tc$q>B^I3X!cEg6AP$*P(2`44Q$jfu&Et8%dB+NpB+4? zPKwgjnNPE?=bfZQ^A&>HiiI@6gU(a2JJ#GD`R!f!UVB2UM3~0^MY61@>}=@S#C=vB z?}-aZ9H+A!fZCIycpHrbyhkWMVTRz@&F-+KXiHE%X1Nk$K-s@uUocqkVs zv$o@?rDRSeg#Bu}OzdC$9#sIE6@XGBQ+FPs@rWOd#! z*U|d#@*3@VhK|q8qoG{gY%OZv1dA*4T`pVh4k`HRKP#eNzT0xb4m?V)Oa=6lbH@+X9B>o< zxc}d;%SW>;m;#C@u(kUh+O+#L^bm$Q;5r#{?b@}{`{~HYH$+ePwsp)hgrb2sDE%sz=^ zVd2xihE?lJWdF>`$uaQ@RBRO<8}*>rJ|Xp#t9RB%+$X<@89^&~W^@!51*oasyJ-7WZKWi2)IfYO5Xb;iGPS?h-KLN=y~TF!>$WGS?xu2!B`QphG9F5gohb6Ec0rCzV6h( z5=L^GKS0Z=>$+@-Z;!KDEXO8|)r4+`V1a_x619((Cm=6|c>_Eoe(u&oLZ*JWr=g>6X)xaJ|)!RVjZWi>V`sXo<3psaEPv7=cPZH z#h;3?E3gVD3!{OPYEP`I&4fp^a(`88>n$%g`l0VB$0koshNpF>$zFh^1oRA~W36~I zIJmma&5#D9REb)R^Ir6xMdFZtS8zT%o^W4VJ`qb_+ty#|Ruj2RER?z0Fl1$F7j#rX z6W))M$XoattlSI`6B%%|VBLgzd$$=M9`dGUPWA?g!T&>F7&IEOrmYL3o;N8dIrAVVBNu=OmJlEkmy6@+ z3#AKZ6Rx(-75$W*CbMF%XycJd{^!yj=>$s*KkRwM9X|~(kLAp=pIq?M6{jXoo%F@KXSY^$jjIH+qux7{Z0ZR>p*+(%1=M~Bs4s!9FjPxgCn3*(t04ya43+lY90 zN>zA^{5#-j{7A5ah<48B8a98fhtv9KaBu0P@UZCZ%n$$D4wp64LzkVvcYB)NtEzl&8(v?ng1vaY!~ z4&gyqbK%vW0S1x08pgDF?`GlKPq8cX^%+T17>e0M^HAbG$qR#fp#S3A?5vxK|ITed3rjlF4T5ha}+ji049BDWjsBui+*f{A!3VJ z8pX<=T<>)_gZEHHp{o<1Wo+8nUg8cdf}&qG*&S-qF87nU%g~Y^a)jAoQcg_JG*2j4 z^*)!b1k5(|jF-3Kk$m{wyFAl!0|PKG#H&uTuXzjJUVqw9g`b+~a^!LO?+Qb_WhMA?T}{qm)Tm6c zj>v`+-zC2rM@Th3$zRiB!#9B_a^QkyzW$Bskd;+1Hscc#Q?FWNi29t4D&>W`DxbJD_nmy_AoHAac#G&HL<1q}TAu;M;?iV|>%_^cs7}sb z)iD=?6fXv%HQ;w`JSlM3BkKcQ@i4A*{H(k^!C+L!r(sGffUt@C>pLP)9oULIxk{(d z+yGxm5>s4N*b(C>E63AK!kfUI1uX%(`0BO@yrMZ&vyD`)f14T4)m^{x0ua-jD_yk9 z>Ds1qn*wH-r8SU_@6wAhQPOFL@z@(iy?rI4mJK4(p9^c7Pk8X%?dsACV|EXWIlC9u;Hy|o}TS{hS>c#p-MVQa(Jysu)WF5Cm= zKAr~ADiJb^S~ip&_hWi_kDa(8dq=M4j8C3dUX9opO5$uHK^N|xu`Jzz;3=EtR<5^( zfIQpgn`GdcwLY1OmqbB1`+Pj}t!lg0J)#+p?^7@UL^WkWeKn=sHFGy7P-c|I_S~kO z^IDsSA8=m(ySz+HmwPah=o4lxwo8C+0$mrI5>`D7`}->hz^K6x5E2%Zsj}yNWSf}3 z@w*y#PephWE}NOC&wG?1stg#CqHfK-d($5RmpNj~+ZpK7ncm<4 zJ%3B~W@c5Y&K72R3?fMM%W%S%>Z$7!B=lm6cpp-9gc_iBP(w)o@Lc&GsQtcL7P&nn zJj>Ijlvbj-FFDUop4mZMzrb&;;wTn>2ach`Cm1aX32`hd1J%_rYOFxEzy&bFOe70U z(smMde|nyh(}nLXky?Y&l{`sDxsPol#D0=qfsulaVC6Q#8kgNP^0@0=e6CXh;@%IL zvx0pS55g*X2@a2>34Fc;c|$ZIf(Z;J1PqQ`5Yx%&?@Ua|=D(y~3u2ew7GfWOCmde8 ziJ;LXq?}ufeK*T659-|91AOQ*QR+BgnLN=c2M#)gpf@DGy1U2r&g;%T{rm9v z5B{#fUIpA~FiX_|jPH-09_QW8`7ePd+C9s#`*7qh@+Z^6Mq zPq^oc=^W%(vk6d2FO~F&$mOf+D?myJY}=g-)bL^ST?rOmdiGt68XrxMn+Y5bG0F?!<-Ib0Rbj*41({}`17 z3$kpYLvm97mPj?dlpV33%ooXQyJ>_AqVxuiDIe?e2!L;XV?MgSf=3^eyZm z%{3$SG18zpy88JTQnkWeq_R!5)hV zdOUsNJpIzEjS%pkISLuXJzPq%tRxElnd9T?Yq^<1E~?p-Oc##Zb5VJ!x_;F`M2w$J*6 zY@l4$@yuio?)`CSJxckxOnjiqPj@|6t5^XxV_MdHKE|x$7)hZA<*GRIWs+wHwX-M= zl8>pzgjH<-KF+Vi_^IefU6yKZYZT%^??}ZquVO44d?3p+M~e*y9z2Rdv>NVvcc`0- zH_YhWjKR&jk)dZtcL?`l*8Cl}D|1u$^U1A{1CRdwl%`4T>1Zbk5qdCWLuI14Nquo7`fWjB=UJ18Ang50eVJDdsNc0r^yRIJVYpiu zCB8$aTlN-<+Q9@+*iBmZd*hkjVT<)|B}SS2(_Fjm-iT*^t%j7~JpC%C_hxQ)AY3wG zM8Px@ZNbL7CXh~(01H5~whfpQ>O*b^7*3gI7F+RCL> zvvsc=rs*Kq!Ea+k_BnC1Y+M9rMFH@;PZaxoQ7dr=J9cO>fqs%sPo?jABRu3k|7R}L z#YQ86mrL>_LE|JUYiIw|F5RF{xDOt*wJ|_%t0w$ZJ}Dhr(7I#^2xyl5>^ld{eVqDj 
zzJVY3|Kai^CG7bXewFYwEGHKiuS(X&f4&LjM-PG>5)7Y6VdL^;@=S}QG9r4p6t89d zyE=pIBh)`QxU5?r#smG0|DtSMi2s6s+`oUny79#HTWqZ#j4Lm@7%mTS;N!P?8Pk{r zZz9w(7foGxLB7C)`UeJrhPDu?=(mwAbRDCTuDGIw2OzK+T3#MDf|;{g2u$>A6k(yE zUjgU0>jh8OG-)$Njdb8+1IDbs00QEBy^W%0btPkE`D&S*Un4({!H}|pO>^r4Z89^< zRj{Ns?h`0vaZ7g`pk-=>DmqDk$Q>x0rL*x6uu_T8dI&5Sg%Pgw4b+Gq$Eb_C)Br;9 z!rS@loy_0+_ese3rXbdn3R2}6#X54-gM&fe!Wunq8y=v!Sn<~czs|cOX>QtAm(o<2 zNA0B6*Rp}6Fi*nAs()5oi|4f81`}ID@KBL*AkuFpB1==%Vt=ob8;q1bH<`FKc}~HT zN3jtqDVWnhxi-oySDO|SGUlxOyxeBAFGsXSi)8R_5uU<7lS&ETxa(y3$f{vpp~hL= zz6V;8{gk_z@7DCn#U{e9Df3Yj#*wsCy5NClM(!T*m#K%*gdH$?UKwHo%KS~p7r3gQ zpf%KOA?b#?JV85pWjpvaEF!)UO~w&Pp#$wpijQ~EJ#y+dn|5E4oSTgGGqh^bSkJL= zWd_&0C7*t`$ld>ME&!0D1rCOHaNDtGPor@3GA=^bCxoq#%jc~P`-zSCc!ll9P~2O)g#OG6q9eKw%3kjak3ok(LM_Boq(kgC`J7^{jFqhEd%=Zw7&k^x zncAGBlR-2ih7ZmVU)oMLL<*d^h`NRu@`QMTTN4-kXFZzfHDb_tHqhKSUPuTNS5Lk8?cY5uR@)8E-m`fP$ z@i4FL(qj1&r8Q8DLrW^y{*UCUN|Q9~Oy>^J#-qS?xMi`9YRa-NAn$J9g;iT*8dK2X zt+DJI5uOIe1xLVc!fy{riGW&wg_T+6gGJ#fE~da?vaUk^OdP(^2IldldC1KFWS$*TAEc7UVT1Cl0@V zVAPu3492Fd+)+SWshG&_9%@ejij?fu-a3y8O;r+O(WuAa-AFbed!96ZY7*IDTtz=n zBS9w>Zd5$CCT$wM_fEanAHcNDeQ}?9m;L!*<3JEL;kgi!zd(#=V$fMzrR9eL>dJFj1u9p$Xr%WqbL5(*_#Z^%d6C zt)#B@X$vqR-uS0AVOmOr3uFg*TQ5(RQW(5foi4m-=-hwf?$QL~NL+-yy6_~^+Y7H* zMlNVB`3=>gVh(L7@f;)Ner~_;JO~HzTKO5Q$Mw&t@n`N?6SuSZnWGs}X)x&0XBpGe zr1IVB@NyG;S})T}5_=QiAK_T)g)g|zfBLn4$AInsF6x7*n>TNMDZ;RC5o8B9MJ6Dg zAsJSKykW6c965GuB?P=foR5RfI?+F8pWnAI)CmDcm6Z>HJ)c4kY|wI2Qb5TA(H>fD zpz;5%jfq@5u{=7v7=I}-dDDaUcM$;h?9zsd!qBk>5a*QJl z(e4lDTQ|{QHQ{>O1s4b@d4;|#pv!NL)T%Bt(d1FS`YE-3fj4eo3#0V89^RU#F%Gi+ zKWJVy(tSRwkK7o^ti{Y?Mn^|$xbCdOlMVCeeoZ;9{FLmNV{rCLuowFdm{JG=)At0# zvtmZ5mIc(KDyQ@8zG3etUG29|$bJ!>IA*j4oLtxeg^Mgz(3~VF$E>>0oZFfnb&L^n zl`xJmkvazBsPnB%*j7ItZvTX~AHIU^#lC@T`f!KH{6P_{b6oD6-@#5pN{XBPDb*i6 z<;rwf88^o-Vs@G)pET1MqB9NRZJtCHsjSc<*6lrodBt5vny8Mp8#;FxvQzrv`G3dM z<6WP79QMlJ!Y}VUTQO9n+7S`OPA#|aKb!b?@htDqMKBgG%kDGS(RRg7#eVBlN$G^J zBqG}#5kS~HRIbjFq%11;`te!8XvbKFZ5go>ctcN*;;-7)-8?_%jP>3egs^q&*M99*wj$lqIoHRrB)>e=bF5tfHH$(?95(pnv7-R zrdNChil#%?_6!g|;oT=BxFB3-O5tXL;l4iW5~Lp{U-%AYd%ja)VvH#Sb+4C9vW?0zd6a8@f)HLy%prUUdJR6A5#)M9+O$!~;M=UT2uGc5_OH z?Eci?;14P{b<)8M6fs_01x$oh=UwAnXh6JAU-dqE4I)3o%_~}cid1Q6U#A2-Boa8g;i|;?Q{PaI70E^aTbxXAq#t5^Gs=ZtW_YK!;9uwJWYTf-u%%{8Lj#nqx|- zTN)p=Gs7vGwxvV$p(!AtU0oTPk5H6vlMt`;~LX(IGX?p_ThvnX!GVH=nms%g} z$Vec?0DKL44+11tL4fsmO)Jkl*f~a7E&ZwsPt;|5WsV-61MowQPG>J_=ybwN?2n>Y zV8#;)3LHe7S>{mu=N+$S(f@3COH02=2Bc+ZW@ZNVS6dDBk@RScgFE9dJ}k@JN+2CES|$L;DB+xgIi65RP4F$Cf}+># zT!0?62h3(T~h{1G!Y{<_IJEY(yss-q5{y^9K4-xj{=eNc3uo$c`OH zx0q$?X6t05YpR|sI!inzUHKmV9zERZk)yaW`#0o~(RT*9I0T?&8E7*RJIt4`AwAkpnbdqP~}mBEHtT zKvth;MqCNsx42eSky*qoXAgk#rOG07N*$XY^o;fm-wQ)bpZY~w#J#3)!mGsr zG)l47J54mzm?q@Cq8yX@72$Q~=kM&LM`>(GIgdCUljfeuG#;1!>zhs0 zilJ)SkZR@zATRWcjBX=!EK*tF|C00M4dMMH+8ebI-D7N?quV2* z4R;Ro8XDe5bnayDBkU3@D*T*mE$+`XR%Xxy$@DB4TSVvgXPFiit`e)tF*^|ni*1fPSWzE zO$<_1YsETQ3+Sv?0HQOYTW(m#4J*|bo(6vQ{<>67uuTmID>n^m{O|A@%Sk*Qz9rJnOFhT=iqxn{G zyX)3qlQ!^UE2Q&4LwK1-<(@t*=yu7Ydf*$#)}Ogo^@g%KUmbDOo~)se&3x%2x6jcf z!+&OCNUSR3Z0a{xRlp``W!qm=<0*5zgkPR2aBcS9jwbV6nr-stFT!dTcp~h*hYS*V z2l?BLH_SUo%VQiSy`;FY(TUY^8%H>+`U0iEC8s0ABZ_|#7 zigMSF?NCWd<6N5T&h9wq<4qf7J5iTq&TL5wnAF9fJNjCboFQEsQGu zf$AuHbZkyIvMPFGk9zPxDM!Fr72&Gk^6n3Yal(T@r*QZb-oW+&yP+z3jFd2S5B)4xDn?`5M5Rt zlJsc)QRLZs^j~-nB?oXJtQb|P=IZIuaiBwiO#GS-*@EE70`yRG7fk3Vv1%=~_HXLx z*v#%PO(I8?dDz{_w9R6jk}aV1Pk9;&PZd#E0Gs1ILY>dH32jM6n8p`VoBXaoviC&?^l!9C37nRn3$b(Fo@#))QnT_&Q(}Y_?x)#(s^WK7Mgb+;B 
z#P18Gf+$>fDef@$bq%!CLFiriKCg(l;ytiQdUKT}HD@4eMjZ6}(^sKK?pHju7qp~sJhQH{ILJ)O{MM*U`${|t%n}~CXP)1}exd-m;40{oGxx(e=VU%_{fBvYuEfF#Lmr9Q>I!_0bAId;$umLUzp!=$CSGM$+3|%@tw8D4ccWuNs+mVGC(gb@7dv4+RkTPHzvkM9 z6IJ)-8fI}B)+D{row}n+H{0?^1Mz`ydQ%|+z@1x|M-TMe=Kg-LC?mJC!(+xlGORO2 z23(a3kTWpIt-z)AVi&e6sJUIglA(g<73ClWe38%EH%2!+?5v#?K8_I6Bi~` zN8z&TW7fWO5ZSvVAEK{sTczXsM;Lj@UD%ui)bFcdhKmRH&64d}loaM-`81?2A~V~i z7l28ctRVD0yFWMy<^B8LioqZ{75;{rsttr#{pp=tMPCq|;Kv>Z5e%T{gY1>RKxk1n z{0Gn^Z>+apcr$>f0P^l#;Lm5}V+G<`r+Nzb>H7g=_HFBbS})=6uk>7wQyiO^=mB0$ zO(%OSz`6p4@k7T4yfA{(;E_H42cfKgNXHG-x|`5~M4|u!BMAgL%w~A) z|2;z&%+3(deL!$J8_ayzO1R>Ub&80HsHcH^KikYfJ|n#-)8^Y`aL4H1zJ>GMVo;!;9hPk|V+gmPcuQtkJ?8#~J?Ww+^Bz1M zu>~GfNm?}Kz?`QnRaz>)V3`x3sEDkHEMGL;SFZAu5d}?8^#|1>&XS{~5oc1F!w4Ga zT7Y~fiQghlUDQ<6KyZWqwx^-)PE3WobkS$}Z1bTBre-E$K;t3*>&?DvijIEGy5D0O zcYj3BYG;R^_oUN*Cgd!Z&yp6)(Xuyra_$HBId@8c-b1{P>bD+5R|Fs&W9>KX-ppL5 z5+6x*GnzCP!~!?-AZa;&2)vR{HvV;rxnT+Mi6U*tt<>Sb+$o;LW=h1~*S(*R?%`5) zEC!;9TZJC>q(wKo`s*KlUS4xd&yv2zX4dN(dYj#VwfrNyonKRGP_Iz~pS(eW@~)4Xglh=-|(Te*s7TVF+q}!C_csgxZr({{xx>+NFMv#z@m8v!-gWqntF}= z`aPQ=2v9E7ko;D(fuIJhw(Y_Bck22^P&vj0)LCTRIB?B{-d(u44gx}B{W1eIuTp%f zfb;kF7|UYo=j56iU@$G`i0eABFCt@8-L{a>4=gKhr5z&vnYn4&iK8l`>k6Y#R^w4_ zIlY?zK(gQ>??|#;cymna(`i{zRC)Rh*8Gue!C7^QH+T9)BPDk`2?DFwIbR4?Wo|~f z+~hDX^;#e^xp^L8^ya=Lv}ggu!kl}3qh!gK`{c$)hh7}J%8#J`bT^_KbHW1gPoIZggpa;(L9 zKwQ0eTHt3!NC*Zk z?}h0Q5F1FN%OuvN&C}nIge@0_6YUbH`_*%nGzY+`YN{UM%behHH_*uRG*vCc`f?K4 zBH@B{xKH?n3(0HuHU7gV7VKxeCj2ntAt52K>p&y$ATZ|B05kE%Oer~2LEm%fGB=lG z^#4-JhC^T%l0p#v6*>;^j@pyG&%p*Es0(z3FTWkSaCtGH?0>MbfB_;PhJi+}q-1q6 z2*j9Gz(X$(dT9YT@bw_PU#qN;%aw00rJMc0&;{-3a00_v-XD{C?GkJsmSH*a^_T1( z5VfO$9>Rfvne{`_U_%e5t$`DPe^$AfyBC*QPvj3-wYIjx<;}gDFo`)Gfukz73nx(% zDV@YY=KN&i=-}!yqEW6b2&*!0D4aZ2{sSpH-h%S-CVi|Cub5YoUumav|z>ZcgD z2GT3=?t)g92g%0yC*&{FAY0Hu?$0boGt(xkL2zR8dVRFCw6~;R^Zk@T$w(PxW4QJ#fD) z6S<@}XPL#0Vb_2>z&fvbOuEu%^9bKwi zEKzzk4WaW~KHdP`M&sMippH#dt?3e+-))q!;OZ%q!KmmR$;ckC80OUn>x_d`)s?!~ zhn;8iG$F0XTvY7!k;+tSG)s4;fJGRb0tMX~%ehe>(VIKv)YR0R7I)3tAx0m>w|Xmz zBH!kXWdB-Q(YvZa2m9)qJ^2t!bgG^&S-2%|Pv>%FYks!#jA_ko{J}RiIMsF}i)QG| zJHsnlO1PdS`Z>MV#%=82PR4-{FAZ48Lf08vyg-o`uMyFVhnh2cbjJu9mA8CGosBY0 z)pnDY(n-J^|EyVwv+1IfY^%7mf|M>f&CHl|RAEeV!DQYxkH}=ildRe2JToy*9mfhf zC9iXVvu%HtL2IcJxY?*uc|{Vek}R_ycSxEu0j7GdnMZkwNBa%>t7%@XZ=~m`%DoRT z4^Wz^&}Ld;;jH2uHOe=G%!%96aRNIwi~#SkPg74Ep-Mkvk`JiHP#fS(_0E^tjuSKc z?oXLcv6Gwz;&a}%3$nrwP|}9zflWwLrqXN_l~a>PbX+q@s4zQ#3czOod~d?RJAS(< zt)XfI|8{5CtV00qvv`-e?svV0ol{nUwg|=TY{43?9kmA#7yb;jiv(UiD?g76(YAhZ zwgX0-)id#diJq2deNxJQZA|1lGuK!cWb{5Lsuo4&6VteyCAI8|cp6Xt3)HdD@r?w+ z4+$t^WbADr`IBOKt^x`IT{w@xQi1^v?|4-!5kiLO|y1$!%2=Y=!;SI1#&PH9%`#<@i0BD1@85pbvtC01} z(R!dd!aE6giTwR?chrl*}mCw(cEmED|8j9o>QfLy;4Y=O0s_mGk{!rBxa*kf0I zxR=^^QgjxFlQ=xJA=UQ$mnI|o+6rqo8CmJ;!wsG!A_EWKnmjHo zSENfT=XHp>AVWp>X%ha^le>^L&}fo|@Ks1g0t*h=g@Yx5@)CLvwliP7?|2Zt;ysZd zcN>WD6(rhGdNd@G>^nGURBPGVseJKR9WU<1Rb_LTATktHtEc!aP9K|SN`2-hPZwMT zGxR{r)mnV8wLbmeoY^UK9omLMapF5Plj8iI@QFiiR*v{~{w)D^GT{*EPL44TvDk-= znpg!$VBgg08CfJP*}oiNBpJR`;JZ%Wi*g~6Esb2_c0vjs51T)<@+HK?5^DkF+AirX5>a4z03?8!43fD z@ij0FH+vAQAJ+1$gb`O0YWHLTFeyt%P?li$7dVCzQhugC66a(PLX&QB>#L_%bP9)S zf>RO>a*q41KA2PDgqMlL|Mj5)=t)jT!?MH)C%e zbSM($Hgg{&&Y5!VO6eNaGTWnK#DIyXquReYpbAK=Wg;`Zi<)l z$TbVD`EjsD4tCsvK7Zat^c<(3>YZjqXEQE_ZQAXAhAcu77W&=L9%tC+3NI-H^{X!Y zleKHXrwWrQG3?L|QibFTjcxJXW8X7RiJs5Sq!Jbma!Yfq>vB7$nzVS<_8MkV3{(hDxgnt75`(2Q_#c@I3uK+ z0wg+eLMM*#Dq>s(6IaCR9HrIs&$=x~FY!Se5$pZj@PXGYZfA82-tPe+jRYn=w}JVJ zBz^w8?L?`5+#` z1XS5V2zJk^NGJ%x{_H*WFz%-wLhiV{_ObTHc@Z$fzAze=CcVp11P5WC!tF_hc2PHi_D06=$k293 z6b^zPH0dE^tNb>Z_QHPGcUkPTs)6!ZX{;y~6@>L4W*_s>;r_K`wf2Hc`14TKQmo?z 
zfl4>{ay|aD+fU&0{U2XGMGFX1is68ISA>e3dHHf6Ywqy>cPlIU^(Aae3O6p1Kown& z3k=)^Zv~dOus%P5a|g~>THykd&vzxv8Cl7r9vw2VM9 z=z9?Gw(_B+AG&fTs4O-l*0WA!dd;eH6A6%T>%~tno7psiL$g4P*#dWG>ALH2N zz*7>m$U77P79Q*^4dsZl_Cy6QE_Eotx1qjWFeBx}IS!RKy0gt-qXRf*%G>raTtHsU z`Eac(i9C+_N|Kw(1@#c?=&fQ&ySTa<>i|19ypd*rQi=!AcnJi@b3uA>?|+3_1Yr>? z-vf%^sRQU6AGB*!5BlN=)}~BR%}A*O!gOja9xVH`u7BQ{vUw<*le?{VFaB)v)~?d} zzNDLrM6S>}{0|_Z{xLls&ZDbWmc}d)_?M?Al4tay5?p$VLmM_Fq(*OJMNA(!r1_Z^?M#jZ;Qu8&s#VVjL8vaZYfH|Lqjn2cNxh4k|_p9 zdD{{xwjHKb_UYVv@4`FqZ{aHNUsvd%g!PZ4rn}GF|EdjyfPY)DH0DPVwQ`pf?Mty+ zqw+0ODHZ>{?Nbk5p*!qVG<&P{U%X4D>;AdqFa2dnKm31j_9kFWp4-}R&{ntGR;s8t zAgNMCKtY@U38dYM2vjH_AhSv-APNRV2!w=g+d3emAcM*fDu>DIi3d zf(Z~ILQFyuLZ)xMad)46`d{bz&iD7auu!W6c%SvGb+3CUWEioyu`##jQ>ml=p$1&x z`1kvg9rx7q%(JaO08amJzdjiRdKUFz3`9Cb&lS4mTLzyDj*hO}^Ltmv*R9u|{`K_ zy=HEGZk3gUP#-$9eFXY-2-0HHmV5~yxU7}?5@HZfC^{VmV;@}0Bb$5R>~~hS+#|G! z!LS+#KnPW>Gp}LgBb?Qf^xTD?2j%mrWko*9KRBVaeFz!bkERMoIVLd8ATyPh;jjIL zcyQ+0pc3X_1BhA!7m!vEM7?s{q;20I%p3@PtL%A_KI7t_QcLq%FiSm^eA+Ir4`M1G zgkF_=vG7uJ)vq=fijwcFug^96^VnPEQyvqE(5$^)IG%Y7U7Wlybq0TEV%yH^nTCB9 zcccCB0|_@WwzgltkG(1|v<$k@o)5apXVV$ePDc6niYsW9n-F&Q?Xt41(Ka1Tn+DX; zm{KJis)Ynn6B(;juyp%R|AjUsXwEcd-KInHs~H>+d#gPaJxKx5wv*51?_Q|hKaq0u zzSi2(%dUrypeVLe7ttSm@SyIaD9)@!M=2+3AIM6ub>5pQ2t_*DDOi#C@K)3{o0D;v zDk(0+W#*SbXT12iX$}4^M>SdBzEECl-od|aYA88ck=l>jmZMs?y@T&cx7Gjr;?zR2 zm&(?x-TPpawyZNRTt1RF$RD*Xw(|EDrTE=qduu8RKPf$*^L?#)JO`Ucj^0b%KiE)n zhOl?w@J7z*RMjsb4?;zn0|!;U1vEbgBa73at4aMo$*em_B{BDf^D`g;4NS-F85Nw}!I)+Z4h}xX|1@8GiT{0vuRq}zm1m(>o=3IwcQM+| z!Hc%~CH$X2#uOOSxD}<74-1po&5v;l5E+VknlWX^gq%=RgJQBk?|5Fv#%a=CqBrI) zuT9V^n!`iH6zYA2N9x~?J9-%LmKQlQ`c?nr0s*xAo$Y^~y*vDRky@h*>qVf+4f z1S`&rB8f&R3_Upl_)_raFV4JMSo~j{R-}*k-~Ll&>3#R>ldvm$J`f!Dd~5U^_E8JO zwLG2xlGX0BgL@->x|wnFztabRSru4aQK1KJ=i39TsjTwPz|mVL_?7*0I}Kb5Q1F<3 zd|RCT6ZeyLG-%5{kY(YHFh@c9TA-}eaSi`10YNT6e5?2H{FE08Ns>Wj+D)!Q0UZ4e z3U+9Snt>^^NWXZ#_{dOx>kp4aW6z91KoNk(ZOFJFs#4{j%@vC2EvkpP%^OXLInf{Rz5k{M5-dIll+w@c^8>3MvUzU-}Eu=8Z9)W+vw7@i2JO? 
z5TVj|&qH)L*c@%HZ;oCZrTF>ais#(S8kX5${?bcKP?S$YU}x-fQU zdLI<1FRIxh-n_qvS{3q0Wd?oHJ`ZEc#Wwt+jT@)QJdCG5c9Vp~Gp&*O5j?D9N(+pY z*-fft!6h&3V`M_0JmTm`X&;Uqa0Cm!1Hr`%^K0i%9W) zP@~EC^F&q!atX&&!T)$IbmIztJaZ;Tt$r|{h+>Kpf}hcwnhqf0>25@Hojy8RcI)3^za6SJ&v-B2xZW@bEuQDyP%2_HE47hI=ok;rew2&`>%b}dNpc*&*~k!!;o8LEtluPEjbFnPRI$AZ_{ z2Y&%Xi>HTV+MMH^Y93l*Lgg)c2fYwQR&tE$6nkiYs;eoXD?;52)rE6y2Pl+mbE{WLnPO8PHs%e-j8uDd$Z9;xO`S=W3DN!&p@K4OS6I8E;xW!M^lHtasR#JQo2X6_@2Q51fFTq7wt`>R!N-0%67DJflCx!eHIt=8gpjYUD~>{JuDdF`5M1z^-1+fjX4|j zoLrOlvb^a${j}+&K52WyF~!jJg{kn)JfE}9KOJ@#ZEp)%oZ z`nSmM!iFgSL6QE?*ZBW=UxQ7NZ;JKv*{0FPr*t0O{MpsjQUCngmUW+*d3AaGkcGPw zs`~pK-WUOQY|B6QY;WJaeceaxp6esd|De~Nb$RusZ_G}WTuJ-;k(Hl)Z1$Dw;LL=a z;m`VBJ#M_nW!ExAi`I@B-#r>lZDrOl?f4=8<=5BCeA(Ok8wA$9otdd>AwJ)lk0dt6 z%U(xFz>I0n{{6C+-c4vUA5yy~Hge{dAY0m-HnF2#3@|fA|I*w%y=nwaEm*wmLefGh|Be{ zObW8eg7F|%(TP}E>$*FH!Gl7;psi>;vG4V3e><$$P_axzBxIFVR8(YBlZ>Z2M52X3 zbEW`7u0Mh<=bo2kqj8DKFF&qRdTVF&sL_lGhoj49`F^SK-yoB~uZ(g?nKkh3(XF#y z+%HRrdk4nUne+b~89Q!1@J6F(Qu>q+y>*R2^=4iWAAqq^m6V{I+7ke z=7wgP+7U6lv?QKC@2I;K_g-B0W;fpjs-=vG!qcK@P^H>WRx%J#-VtcE|J4{`EVLH#{8hus`6N zuFRb8+uq%lT3gPNu>B*7TwD1GXL2G+4X1WQ7qQ=}`>RG;j*lzwCDIEoV~fk9`k9jP zVX}l^Pqo=Exs?umXmbSQEkj?M7_;Uh7Tmo7@_1LlPVh*h**rjDX3OaUVO z2I8J=;F(7sBaqSSRQbqjAXOpWbI_#2=-)Ry+%-RM3qw87FhpRu9}f!r_?bHP?$}Wb zq;$}hQSq>G@7H$Ze^KSCDf#lx7yIG${Exp@od+Zhi0q!jz>zPE@3wPmB|(@RZZN7` z?~kFOPS8QWot?c~Q&U6i0tbubhL+#{`p=JnH2l_|S`R=V4rWbQdW64m3z$&-8S35} z0GPl>X)yKcJl7zodFcrxQ4WY8r~@Fr(9I!r{Z&qogQaT2lprcUASK_Hm3^9qgiu?y zy#EIZ@_4L{b$3p|*XuL6HLJU#y!h{kOeE+g=f7E=$ZkZ36+?apH26^O9SC^z!mZ|z-{1}zkIt%NnaqC`C|-&lA3hO@Aa9fB%^ z{c)~bg6oMXGrag{HM)fK8M%k*ipd}zBlm4}JXCm(!G2u4$qE;SSo+^1Raj+`&x_{` z1f!MGAw8T+#O zOl&~WM8e@N`;t&b%icjX*vTwGmX|17`Eb0RE#z!js4Y8g9w)a+j#yRK+;30i$H5P+ zdK}yFdy>O~t;|XU{hg~EUYc}y~lbKQ@ zNug1wgyvpshzQbOlJ?dB(Az)gcLjZLYsg%A!nw9Smv9;h?$@IY+UtE}SpbEVLuk|g zKBifnn5z`*RIXUwNhWbyE=Lz_#hT#h586O&xjAJ=fjdmMiKwY6>`bm5H*+jjK@1-JS7f25H5F|MUv)67&LO5O#C6~$K z!0%fEPT%R^m_GPhUhQCH-Jek%bsosJ0HKHlCG`8Ch9M(UC)Gur0qP5?3*m0VBlsC| z{Fi%Xf8k(-h;#v8^qbsVQd084TTB;e0O3P^#svihdXcA3-^Y*@&vk~tI*K-A5N%c4 zI!;<=xj$q4XiI}zHk*P$UzLcJSuwN_B5g<3?5$~NS`L?moN_#{YZvNEXJ_ZKptV*Z zL3S}d98wTgoG-AduZ*<9h3bAdh=kW-X6u3zhRoPr&L46w?f0zZ8KDcj3PbNxoG_MD zXUw|w>wo#=i#lRouh)7%uG&a`um!EIg<=>N9P+4@mzLFbs)*6+ z6FYk6vn6)fqoJpVEv6>pqFz!Yn4^T#ZgxJZBZLpZ6b#l@gm^7 z^ZZ|uT4-|%9lL|BJCV8tqDVteAugy`zfJH+YcLZ&SQ}NBR4}>OZuWdW{&?t3j^q0= zifLge1D_$oTQ^e7~&{QE)j&)cr1 z*cnf%tA;6DYwr2+=+YMBiCesLC5q8o<{j*xhfd}kbT+#b=>fu^h9ddR8LJuZNhz)* zwc5vlXcIshPe>g_ch79=D0$23L6@|)Daa18<7H^-G>a~o-!So++(5ow-X9r?vmJ>T zWj_--2R)Z^B1w08;;RZ3DaPm$npkdsC#339|G^UJiQ@9{g%|3O-T>QJH#NP7&%%%N zzK{PQ{2gmDVdSxFaa`fm`8A3Ks&>H&jfDL$AaJZ&^eEC-Bw{IcXrNx+}JyPjA zTHi%q-YLyvr96VD^*11`#Uc#Ph@ynGAX+z;@L8d=bIBJ`)WbAOM zBw!x?>*W1kPxRbBqEDaJWhU14x$}@uf*5dsfyh=U;G2MRy$cG)>7*FI_#vs$7zt@# zYIIK5j3d7MZ{NMEJ+ZYa?>{c~cj=+3Egy84u+!2zK_D2VfveTZv~mNc+YU}Eb-XL~GbFKLnhcq=9Y(QQiu|(hyYGF~drj_^-;>A+R zg1Ay!%V{OZBo>W9mvFzunBOkB#~BVOj&8JatJW6A#6R2loaiEYIesx@zh3_dTQ70R8!05=vk{cCXR=& zXDk&95{fqHd2H!Mbxm{gMue^5sx3ZnHeh1GM6EU$jm;#9h%LqnrsyGpR743Lcz1?A z_h>ch3p1|gYC1Yo9ZYul&C0=j;$B;&ksn(o3nW*XjZV%NFw$=V^m7)|te9NJON`+_ zPiBJ>Q0c@vZI0}#1E4r{ORdY3?F$%8G&U`^KG)yvF*B3ZtB)u-VFIZr+sHc}RE{Cb z)=kyt;m$mmtH*@!#X$oqkAO`BRy{jYdWft{LE3 z*HPnI$eK-jCsl~Q+KhZLE2IQ>$Fl`V2}dE0>h^EuZ?Ko+9jo@GsDcI-?8sprY~_s~ z8&xd4}*dBkw=w4W)?$r&9ITgSoB(iG^b7r%#OzC{m_#OgY9 zp+mfOdw05$e4+J@D6HYv1N-uoUV>{rJH&?4)ry$CnwM;;ce#55rT%5|t~xp-0cE<< z$-FzpcShrhVw}mR3tz=>GA6?^WEMP?5$C7AWNQWk!+L=;Wn>Jm*ic6vCg#;~?ytE% z6gWz$X<=Ve3_s>BTdS7R=@oK%cv zKr)Li$fc_4Q$#z7a>s}cq1NsUIOc-w&5-aE2)YRb@D(VeB&gaGxGm2v`E~)_=?Cd! 
zB-;-5zsD*r0WlM~Y-{_sXnQQdro&=p+n{qTBMIB4?odJw7YYGtgYc8QJth63se8Y{dV^E)dUWWTZ+&|ponG(`VmYYzrgh#N&_4W0B+z#-5U4cm5 zkJ-~&u)e(+wqC5nwqLTTkGwKHi>)(%y8JsJ(h(C4mm{*Qc2bHGAzM2XIMrpuFrzD) zl@r7XA3wc^$n0ux6<1Q59vN|VYwLg)_4jb}-Ktu8Pb4K=wSI@BY~W=q=z`Ix_s_4p z-+I`p6WyQjGETf{XOMOf66FG9Oq^7ZuX;tS-7m1@xwlE)^1&Q<7Yez{P`WnDF9_!} zF<;hEx7WR+$#gQ`+W3K`Ape>3xP+Sy2>^gQQM=?k^>!;43(Lv%*1~5Z6V^eAD#eL+ z1Q;w^9i1GvxbJIn$B^-dx)M)?93W{_0Sn=@9&hg9&;=v&S)1yb=N(oR<0dcp-ch4* zIzAM@a}{32o*}=RjUGPm=Ad)xXKD3x^-P^AKW?2R&Z^u{@@rCS%>$dYR>!|FW3*Te z1kKjaa}tL`2}D0mV!FDzmdKpgl|2w)VNQ+b;)7xz<~YY(5)o4#lrt5cj#8R%q|7)gTfkW-0FA<>~LSz5|9SR~vmVW8&ZGiaN zpb);TuF0qv7#MU;hdCmbfAZY9hX@+kxSIlvdqs-|EQOb)0e)U5CYBDpe!c8V5+w8? z_==wGI3#E5VrAfLXCG+zK%lFEXH1_#Y0jP3mIzt=+{yBB6ld_okxvFNb< z@}A5)4jsy#Zmxza{V%t7=)IY$1A{3!1-09*$~FR`m(VtJSwDxL!+$Y>yEoTt>u6ly zdH_er+>^tQ^>Plo9l!;7KWCNYhy4Ai6lQZoUTrQId?o}nsRfU@5sVKXQl8`zF?o!- zY~kglS!7#!$O@;nb~sv}rTNjsxpnhi+#`kT(UIUr&adt17C4LEC<_HTq~W1>0Uis# z=YB7*`)1y&Zy~MXoxqUiNmEGXg3VIn;~UYkS0w|puB{hxW$O=jDvmaex~7~qn>kTh z%WsH@pN@?nPFUm;4%uJ6AcnK43yfd%{_4z=%Cf*}PC##lGUuaoH+lwf4Gw?u%I~nC z)*E{jM*bL4K%ofJ-jw@o+7`#Fxb2fYE1gdj4+0-H;04+Qt+QDIeD}!Kknskd$}a#X z#m7&191Jhoz)PO1e;XlMr;c@!=HU4!Zh1HxT}z1XB83sNCHo4+sW0}ifleiuJ0v|n?*eUoeh>Qz~arO+gUeV#`W~80= zqc^?VIO@9*>()5Xxo5H}eADVFHORk~oPcF-;klxL`fpvfqBEF}pe8@jKQ?VKTSKEC z6B{UIEO`s5;j~l^dX{o7b)8)FDvYEz_T04i!3$QX=pR#$7u<18UpIBdr?4CeOQ$7YQB@BVSZ}w>|+gC zI(%XfeVGA}-O@COfLosij{N8}XzW_z)L>Dop9T+tVcPmV-+gUMdNEPJyZVJoFTg7q zH>fiJ7k=<5R0-%B!4HCK$FVxcLVa*jKoKDFk!nPdzbRrI`S)=1;T=|}ing5?k*gi` z)6im%`_n^%`y) z&z$*0XTb0T*bt~uIZf^}X_Z9`q9bhtd|At=XjUEJ4xyy@4)G4{U<(OZ3r$;0<9)7N zSUI!VnCEyW^{|FV3>!6)H-ZiI;&Tvs@%5|jQ=>+FzGidP^Eef(#B%2$Y*RKH<9g|r zj}`{|lD-z%$@^>_J1)Hq#cD+4Xf+NLO-KWB#a=`>apJ^p8XDMJu*LqpCLQGSXel9w zP)*k?7);tTO{)#rjq}-VDBJf>7hCZ6<;xkag;$b#JO}uqS#Lh;&()%*!H=p39h zDak3(EH_#hp#gSbdftc9rVN7|I$VL4%Q{*-9N zAq5hNFujv$Gi_oF`>(@@!>4b$m@G4x*(HWGh)*Lm$q_MeT>_?cQS~1nM*H3?%1l99 zs&h)FbJWpUEndBDhE`X#bH#%W&MGZ~nUuv#xTY_zBF1frW7+Zhg!T|S8_SRg%Tn;O}qf=@Uav?9Z@P@|@bBPFMBw8GC*e~UZMQXUC zV?`l0hx-Oj+aR+A+Z(l)lL~Yi^NH(hv;uEr6VA*5tv-%U9Zn3g;ht#IcJD?Txk}`= zj+vbb`Ud>hOV1f zMRiU|XP1P8jjUnR

UG@^a$36!XtLNK3#f8mNm10Q{VFq=I_H)Ocy1z>})bNF0yH zc+epg&0KE4w|XuOsWyUgCil{jnv3Q zsi?%r5P;@xHQe%1cs>qKx6lQc4a0U6g#pEFE@`G&qF%wCRBs*v@>b}Q1R+%$ z^~tB7>g_sk;O|dYIvbsQ;{w3I>6n;mOeP{t@9p)vX$5A^ej%~Z(TkC>CRc(?+s7ZC z&_j)kjO0o@5RhO$=$DuEfp5f(bF@`#L~{FnE`K*{S*qIBfKmT3{5A14F*#YoHKZT6 zvzo4~OXw%=-85rqT#^b)Ge5DRWDJ5kQg!CDxyo6ApmLam$;`;`Szv&o%3KmX)&7UJ zgsv5LY_ynhSoo^#&1Y(}Zr0R~wy{u$dP!V`j~}?UAx{cM{c#IoGlwiQ)w{&&d9I}f z3cFbobOk2Xijk4St`EJV@AfV^YU+3K^owz~DY3K{! zeija;Wj!w*b0LR#z-!XL;=Us_=AmYjInIh@vV@6oe*ttz0UL7j;D`X;9FNwuZm z{hK)KNH~#R8f3*~Y}-ix_BY$>a#GxIA&582jCc*(S>I>VGmo?Kc`El7UhAR45Pt#vh)sT2 z$%PF{3mnCB#UtZ%oV9U@u|PmQU-Z1JwIy7}(E}rWM|w8#CFh8XDfu9BA&qe~GaU1Y zP0||-(GeRVDWfbQVBBUj){-B59)fX#Na{~>m5~zGXukDu^5DC&B)Mga)439&bFJmUB!uIk(p&>(>pR`80R=P=XEj6Qfm2-!y zvO-A8(X8T+KzYiw{lkBd?;Xy;aQTtXol6ZEt?HRr>Ug+^}L`zz-DJOwS)t1es3S|9P1Iyy1)K^O_h!tc@nqO%>d$N<5-2 z>TMGXKRc))CTP?Zbb9E%C4Is>8(X)zkalKP|Midm53@vo&Tlhdx(=8~NR!ZadgZ^0 zOf&^idF$Xm_zA_KbgrV1#kTpwDsA-rPc|q7?e*973ELN*(>fz_0>;HD2#L$18J zxmhm>m=1dLP;0&I5ETaM>tf7$-K2nc(dg8!Pj<&%5hNxg_=G_1xEGX&NXEfJ?N}yy z)26yreov8Z3k|l#%z;7tICd7> zO_mwWRKmhBJEkyJ*>~$fU7f5y8-XT>3pEtIbH7`W{G&KMXzo0uq`G1+^F`~p>d%DE z^mNT$xYC&OS!A;9YQ+`F7Hq7dnZ6hyU5D3&<_TU(0y}cEG#b~YtPr;;eho+U9m+AW zW4%GkAALP=^@0v?vYa5|B25@c5VVPYdYV~!`i3?VUj z9xucEZn6tSjV!(Qa_xKl+=a}zP%w8X3wCbR8D)3bO>alZh$8}2((8f1#Sft^_nY3? zi$`LnZRuSl#*k67Okgk08ag>1rhQDU;Z=tC*i&njH$2)!JIhk*@`<}9=eIKG$=EBz zd_I0Zkd(Yxxiw_;^p9MVSu72x`rdFv>0LZF_yJ))sB&}VH31eWIairutrQ# za>bFtsfvlp2OOY|1YHw9SEl39@6X1j1ip@K`XsHKiLoOy@F4^q_4I_YaW{zq>hK?= z*V$$G)LeNh0NKJKFr0lFflUc0b4YB{c*8C|p^>_s4N>Ap8#4QAUviCv!*lvn|kqB--M9TRR~}+^m4{J^2qUp#ADIThw0bP z%&8~mNBlLxH#Es`W+B9Td0uiV@JZHY!hAtm{lA_sdV$uI9~8HYqE+$plVdM_y8mGs zv>N}hhyc>$A|122CW5tO3`VRaFfX9e0C-$`3c({G;p_txc|#vi1&?{#0|x?bM!>4% zvk&gjZJ8i+S;%bYSIXw)@*usSv4 z-^;%zL=63r;l__i2V~PLw9<9lRJ>_s+<-Fw5eoLE&SkAY5A&Nfoek@`X_Z*HnL)mc1%n=gOt&nG5mh6*eGxSeC=2E! z1~#27RfRUdYrs2(^j1;RJJslOPxVlQsjfX1Z1hnEUu2B|m;pslaI~#)p;>eS2}gX; z$+MnK1lsM#W1w-aY@_;YBC*f+xOccZ#2|_>!g#3W<1|(=F}bWTO!W+hoi-W#eGOuZ z>;quO4>LBb&^w#j67Ga{hDDDu(Vq;c6)b75yXt!~yv5N8a}|FyhqbNXjQD}zeerQ>zQTW4NHM;b3#RFk zn@r-bkvBuz6YhkZ(u_uGKMx^G7MKTkchW8IS1s;V$(i`95F@nWG@0GkWID=H1o$gS z-gn*g#;i#AmTaQ;O;SgyEdK&-B%w}rF8n4Rq4=TB=PACkq+n>1QnwsV$Hv%Co?13{ zeFQ;Fuz%lLhKWzPJXb=noS*#>r4vOW#5U9epc>X)@E5Z~dn@bNag@d&Ub0Rx0&PZB zF#YAhVfMKwn~a72?+OM6ry!Gh*PGYFSQz^g^{bOkgT44C+Xy5DwWaOVn*sFBB7k`f6?!s{C+vpF?k%z z{njsvf+t~Sl@6ECAujcv!=3VcUO8ZrSGzfnRd!z!p_NZuI+j39*@VJcD#xu^S9zx# zH82|1Jl6j=Z|Yx{f0QDXZ_m*y))c8`>e)n(HKa@DC%XoPip&FW#~|4oFo2S@Ye=N& za;)0y;heCUYaPM?ud}prZ44`Y1c_?lC#b+{jv}|qfK%JYW-}0GHvoPP8Xi_t5)%3b zDY$BzyV&751OnL&~4};nRb7!L2?xby9~Js5(M;m*itUz zoiSQOZldEk5>;merv$JU-E3*AiBOm`QMhpBfU%at&<&4RoTQ&P$ zd8B?1?T)cYtF6_bTT3yA%+Csmh=`a_!wsQ_(B1v24HU}}0XR5v`ShmC)~h#<6WT=? 
zgO#=B+{9Vaqh%uuWe@redXl(7ZAV=mPkJ^(a$;Z4qQaUp8c??k#5@+B0ODu&Qoj=3 zF(s!s` z#o!XfNdX>MX3fjk98=HGib8J7J-(5U*ANsd}fx`Hp+(jYRQ+mGJkj9%sA3WZlG-E1QOL7!A1RnFGV}&A^aFT z7wSJg0ik*oIgAshtq_0D4|&cUEyv9?XemR{V|=RR1rM+rpaYlo(&C%D1@(s|VnRr& zL;sknC6g?>&i=|`gN@iRkm$g;Vx-+rH55-eTTA-G*O=d31jmc6{@JzhP|De3v>tuK z9YKGC7~|C4C`72M2aMds<}=?d%uVGkpv@%}=KwwPnZ)}A;e96HK1qpvbN^c$7OXY-v0v=jp z;@L!)`%v$%co6Q0lQp#Ss^XoR70(@&?-uV!K|gxKSr%|mh8DsN4s{X2oCNf&)L&W^ zFJixgZ$aHkw}3KL^p3wYYX8%R$stHMPyg=F=68>mgL6-^8g1j+d%ySJBN_a`0C2Hh zi|RmF8b(GRo#-uf$>obQN0H=d1o+V5IS5Y_k)Z;sen$a z>`<_HT7tT?DS0~zv7JSna*mr#u!E}icyYf8s;?-8 z<+r~nh?&Bb`dkH>2oX#O$<8(C`oMQRv8M=R;w{F=FS}V+aCRc-axO8 z`s0QPxs0A#$ID{-i9PYA@2FOs3kA&7QIZxN&5MY-LdrDyj>DW_oO!laT_k{ z<5WPXLd_5J-ItENm#f0cjGes|{p<;|VrNoD!#Sx3F(8WK$+Aj$J&?6?~>-CRW>xKNdX9DhU?9he>B%ZpE5~*F8IL)?DF!|8_IQSvhvfiuH zl~uaL!fiq2?JW~v#8UY}umVf?O>wOF(ocsag8p#z&Of^)-{N_r&r^{5Vg8RxjvqZO zN*rB=Q9I<#j}i&p=N6{wgaz+lhJM;hPwC{8U;j|;G`GZ#K9O@uF`~r29T%cey5oo{ z$7p7bna(O@?ZTcm;R%U6uLL6zHp5~x84+R&naa)VzFQgxKBNwRZ$H~`F}k|(CpXK+ zACO}pr5}B4J_odRKp!NJ$f*xBL;fxSxvI+lhZ;8+X#z2iCJ;}8MTEi7qB|m#Y|G|% zyTOK@vInS2OcAJIEu;&;6#M$t-YX!&iU(hUU0}J2q|*VPtjY)>=X>7%cN!pse(U#;J%ju>y~|BKn-` zI@br9x+c(=J)Bq@hzS?3>YCA#7d4X4l79&BXM9p2t%&c6L;?ArB>S?np5rI1|Ou<@|BDnl{^dm!Q@fuCGn$S&ljx{a4(&>@dY zvqQX}N0b`yzRDcRqBOAJ5vL9s_o=hRHwBw#6B}~e0xN`*am4t>*|lY<#h{8r1;xLB zv>yRUvovjpByWbz^@QQfX7MI639kaQ?GpfWElrI{7}V0-3t5czH8XS^r_T+0P0W&f zkEwPGz`??cu6-AQUH+6sllST{e*_T&`?{43?5l2Y7hmkZ11| zNt~kaoC=q#I2(8~^jaQaJoVo8atx~q|rwaBrUN%)o6J>-wj7uX~JuJTIU zJ4@bM2l=SY$>ysvwx4V#4?i67(j&G)_6`88v#Mp zmyVTQcL&zGq+o0Ks7RMqhrmMy-AXZLXSvCrwKGJ;RT{r;yGt-7xRwMpOdaQZn z7mIWbGsT+V6ROnw!}OF^P-I(7Yy=B(=`FbldoL6mD3g!jJAu53s}oz{=I0u|13V&^ z=Z;m+m4yUmML(&n{bOY01)>&w`}VzZJlog)-vHX5um9gA?yv7ut%mUGckqQFX^jLy zAm=`v)^m8{BRHXfUuF*!Cq@1VHFg*gV7JwQAY=kEgbc( z6|V9da&3lE!GY*fd+z1IWmR?5>}&Zv-vCF~!e?q5;}Y!zZp%VyTmHa0(vl^o`DNz= zHk8)i;U2IfdU6j2Q4@CC1(_*ukG)(n4m${X1=>SpfyKg zLQKqKo6I^l@ydd3^``!KkCXRysb|atzIE6Y_HD|m5&+ZW%rGMo3~)Y!zkm5$t|M@U zJ{R6C0AEw^didK@5IURBoC2hVQGqGiH+QAierM`}3 z=oL8&?{>_b-Z7~+p%(2ec1@n$QrwQ&%Y6o%fMk?Tv8|C{wFZdE@a;Il9Pfr7UpoBd z)n;4}rbAH@g?Y~VHD^^K4UX61T`976%XpQw`=cz>k)?CIw=8_CktDsvIK*GVp6Cyr zJXumk(Z8GU)tt~Mc3N`p1}j$~{^w(l)sh-mmZygTe*0}g@kW7cZ|DP7vp1njafQ&P ziiHx1YT-KwD_#UrX-)fu$>%j~5_+~fhNU#Zr)IOr4eJ<3XJ?8Y7XJ3->xi2H+_wzJ z3tPZcq8^Ph42H}Dm^iKOv)u&Q+t~xzR4_UEI1S2li7dH-rSa0w(%zs;bJYz%$I!w^ z^wC%|M{S9!*Mh(Rf5`sken_EoV>WH67i=f@>|HT}b3Plq_ZV~Y`MmRCtp>_{xRGc4 z0TWK0*6*yQNC?QezIMj^0Jc56Kg_Ji#=x(o#ga54G(y( zL&N%9{J0ehT)Y=(U)@4a;Exg=gn@trj-m>sQthI#dnJy?@Yq3*wwuFNd8gTBqj!QQ zlHa;b4EiiYdtXfWwiK;44D}}5)xCpKg zoEKJ-#CWI%ET6836$jpajsj9F_2s}>STGDMw&tX>e273}T)YdynwRmT@U%R>HF4JB zUzMmgJya9&3Q@&k)Zhis8mKC^CULeDb3k$Zw2!|0nON@hp+=P&dTAUyHAP+}oUa_f z2?eD>7^j64?W~&-nooyR&WaYH{9NUe!P6E{k8Svz%^kv*cT^OD*adYrm+sd0Ru~DY zuj%x!qN6Q9Kch6)ZvQ5GV1MS_|7mvq%YP53K%Vtc)uc_}XW0V*|0Y1d>njZh$-@T) z7P8=kW>W91P`GpqsP1Pc8W7<^Kwu^MDDs{8=mcm}m!1H(hPWa`g_*i#YA%f+^h!=$Qmgji*t+(PfJacw)vC2`@ z1u&xa(+q$|g;rvT+XfYf39nOIjmoy(Q zKP`8N@)~~n!^|1~nN|%ZgXB4z#fc>UKH*qm^wXW7JFQ2ON4GdS4>NI_xlDllP}8A5 zAqqH1=j^sOEu=Ufuoh-22${d*g1Y!6bUvQY_WVW!!bDy)wvUR>$h}uL@a1dt6=GlU zzo{utC`>>8zXQXoD|LgxD5O zk{gS+#B`{gi5uFbiN>-OfY-g-rDG-EaHCxY#4qg@AydY)G`WQnkSAgFuy~A~FI5Sz#tv2L_N-R+JTy zB_l+LA%v{&+z;BX_3zvN_xtkWhXz9OjQc+4I@h_*IYF+v^`d5LgE?qOY;M9fCq}=K z#qRnndgF-|lusp95#ee^P(}2P5sVa8n--<(9f0Gn%T%rheZ2F zqoj7ji|LCgCiRg=eWQv{zJgRaz@8UYwS^2l=zP`=43U@`f73 zqRK5ox~1`8l(amqtLd4%S|hnHg6itc-;QI@%uaM2=`j zBs&9|kj`DTW-dAWmPls~aGL)sn1dI1Wp+Z-q;im6zN7jy^G0bLvU#unvUwxXEgvc_ z!jMw(Yabs$M%`?U#xH%4w0H;Um``W2vp1NTxf(;M)~I(I>OqZCu2+@cCueiJY_9h_ 
z3j;3?4F(fLP@Xe{iatx?5DY|9AP}FfH-r#eT3y(q@n;GY`?C6^NcuVbAEB-Df|$gq zZU|@3KX~xeV<60Dn6@^vJML1bmck#Zxi6i}pu{BA{h|?O_MmU4%P`{-#J7#RX{w(B zJdJ8t)y`JRxk+ns1~T&Vry`+T3og`(7p(>GVRn`+bCB-0rmrQdt9H+Ds)+jolZB95*b?hjz*SDAh=GBT|bP}y(L95eIlbmwOQ zq;yI>edS1gwT?tndQ@Mkr9U)3X|&{fX;NRHR@_XS9BRc}xRj;-_cKxxURqii*U2W# zCcnR;Gk>kSE9kTpkIMOhluLcv_l1=v1!`ZQYD!m&_i}0BHhRqMf{}_QN#gkx1L0z7 zVso6**tug*m!w=|=v8sA+W6VmjV>zM)cEGdcri;uB}qGx^rT?(m2Vo3^1pW6QRcMf z4EdK`yB)PEj=lTi?z2~OZyf(|{^>7w7(4!Z_sy@)=;wU>Zt#ZzTAk;c@JY#O?8cIZfs%-E)x8vc&A8|IqmPf?T_X{NwX}8Hk`yUfNurmGbWw`5 zH>fO}rsXa{<)L`LAx&f$kI|sf!f#j@Wc@BTIx&!a81MIx)yRCUsm0pajD^2R13tC!Q?yLpcpI=A%N}xF0*`7Z)t5e;EsXi1k z-gOy+PEOV)l4}lU6{dq*^F{gLZhi%Q9aPl|v#S1QXT%~vyzq&Qo)(+PMNzV;anbEUxj^La zwg^Tgbs5;FUq~Jj-5Z?A78TT!9<}nvUW65n_z%b;-HlQ*2c@Mq=>v<){5t~2x|xi9 z8sd~1uj$CmhHGd8`#K$JZ00k4M!iZ|XB#?=hAM{A)3Gpt4<#83dvwA55>^2^avW1h zRMfA?leOdBse;_DO?O6_^>%gi0;UzEVYE6o+(@w}&I+%dzBiZ4gWebwsW0u6^;t0# zd-GC3aR)6zJHLm<^c`&takI6v%~BU8*)?tLap^4O^{ohUe@9K1@y!Ard+>@6jS;{_Wc3Jhc|FL7od?PK1c3*}0WmCosO9FaRDpxNJ&uv&cStFilcVGF z{!XF9bl4|K5O?q(m1;q59u=F^MIAWsZ}LV{(`4)kD=Gp%#7#3#?Q^PQ4C<#(RZowH zu#+)knAe44o+$y_@&0APP)q#KXnTIDVx573E*Ua0CM{ED`6q+i?Zcp>Sl5$+hsnR#|I<(lmZplrC+`5~h?I#_L@H^&6*>^{JuUBz{m6@x|?eTFQaP%6FNMO~w z4LgkbOiM>a4?h+%T+T}Fv_(d@lu?sLBEmzfhoT2kJIVx4gC9zc`||05yosYt=@Y#} z>B*Okr=v(_+I@G1n~#Sj_#F7n-=&8gbX~j4Kh^)CqI!_-YUiI587&!wS8Ht^I>Gj` ze0inMHNAMmS6sfCuS+cTg;PR`WV$WPX<(^OlgPfU?(zDlhYYG}b0=dn&-!7UhQyS# z#vzrNkvGoF7w#F27B^FEZRU?=)wlR`#MDasI$v10@j8{gK{es&Il|0dv6Da?t_*KF zkp|WVxCY@Wa?eaVa~0WQ>Q-D3i{g1!GMdw?I8jm2G^%vq9;Wjb-0>RN$L9j!!9FwB zx60edO{>gyL>AQWqJjxcyR))*F>Eb3@u;i5o@IjnIQLl4c=E9q7W=sTARU*b?UtQN z|4zf`lDKFYJ$Q#qPLULEWmnqzX_sYifwRRp5K8?Alm1MZ{u$#yw3m!YK_&@D2Wgfx ztO9g?@tAB%FgoT5`PbCY2bVq5E@NSn@#!hplTsgtEcX#*lt(CqVPqMV=9`t$?B%KI z5h~n=PS{s&E!Zn2<9IFz#hF}5gVT!u6q z@mPdpTo6gx&jJl!z>pb3UIry)w*w6)*m^Vpdoo9uF;gu#@x3nvI!oTyjK|oM`t0U& zF3O4ChSAqr05L@cb{bkujorQF?JTtq1Ml?Gh6Qai4&(jMhC^^EE1!L(edZfc*R9qP zl}}j2verqJ@I4))P?A@yOR{hTjG{$&P>=bv$58m#mWdztaOYU&nroy_{OXC3)22$Y zmYMOR>!JcesSXucXT{ZKW+b|N?pa=Ed*!;b=~a^qQASN#N9c(JpPVoY`cU5$hEcx{ zMN)}zpvW&BgPbL+*_ejN4PA1uu&3M0woS@DSQm?NGPK3n80Y6j?!RHiP#gNz9o@n+f!%S?C7NeQ`JSn~BGm zJ>fohspLbk@|u9ZPhq~U=4G>ug+LZvmi{Grxz;>^@b`|;HZ;<%{|~-#;#Q^IM9JQO zl9}$S;TGA?q6ITkA*bzxAO@mjr8%e@Dj(dD*YG(v^7Vt>OL-vgcxBm$kN*3&Q##p@ zD?!R9wB$}eC3rt%fgbkML=?jmrkUu|+_0t-lb5p0zCZkqE=K$O!`VOAYJdFL5x>D3 zT}e(%f(4Yk)z=EwGCKYxc@kW;1-U3pG>CD!x?J%ae4#EGQJxs5z^w|`8vE@c=EC&b zg*P@e1vlH;*sLOdaOU)x<66-0j3P11?%O|f`|jQEbQQn<{_Rj?j_CT%UAtD2gU23U z#KIo>xW{7^Gb8l)L#6RHnRL<3))^C9uqaI?=_6*jqWr;5fw%JjLrz%8{ueV1$EoYh zxDID|`{2bE4}-+Vg_;@lrOjP@2e$u6<+8-$M1O5}cEg1Wse%y`#wc?zu0!I{%{|NC z7nDx-Ix9)eQL;pQe~_DX6)+d7Za1Oa*6isr8LMaffl z?-r)sGF#&x)pM?QC_g+q(19wDZm=JESg1v>^EAqjl-&t*FrP|}oVml@w3ynIFj&59 zGO8_wDUj@BSEwhARxcf7hEsV%nu(rXp;Vi;fF+NpCe6Z9E1pifPpVF-s!j26gW*W{ zTvkY%yI&_GNQW*Ko9`D)ltr-si(nj=p2GY+<&H0n-|Mm32=`(V*M;6k)=at1r1Xt9HJ632g5Su)(z3Qh{k|aW4`Y^{ zj`gmHs%Gd7U(wE-$ktPNKohu#Wu<(kAZ)8)WGr3KS?&@MIU*m{kWkW@_51iLLC~~f z=@$<4p~q5%jOXj$DC}g82XvX3cN^Pwh-gR3BS*`8Q)N2`DB|Y(NtVl^JNydH*`jW! 
zHn`c~hBzk)i+6unXQX8}+gmnec2u~DKJn9_31cRoTO1yxF`BuSH&&?Z6nTg(Hs&!* z+!KUXjcFYNcIp!mYEL7!`M*awTqq_0`()h|=;p6Y@BiHZ z{pDHh3aC0-M|){WyJX0gK9thgGxUr^Av73J-eXTMsz9oSB&4osGEhD<$UXV=w@J&> z=a)_Oj1z0M5bt(1mQ0>mlCR`rrCmQ)uFJkik5Yb&dyeNHy=; z(Agbiiqx*Eq)exc<;&$c_Y^Q9EeH0=e3)kz%orCM3r9`7gbYhrHOZBlF)q}5KVJYMEnTs%sP{Zuk^ zQEYl$o-uNG3;#$}nfrFH&bj(`g17myoeO^cIl7TqA5bnDJ}wS@AbD75meDk{qNd3{ z$#B#%Rp?bPxVM7zYya;_-`-8taJE67DszR?W%Zg6{@M zTsK@_wrNCqAm^M$sUUF=cZe@MVZD|ipSvp})0aDcdx>YqltZ##|B0O~ett~c z==n)o(+57~2Kh$)8m@AXjP7aB2+?K&{5!GV+@2N0Vm5S&4zv#C3|K!rQJX!ZlVJOh z8IUy2O`{bn9-vx2Q9aS=W1>iBgl7Qnp``5@IF&-_7LCpJ?|!YJ*f^m;VG@JKX3K^srffq zDw%?c9|{QAK;F1@Yhu5L?k{X}ir^RTkXAYLBq;N%I<#MQeX?)*D5*W55=gFi4C*;N zaSY`eheAsQ9B)sXojWmdnQvzz76$3eD_8{-w3*82FWYSLBh}PS-APVK*@;6}Xw2z? z(B8DFL!z3bC>tHm))PDqzrtJfVIrx)FsPRJFdo zxwH{^25BaPtR6*ioyfZa?Pl}IICOq4D~`N$Cm+31P0hwa98nT(utzvA7?B;uP+t_E zSCfT)b*j%PMz-18tl&JYZN>^UehnIQX=rF5x6cULg>^p08c}9?P1BHmIvKl7q0Mfc zb`=;o`^t;VS<$BBy4S%lp>B^z{ls%4nVY8qmod<|iQ9+(s7eVXVUp5xptcQ2(srVXFxOr>et=;Z#c?qqA3IMkzqPBvswx}Q&zqN_dvL{tnotdWd>%hQ3lN`_z z*i2a!1iN8S4U1z^BJdp+hm-M#$%Y}ZeOyHscO}_foa?ZXJT=~(j5#?wkCfQb&7Ebw zTwOIakCLrTE`pJB=gwh16W!i%6lf|)#!sG0gJAb+T;^{d7FI)8Rjm{=NQb28P^w{jHGw9Hwx1Lswl&a^Kb`|lZpXiXRx4gfQ9TohpR$_Q9ka2X!!Rfus(AkBXO+P(D#v9gEtsw_OA7nrTle)Bx?ntj^Cpn9^tNxFMz*FY8GDaL_ONmalhTKfI)!?jHmu774nUt5p1 zj*f86EQLf;w2)FUNEbwyv_KKsdU7DhmJ4KBjv{m;3e!gB{sG*I$ffKU8F!7GxJSlO zOTJM*rYAQIi)OJcnyIrvO6~nUY?`Ln^d1B_KsI?1fakm269r`yzZ~gIcSt5Z)al;6 zT$%uk4xSey1NGmk1orjcq`mkF-AL+p zN^Cu+lJ=qFLBYFP^UP?)d2}Gd;|K>=TT7R&ridilz8pS>fR$Q?GSI_-r8Vjn1o&a?YABHnzd_# z29r?WhA^e+=%_qfbS-uW%CzxADy1S-bbT^Mx@QPX;d2M)&p>**wlAofW+Kmb=UobKZHR!>jV~6ZI*RENEImuE@oS4+k8Pf5m zUwcNyq_<5S^rH!B<$b|7Mr|=ZFeXL*d{$GDuK6%I?zsieG=0={uF*ny?tkA*r!tNR zdupQBn2H)V={$-mkj%Xx9*R+``_uRhZAzE!#QUL-Zy;Wp^o97}2MHF>*!VKCOF)x? 
z>dzWSO?&k|RonVoLITK!*nDA$hPM>r-7H06Vc{QS#4FfTW$;FGS{FP306P@Qlgs6i zS&H)N>Mk#bb_i?_9yziK8P+!A^bS$RWR3z1jiplFUdHVV^l<)=fwjdM;NynS>H@j-9>rnO~MVSGR~ zHwnyWluX^J40hMt_l%C*w?i9I?U)&yAQ@N4B)a4>zwRru7q^ki4Qb}hy9D)D)&uX< zeqgwDYrB`>Sa%h(ICA~syksg+V&rxcpZ%526hX`$>G4P{iMMWSoK9q$YpVOgg$qrF zfheu^t14Q97}Qm(R+-dAfbBX#EtSD)j&((Min(afB2$3`h9dybEv~)!SKI<=a_iQ@K&E;K@4Dl4DPb2L5w+ zuDVv}cdBy#nu!y((4ts({qe?7>;JxjYxfkx>e`mr4a#X)^#0H>s7e0v<;f%TKv{IN+xQeq+z&%845?fEWt+G@vkgUL?~-u9#>A_PA~EObLdo|k7H zil?>$O&~lMed)@WXO)5ljuSkrgH~>u$yRN4Wa1Ixc7Og^UQQu@ad9 z`cUDO#BgoRHX8%6tHUt821JHRn+R4J?BUu(bz zN1}r6-STFECst>Y(2LZb_HP(5|GLI9JK%G@R0-TYxY|4PSaux~+blB~r0?l3zgR?{ z*iPsB-h0G?aVKC9%b%TJ(ZSvRNJ*sz2XEzc2*>DYaLrvN>V}7hn#0)Z$7lvCL8)U zfvuQqDq_|Md@G<9MEjzm8$>ZukTbSA1w$-u^hxc8Ew3$n1=O&|V*ub&2jIT%z>js7 zttA(MRd7;4OD$)#=(hlZ#+;m-hD%n!?B3fL2aEuID6(4iucBsQS4D9P>$yZWi^2g# zr7HGU)-##|jzDUoVZqE*2HvvQ0G$oFh=m77&oa0REhH8ckGOjaoO~>SV zU`gM(Qcxeu84(&?#em(ECa5JOk&6&_>=c!A ztGYS`14{n{jsfmTt$**-eDA}D4|hvpK-P*5D$D{KU8kd|8S!91KqX8cgb?vOD0hq3 zV)1LUmH*0^Sx#JLHC^_U`R(cH>B`Z$XTI@NK(fh`iZIy54I3_E=vI6AQoN0ctNz(` zaon;ml19)%00>xB9A%mx>iCeYQV4Bv`R@ku-z%l<(>E?$xIo6i&#P#0M(Bd9 zu6Ap$zEh=2RJD$mDjV%I+BCw0gUj}@tAYh1{hM-WCnL-{CsUtq_3gOCs~HcH!n*8` zu3*zd{>ohzvM1a@G-^v*HC^W!7&sYGAF_UNKgjvHDtWqU6KYDNA&swv{Z4M?QE z{|FW>Tv!w$?13n-A3WXz9_;(b={q`2sCz&cv_Bdh7m*ZMs!LF@@ZG?Bbt$BOA-d_* z{A>hK-67WjK=n3hdM+79cbtqmpF(Z{qF(^2teFvv2}8v?@5gviV~@HKy>d{_M%Yjg0zM;eM-UMQcPf zvAHcQdX`-Y^7hu_-2Nsj{t&pNc>iP_^lI2Z0hM|dTlB{5`eNJx(59Z0N#G_P>nK^x zadtfh3xLxRdLUiaHS{XscxbKCUJB82o=cilNcnV^hI2v{f#~R6;tFrpS_$}DAh2YQ z>rABfHN29^)sTf5s2s>u{5ql$RW8=OPGzT~HC_QFJ4l>7`k$F7m5iT`rfFYJOLGtD zn%x2em-cN0%m{pezxcOb1ECbIzr%+SwBViCdCIRonLX9Fd_QG*7u*rB%FO;t`)oMS zfWIk7H3mQ}YIGeN0^7Jy9R_gulQ4>BReXHB-ZGio2B3n5p+GY0lDdpZ;KTS-W*_JmYQv|iN$$o^`vT1T@$h9aEfaOkfgKKa7 zaX(9)M+%1Y8yU~C5a&Q@dHMo-N6|uCL>>0;3n)U$BQg$E2Q{OEq)m{>c~W%PT9I)e~#9fUALw;zRhA~7^}%pQmW{&o=6R;R%_(Ee=R2S$nEx#*I;{c*~&8Nj^z ziys&D!@lH-%9u54A-Okkr(Osw)I&|^--4!7apt+dc$fCshgu z8u1I2%_u^K)96TtvSdr_(R=yc9aHS6-g%VYFd&~@Oor=`HO-6J7EbJEG+1QS?0)tM zC_Yb5aAz?6q>!iLb@OlKGOx4`<#Ah@n0=YuC%EPSqAv)h^Tkq$ke`mxX}cxcNH4dM zamY-Z=niZP&WIImhbdP>1IXwRst>p+w9CkR&>(ikg^@$1NH1pTXZVWB(WO4qoy_wz z*SDok#-D&nQcW0I<0qY6;cvfPR1e7Y2M%8L9L%E4WQo8_nHqK--v(7aEY!32NwKG` zx|Y#@AdrPn5E%xfq(Q_KqSiZ^{-l1}MeoE~zS@*quReM51ml2!;gen>jz^J35#Y|* zj2yAOnuU`1yVjqmd9(qz@T7uDqWff9k0XGNH4JG-!C>dfS4RrIe`Y>xrtRWwj*gDg ztVsfh0Y8QqFM_~HgvO&3Tii9IJBZwVf8XMdKS({PGhI6u#|$ES=fz`;D1ncuGvdP2aaaelW`%1&I#Tg^7?y47TuW>jb-_o9<~o>V#0yp1NDh zqJ1-_Xyf8=v>*|;2oo0`{H&2;CNcbnh#dkk?rSd0Gg9tx^f-Zx(GDL*4i}{sIFMA( zb<3x?yW5`whanEHX_D91*H?-X3Oc^XghXVApi}Vb8lGCf&sM5gcBEfT^=E2sd1Ztm zx7U!SWBAiC7)J>?B{xdnIuM><9Oy=yf`vhj0#F4s_JVMt=UO+B-_a&#%+!_jol3BB z2w+%AeqY2|Po4xett#FRG$~|UDwQ^s++VKHRrROOrhYI^>@rgv11mc^P6M;b>6p%} zyHG|7a3O7>q|1`1!$~+snf`r|NkixK)3LE*_#udnQ%^$WRWWd{3atWKv68E%3j>i~ zoy*()IhEn*`swNS@?UEKzCbUkyHH|# z+Hmp5?Y^F^k);OStedaiy_UleSROB@z85{pnElZ)Eig>c_HOR(Hbq;hzoW*dHkE`h zXKZY&Qra5&uhMRaXxn!=u%nO71`DJezO;@#eZRC%rpqeQt#Nr05XL5*FuH4{(}feI z6O5V&$ttq@Omh;@V10(v734tJ=PR8wDS7t|Q?FbZSwnKaZ{XXoN4#&KKy26(dJrd? 
[GIT binary patch data for the assets/architecture image omitted]
zju~3$R7#qk8W$+B$_@Xp0I>=Z4C6{bwqr1gbg2OhqOnwROj^2|J;!3z24lU=ZCC&o z6`1wJVy#v4?I`4m28hqW80hzFxnBL?8+c5oD0qByP+B~{<2ONqG2f2Bp|r@*WUJ0$ zx$;#ac?1Wbux=(=3g*lhe+lz5Lo1H__@qyh83$S>cx)*-j}rL|AGU7B z$B0$+>CBSQtZe(cn79>J19}{oOj!g=o}Hl64~Zj)*-hIz#vdm1Z)JYdfVlJ-S(!Xl zXYq)ms8eq%jTO59RoF_-ute=muw*9~53PQ!eS3(KEtn-5d$#VGeayK8)ky4D^7@*n zb1keDuX%OFPGcu{#MteZZ!H?Q9~@7r|3t7c`kHgCx{;c%s^K&Jmo8`5vNd?;3LQ9q9#0 z$Q3?w-z8Cdi<6=YTYb;KCYDo<@{(EPWr65 zMmL`RC64bNxdFZ&<66MBSS<-T!<|JJ4%P+O>=3bZbY_`?&jY?{)U?tBiXUcQ*X& z?e-H9W4kFFzIxYZp~>(EYVLizjqQ_4O;M}owY;tew!^FZ^Bj40po#piW&isxop)Aj znDg%=OA&wpsUl`c=~Mz-aT(f|b7l^-dyPJ{@7*aKXm{Fis2Z$(mqPE`uhxK(Ro3h_ zz9t`Ev|Sepw=;$I6}ZoZirIpFmW$@4Euu|CkaeM0c@wg4<7b1RYH2}l6-R)M=O1$# zM9nU~p=zL>UJX_IME1^uqImY4uCVlbrUwk7en$Dxr&Qo5G7oyvG}IrRoHlQ<5d&k-L)l#BOV*RTHY;*4dy%Yg zrhk%J`Q)}Q$}3VEI8L8%RsIn@CC^N@+xp(RX*CR{2Q68$WUk;m?k~wj`#X2;+`!_6GuMKi zsEaY3uG>kWca?-tKlOY1F(#@;k>4X1^YkPa5LPfL2cZ6~(9Le8@yuN$cLPQJ#t%*o zaR2B6#~{6?Ksbti0ymzza@)D-A6D?Piw%#7EDv(0SsgcCU8AT|pnx<)R-6VO^E3SRu?2q^ouI}@njv+5+SpMgA8RQIXPtW?(`D+FvfR*sZoVd{l zJ@6p(lEH|lBD?*0)=7d`dc?!D^%hPGeyy|092k!5WSScy@m|G}p!E?*n4n3vxUAO? zf3axXJ-~(d?R~vqGlfb{>Jo{KD>El0fzj=xDXc+6%!KA|w)WRD{#pl)%rt}{eQUo6 zNkUGl#7})KIc`t7EBQZPhI9(rv_0Kk?*8r0!@^ZKLz=f!iSZ4%^ zTQTHNxk53-RMF8vd1d~LA+Pj6hCx+3bBj6B8g8=|nly;?#N)=loc>UCtBE?KDS19$ zoL9=Cbm~*E7%KLA8`;`R`cs!)r2egmRIE9gGKGGM-%=oXq7+_JT1)$s(H-z8)u3R% zwLy1c?BBYPS8*yaG8n0jtY|g+{Wb?HI|v$?jk;#?j6Pv~+LUI$&0FCm)4cQN-$kNY zD=y_xHZjq!>y{6q(RIwCerSTZCLinUE|gXbnguk=Hf|g?_};7#AArqlhWg3Q@P{v- zG4l!81gbmFKhQm?u%BN)YQp#=--05)u zO;`|s8vQ$>)KzAmioC+jnouq)(uwr%;f$i@e^NUn-5He!Q@W}Kdcu&ro0F|O=sD`w zf*bVh+>cM2@CH!%xL2y@?h{kdUb)KYZwN_@zHG;FL1F}*#`2As1GPrA!_r8WUR5WU zYfoJ$kZ|c2FfSu|T{jgke&NbiQFn8kvQ-mzg|3$8;!&;G*cbHdk+%ajM@}(Wyd4;& zgOz6mJLB={@0V&@ozwA?%K5>J#kw>tQj8G07hE<8Rhyhb=9R?n@*Ts?`amxV!DS zSLJ)tO8olZYR6ZTlp|h_8be(6x`n>@jd{i%HDf0mR53oi$?S#KLSFLQs{R=Nu;HNA z>?oU_@YAeyYCD;I#rm$^R!z+QCbe^5BkY~4fnnCT+k`Z8oTGF3DrU?7g97Z8 zG{&Ij(F`Rp)}UUP@|lyFEFLYRc>tK`G`hSe^Zx)V{Du**_`S6rGYMQwIuO_gBP9=F zqG!ztw|I&q10ZpKt)boKNc4rUYxB4Q3d-IU~B(@7e!!mLL3-79ohB{4n3z`3VLGzqRmmCR^?;wNp?@T z`D#If$wFvG5Q1MjVyrytVEqE%-r$_iVd}g#Ul9&LF;eo}UHz$vYPTRaV+-#Ekd3$* zWDU>eQ`UckC5IIReABsRkP`gbLA-PTs&vU?)j`{ z>`SDRmu&N2%a~^sAJf)(^Oljb6IqOq&ivQe_+JaPe~e@5_$aRAY=O@`R)nzF!QZ_`FCKv&H+76 zF@veFB3sH*f;f`QgUCK>qK7)6%I-h~%d%wg;$T`0Td1@P^+U*CuQ*?s=2k|I#2$7qdWdxmgSpL7qsi_M5^oyn^+ zk5EgRggK`62y7?>~UgtSo9{V zAM_%)Oqh3fyvwt5;+f2KV(Lq2L9a7qz~-2PHb;_wq!aOoinL)Xw55rfl~BGxE=xKM z&vVEAZJg%jY_k*#JG$m*aeJv|+lt!c(L>7W(q}onTl1hTxdKdU==WQ~w6};U`Y+vW zS8ghzBv>olM8~oi;(2Mv_2_kHs=|qZJxhjs!fV>iduXY*P=x(LLSE_#>2$`q_m(}f z0m&+v^p#-f-6a|0mnNZ_Q{J77q<>er`a44Xk%+1#(9WhI---Dx^Q-7DZ|5Ty4diG1 zt$CpLuru%2rAy?$ud~T+4UFSV&r*YJG8=zgVtCnk45I%|WYnkWypzRZ{ra=}(>;P; z;!SQSZN8aBM5OV#*NV$L*q?@ElJ9rc#bW+Gs=UNXNZV*oHgW@tU)jz66L%Y~Tx)$l zMcK>gk-Uxnazg@#@3^pF$zE~8dc@ABc(Y_~RmdyY>P0Nln}+B@3Jmf)B5;AKFVj*Z zoGa71-VHu?#`h!^M7{e#9K!UYF;|Hf)NL+T2p?`q-j2vaVX9Y>u%;ESGCC!H^HDFA z@YWww?1%0O>Y7p#rg-;cx|#OM57Nh~0mW4=eMHpz^66d;T_n#{+oqN&)u&#MsEq_x z5)B`t$Rokl(CxF+=GWHXRe1FWs}{ zNV#j(>hgt(f;wJCuS0C!>BDm(A4NI5jI_b_3!lrE?n#b3=Ltt_i@a~addVbGZXzu& zMU)lY?ym?k4?*xY2Jihg{*r#VlJkB=Zg94bi;q_QHh8LH`WkBvWFa3egCIWZ!{Xrx zk5f~u(5Kbf-EwN&93<)Kt>}Qhsq%p4Rqvs45+IU@Q@;p-juBX2LWhu@=TAIHM) z+`=2bNvlx4!d{sv4^#<+{`T+uXdT;UVr;bP^%CCV=w9wT3TI4Fwe3o-;a|%xm~YZd z{CuX#GbB5Y1Pw>N#>u$CJ5ptz8(nC&2uxzkfI&$#*!V1Jt#G}qiMV+2VxlT6)<@$8 z4!)&-(l^aM=0DHR|7Ww_|L1A!hqwM*_t&z)(MiN(hPEJGt3&M#)`e)Iq>%r@VQo>x zvOn$y&KH2#CNPT~eb;X%*d+)r@hpj>omlW)qyb#zX{cJlLK;AuQ=9fzp37~wLKcMa zUB?qfNnP}5?hi{MGtJrnnuXhWsC_1|$-|DlXQwwkgR$$y7YbGtr*WOf-9_6Zz^45R 
zv~%aqWt|m3#+Jtu0}J?J{NO$F`RcyQyKlutIhJM?sE6fm4uWEqe}*!;ZRTik7bE zF>A^BtbAhedX&mY`P3a(fGctYjG?*IvjOKBKaA<{Yg;zN7^g4f3lQRn>Jv za@u;w3>LYcxw1+%e40oW=v9|v^0qz?0MP<^jIcSs9Bma$*FMod(iHBaHDBUGLJf;BI74NO$D_CT8= zMe@q4R^vC%3-4k!yG_(y+51N4^nz(s5={OeCoP1$I=sS?^79lQkrgtdDjDal7opL| zHMg5(NnbTl@Y}n449;wR>k7+zjH_8Pg?;+^{0OBr|UJLxi0#Q8}-h z(Ru7*Lus_ZqjJX`7f<3AwvgJ*#fuM;L%x@YMY~b<9q+imM_9O114$Y?XKM%ZCxH{{ zj0$XU4B}iZcI|r#@m#`hwL`z$>ytU`08ZG^z>3zFjAR~Cr1GKf8jyt95}r20x5o}e zqqloG3N;!KYfY(GefiAC-u5hE8QSsHi>(+jKVN$8@v;4#F2}mKkGEUd_g<`W_YJ)C zcYztS(;WNgMQWjdzje3lcl9dwYolpdu0_8W9Uv6T=0fSBAr1xhQpmkXO&#Yh)Wwc_ z(niHOqW11W9ynYNGe7hswc1mT>s#Xvuk8ugs8FO1K`(|II$D z$(v@DT8yQ8#@s~8G?wZwOzjs!p84paGIAN^yj@Rk&$Z(YEg*e~pzo=&_Q$2K+p$)yEDUaewW%1EN(%&KxHYc{mNIq%|4UH_Z+9_e%O)n)Dp**8X8Bw18V?n{5 zNi{dBMTHp@F5Y@DX5cXziyK2t2hg}gSnGp=|Q#21RNB&=)CX2e}{tH0bB3purFnr9~)^;JZ z*(mXsH_QGAnioyfg$nJ;% zKVfhIj4pqZ`dVIC=|TCyy_LtubS3ZEg84lA*_@75Zb2}vNwRq(YV`ZNn^npNHwTKE zp}8-4!XN=7PmL{OA&HIGs^k5G5M0!9LXnNjumniO=0o|h@~M3xQQ8TLa6Y7B;nl`a z|C7@$3#WILDr1R~OEF-X2Z|ZB3h?0o5Omm>&-aZHw6~enNqDxTK>Mf+Cq>F1HNJZs zRg<-j{D#;U7NuSe9as0v60r*vF92l;K|u8pAKpyR= zUz2E$oy?*)95!ZhR+&49>I`pc3aC}gutqY{v6D2e_lyr-(`!T0;+gdd zM4LS9gX<4am2eFC@th6>=1=g~t9S@K$QwxaZu%0EljqA%Fu9?fny#WiJ*G^|DNuKN zcvOTAZIV0`r0uzO&MzbCrl$vEJfw~ z?8w22{Rri9hEsv#eQt8+QijH=)VhF31R8bF-OO|_gz|w+RQ7-~SpHC9`QLzQpDJbz zYaU8{qRRI^J;5hNPy+4sMKr9(lMs?0rvc3{acD4N1-KSV32I?v1_OXG1kfrY%G7lB zfB`|X-4DMFEGkGeWJDAjcr&9xx4SMBTs|-#YU3X#*bFX!szE~hQa_A(<$3Q0mAi|H z3oVg?h^jkjSOnnwIy;)iM|mh^GCRjTrq-I zEP`J7?xi3?dVZGf6-PwfV=Lc0bz5B$yC~-!3|72oiO-C`J-IU7kZtW|wugFoNw;L+ z&XD~%(S5L7)1`DE!(x@6Dt}0?EYTuTJ8y?8C)RIH@={Ozn(LR#4INX^;%=6pgIYyW z$FW7U1<<4_xh2LEwMet&Q{y-jb@adCY*2DWT=k?*Oy!q`W}5`&kvH0J#ws! zM^`X)Af-OrhQdqJo^PluMXYanfm}14*L4n#ss4D6En=y>)?L?pG5ir6-R52H_TO{PP<5iO|w*dxzf6OtXq znK|Wf1H$R6ttjhdYi{Xt2covg)menJ=I)}c)>JlkTntEUw6(H)4B1R8IZC5n{m$sx zH!yl>`O52SJ2$Qxc@9tR_&{&cjZw^$*r3*xMa;CQj82n==d4|NuWN{sd&v!s5#=YP ziQ;`MTSAsTUAkL*0AUA`5J;%1M`3eJT<^vpnUDKM_eK(@cdbL&$}f$zk!hXH@-sA# z=~xOcpD`@081m{$F2}8XNBHz)ME)(vz}hL;ydQOHg6Gp6Eprl_r@K(fI@`$8;d$nQ z3YNKe)^cwo_|0h&p+i%?X^*{LKv} z^Uu53J~uFGqG`K?*=E^3YAR@#(u}L6#H(Y<<4ZX9&uDE|4&AiCv>qrk&S4YIHBGm* zY&mC9q-GEtrk*o3^QqW*sf%SRt>69$aBO99FY?URc=+|-b-gF$CbgjItb9H+DJKBY z(d9rmY&u-v(oa7%83WT0KiZ8;D~)ET$#mXkC*?^w<@jx(xr*vU>61ji!G^;&| zCge{OdifDmI3q;KRW#i<(&_ZGrT>gRcLlC=Va>F%@0dzmFcWi7BpU-dbR9nowieh2 z7x*bC=NT}(1K29)JMb#E%uZ0p4&ar*oHFORrq3UtrACPct2qo=3G+`dW)$4*ecCs*SP$!29A+z-s7K+b9Hm->p9$ zLgYbl8;(GbuC6W=2Ljjoap|tFPb_G?LI#Qd4`GB0yuZ}0)YoQTkwx0*3{Bc-m$rZ? zX`*xFtw#&omkd5Xm%Gv>9czNBnxz9BNrr4UZS>pTiwHQt6eteZeuBTlX&_x?!Dsz@ zj3YtWFNmn=y1Kd0E6?*TL5?wn{BGRXx!Jm- z4XnxLK)|}B0rCML$={SOGZAhV0*SdU)ckVKHc(`%JG83oT6p*hXy@L&9c{sN+?zup zIJwC51A^bcZ%&mAgrQ<=%($B2uyGiixxS5o&L_d^1#f!>dFUUg{hdq4jL$n%Yc5a} z_4+@YS`Gc98RGiR@^#SSy>|P$e^arUW%{nCQ%L~C94XTGBxM<&*H(P=N z#mxq0?Z-4G3H)&d-rT7CbGh@lDN}l1aLf46m@ z)?prG0t`<3y$$)6fE$J1L_7b-MReEn-b|#GCa;xqC7Q%PJ44X|+5ZwhMWm0$;xmU` z-Z%7@q053se~fx!+h5DrsZpk0Rq#~Ctc?{w9LX3F3D!RIH2ak6|LT+s%<$~K%{IJ3 zakE!n z9LqaE^LB{t$5@Ytjk^z5?oHACZ0_)yJ=6nJjihdG#E*-~RsqT=&yYj^a7w=jvcY9mtx+2!Gzj^`W=fje^KFOtN zUs33(W#N?jbX(TYkl?_#ZYzgjenV~_EgS^v)%b=uYfms_&MSQgYc~li`jDpvH{jRwEZanF+Kpu#k)4hy-$`Q*P5j+-B|m5ajozYZxt z5U1KwT2$&I&m0_;Td=NFq_eG;zxAe0%Yzg=H@=iKqLQgC0r_W}*v*w(SD+euUw9iQ zcgH;2qo2ZdfVo|xY+_jmoI9rP%PZ)NuH382T|9!jc0;h1rNEUqxsh+tS1l5p>5{xp zn~M&(T6J@S^<>`!-uFH zuVBtj&E%bc&xP^}uju;A=eH$Tdslr)h#%$N`Y~R26}FE%nu*50=JPAJ-#xvGcTAa5 zRpygs_ZdxIOKe*n7nADg>6KDQ(7PENOgip-kgV4DN=I1U6^(V(HU^_!11dh=^JyqZlpve&E-? 
zo-gdN^mOTU{n2dM6I**7ievgr&MRV=WgGoYGxikXJ`abqHwNJmTxE|MyA|)|YNhRS zDjRC!7Si#T73x2h5tEL0iUaVLC%1dSQzlry6JYkon?j^tP4KP5gbI%9?_$GjBIa*b zU5abk?SDx<(fBg*`XpB7XS!JCmAXRfyMoRdz}&LL%bTD$pbP&pcvb7T#W!S9OwyV_ zH-P^MfKF6%yx4@V-dy^}7wLh%3STWxI<%oI4 zinsnW6NCdna)%rLHQ7oFmU95sK(-rH4KiDe9GEH(&a5yWiknSlgW~EF!TBGwX9)W% zfOKC6GK-zu5A7@Q)So(UZ!=*4GLE5YAW2tLlkG8a9T#5I$?GU!4<6nl|30;^Wtjijly zt5+|a-csssl~mRlBC^ReWBxPz&T^exi_(w_%DSdvgK^95giZ~Ed)A@so@9L~;xB0o z=oEmmIN9GV#(_^7iZiB7fL7Rs?0LIP9=bA_A9Iojp?h3hT?c2vJ#4$9bfac#Y!M} zhjvmeaMJOvU;bW3vGF#D08x;0pl237i~KOA=G>!sYljq`Q*4gL2%0P{qXJb5Acp5a zeUiM$d7?miM6n73T?!WZ+fS?L^7>v%FM2)Xw_PmhxzeUzMNZwp#@PJ8 zX7mR3U1#7Gg{EFKGmC~>Hncq#`32u5mGhWoHbIO`QV4^$W0KdRLDYjj@N+Tp9U*aK z<^w9`Ym`h81Tvi*5h z-YXkhl*{L(G@T}?vYHrdWLD30Rez!|(pIj1lVth8lU8Pe0v`obLz5Vl@P?c%6P-R7 zct`-7LfQ6BD#A?5$SF>r)xY>Mf3Ol+=Bm)>OQt7G^`}?NW6A3H-^^smQ(-Xk%jJC? zR9w-aDmacn&Or?EDkgy&$U2`DMRots% zx9@Rm^bXGTK@QW0O`$lXwh8gFLZn_>wDn*J?$>F96=9l$t^^O}M|BSWd=6hC{F8sz z2R2L_JKLvT43-ZtyFDzYRK^{2Cn-1~$1is)4$l6p9Yd{k6UNG6x)5Axy(#+^-~aXm z;cChurTJd2ncSVhsQFaHSpvDUMD_t?2PELh<9NmEO35|P(&&R_{W32ap}&OJDcMHz zABu(tY;xXCdh-W4<7MmQ>8z#P;4-@oa_vgKupm#sLZ+kvVu#)xRavlm7~LaqEA5#^ zNJP(C!>D1}%8oYU`>)MPr;fARs+f;-H!}B%V`-!OE8)bTfv^R=W0#r)smN|YTVE93 z%i5?qi{CE9air4}+Lib-S3Dh_hp)M*Stqlx%D#QW-gam~kL5d7jCiSJI+#BBo}BZm(o>EL?ng%1B~pCLTUFmAivjo2NxiPrW8BXo#2+V>MR~K21n6G6i0C0t%5F!& z4-X3CpDZ%U_Le3Al>pF9y+Lp$ckb!o5Ec6b>r}KaQc{!>_qI}KMcnPE{dSh+smA&~ zHo}r_u<(Q}N_bbY)D>tkI!=gXThDSxo+B?|mC8J&4g}Mr(SbH@1Dx)>%?vG%G>h%? zvUq{Gx;seYw%G%Ikd&|Ax(6}+$=EY}s){E#&x$NllVj<2w7%i91V>Fd)yeY;>T9Dp zsdy&pcKsGX)r4fH_QZRIGU=W^ym$M&f$xhA3S^iVA*H(ZvQ~4{P3PZvD@uBgwL_vz z_E6#8b0S$YsvTuuOE>c)8yq zmN&3c4cn*?Z?wG}wJG$fD+mmggkc497!1J?;S?We-?IQH4nsBT?0xUJ4RN^|-fXTj;E+;XP2?Y{ls*n(0?CDcPC@)8xbJ zOqEZ$gKtw@=0MFvNBjJu0-_Zj&Qv}%-(DXKu9kskEHy1GuHjNK$?8SXx?%C%0K z9GaP-1i^qSz9h18vQa(DlrXq)HbG+b3|Zni5BlI$7My}vhA`&QZEYpu5HCJrFdFz@ zKc5GlTCa7?PTyS?nLc*23?MX%ACRA)-?~+nA~DJ#AG91m)u4;C=d?Bwjd#J`Uaytq;2@i#{q49r&@;%J?CXcM$RdVukX0cRko&FNSV_H%k)h;$Z#SaTpa9 zWpg+6a<)Hy89Nt;i&v~~If~s8n-8`(pQz(myn~M3nruCdmP;;UFM^0v=FOvByVNv}$fRe-(bB^Ej zG00=4KYtEEvz^2>Hav_)Ug{ub-{+(%GcrO0T18=XX|pBf5H#Df=NDL>ywjJ{T5jc= zU6tEKs=3f$|H*(B8&f-A?imB4w_E&()t>4#Ja-JYdB$$~jzmv0(SlaZ;7|GaKFoGj zMH9q8@G!5U7m01FMXPlaqBI>G_w)Rp1*{1o!TJ?)-h6f{E~eUcUoX%R}3QBg^Q;7RkzY5 zSJ;8kgNvmLDl=LdE_GwQ=TD4%pAuLbkoUw)ztEO1fNqFNSQUMgs)Ua?iuLduh!_-Q z{|i#Frv@`-ri3Zzddtc_q8yu&Q`xm|*rSg*JlgB#&+09jzTwAYZQ4c|?qi|H zj?@E>j4LP1V_}|Y%9`)h7NVxpk>uVi60oC8#8$vHxZZ}6zIiR(K*hkF?*qGUdj?eN`OwssH9;MyI0 z2wJr`Mh6Ps{-E*};V-E^rGiGDLdl-wDnEQ}s~{cf6%WK(_sKqsEifEX>E^U(8Z+Wg zkt;GdW8^`PSz8q0NiGmsavRx`*u6tv!EMTE9>4Ox0B zmCQEnmZ#(HtI>~1ZNjLFPhwQvj^CNR$pMfwGfGOQG`Yi^`Q= z8;iK_^5oQAEbfY;i*rkBVsfLjm#9~8ca#wn!3pwnjhl7vHSxPW!wGkI6#KDIdRTjH z@(tq4@U)49QlSiCXR1|aT@X|aH?BQq5sXN@vfIFAizO%I#bxbduw~M3e|?|6cM|(H zI1-^*uCzGO#X)Y2w#}~|bf;oG$p3D_RZgDy^F5>{vTv&?67fgLo=Ty&URUrR!R1C6T^b7by z?`EIgZWJG;ahm?4#(TcDR{nUS^DXg)BO4rmbYOlR`YGswC~gBOfUBT>f0)vJGImy) zam)pP((HjAvgwZlodUJBfT6rpv=Dl}=b91p3RE=BP~!Uj!O@plHIEdvVPhtF2-2#U+9lBDr$=kV z1!lh_vu@tJSxr2RH{Rhn7m8}~0yzveAm!qgsqh~8 zb;MxIbm*ueDaAc`dn%@uKW_Z2HtLSN9%D#QwsghgFpj4Or*Z;+-qZt4bv>Zp!;#&< z+d`iW4us->p}HU&_zci-GJiaizjklX0%E2~IZ!K$tXt6BP4vdUX1yYE&t zr}{I0tK8nRs&Dp4C4J4I6-Qxf0cE`ynr%CaSBfeNKyTghe=zt+`5)tNcIJ7N26_1D zt%Ch-@z%%Bq(Rc)EnG`>Z-`Wlcq@@ruBy&y^h_X5hbBjKM$hEhX)?XcOL>9P%AIYL+#NRk=j%^N$24woGryM6eUm_`d3hJ^D@+4(*5^BFVA2cA_4?PeFAGbqmc=+dbJ&WrObu6f|`BU;7p5Ew$$x$Ar$d(yVFKWXc|UNT`TUP=l-&@T?T zfjBp)EZO=jZ;+5w-nq76$&e7UbRuC=0~_nO)qVnL$R85D;|3JD)hbeGPLQx;b3KPa>I z*O%epnx&W)ITV+@iK@AMo>?tpm#Z^ag{YJz9+YztM{iQ$!3N?;gvyOA+xt8emcdTD 
zu%>(T4-=*mGnPjVw8%<(AZsfwqdrNo{Vy#5(S^!t_{aef1VVUIpR%f+z|*#X^AbI( zP7&$Ikv$+#XqBEnj=Ox^Wy5%|6Gld91@I5am4gDXOVPVIVc1>+`c`X&uo7`^XOKlOybb3 zw}#D3`Yx-0@2TGV`{whC8F`cq+I~Qi6FN8@EsUM|5UJ+I4oZ3QEKr%;Gt+WFh`#=7 zyF-uhy*&N_o;zZK%W~`=aVhUnJ4D$3b#G(@^Qb&FRAz2UK%!R7(fwSWD}MzeK+uwG znGQ4@*-Q-CJbr#6*H2;d^^yMbld6-y%dLA*UqR|D|FW{2S7jx+$@<`>)%?^$%!8_i zld_|#hA~f3+H2*oy}kRTA5r*I6QbI)9PP~#D=X}^Ju}KgI!EkpFTuL@o;kV!kbL{8 z!NP+36^tO z&#@7x5dNJQ|Fo>^&9l8ff_PQN7WCU`<89kyN`n#b8;Gl;iF)W#_wDm#O+(-#2r?kR zDIZv?XQ#%c(3ip`(AS{7I?!w(4%83QpH5H4f(k=*-)%p75hBN_01+}fi*Cy+|Lrh? zHzMT5jtV39*wAduHj@GS3qkOi8iP5Iv?t6Js;-RS&xscN9op43zESx|fdvkUx$4!7 zf`7Q%cmsHOHujzRB5geI8ABWudx9{3mc#r?MEoQ2n{BxN$ByyiT5K$!pl1dWbRnZ1 zo-4dbR)I?)znWX~{+XzZp`8v6CL`CqEr*%n8B99Sse>Ts1VQT%)bwn};-C8g!CFHU zFEaCwE>tx%9QjAuGG@qwF%K+YtQQlMTZrU3`C}W-OodB!(E)b74ess>#~3sdRfJP3 z{Jj62Qu}Y}Nq@w<(+X(fRmzRYwKH-M90|r+#>^d7j`=Zqh33APeGX+Hm@Amiuf;e> zJN;{B0;vA#7`_(=95+VarrhGaKnywC*5Mo{@P!!37*Q^1lVv2$y6G_WxPDvZ3$$#8 zOE6H}RP1Ui-!_r$`O#?FkiQE+yn{J<$$JA)`Cauw^$KMP3AUW6e?bdl>=3DOn3ar; zB5?udJ}Cz%c0mxIZmyYO3Q(3fSu)?L6f5JP z_RJh^>t=6F^KOg?!8c6fJNfL`Z#igrtNpmZv9Z@S;IQ6Gd+S^N_9&cU9PP+EG{$cjP#XV! zx+fG~G)1|`SiZA9ftVmxPBhtyjx_JY>f|(6g+~p&w^w4M}X7~*u zUOyB4A&R~i+n1gMmMroHCO8SoVziC*WB0p7Sd)1i zk9E@iuE~LzETy@XviSJ=0`$gUUiOEp2}(+=O=_)8;lz-uiV=7m;k-%c*jkK&?;Rbr z>b@@9wxgSHyGHVK51){lAqVDIA2;M02jW4cV`;Ri>%5~Hd$_AH4KMA=bINmFU#eZ= z57*4!)Qj*$e7sbd=ZGL*7kHs1*6|wWlv3j+ESi?r#qj^$qDy%cgNhzg=B*-UwzXNh zIB)cJa#Yh89DeouRgoUNJR$t*WG}u{Bl#BJ>xy8E`Jj}X7vYpq+u|Tt+l1jcx;a)A zk=W7tGXj5g$QyI}7TG86tw407ZTDNRk{SQv3z*5LUL)nPJz*5jUIrdi6e#m3XXz6> zLuahXxL|&hcrE5TY3&2ugXHHh+vKdg^;LaIM7GUj&$K$2Hae{)lI)mZ{U!_>k2^=2 z{-(K8MF|)RLQ!vi`8s{_;c@f#rAI9ettrp;-Y&Z%IzEQl#4~SL!^%FOqA9$B92hV@yX5ATE`Ann{j3qn>sZ6+YEgea0D)1N?uVV-=^In>h5Cx$QWKU^jLU zit`GtsGoj2#u2>Vb1QoE6*G*6Z3uwkSSkWS8vVBG(Vx#(PZ5v}Nt|0Kwlzu?QMt;WQT?KDcmp9aVY!A1ti-p$W&+!&(f;;u}Ba-1+@} zjf0KM=3UC(SYLnCXCzfAfZ|d~*U`~cMW1=?50EP50{y1V&f9!(y-26zK zzr^TL$VNF{qyx?7Z^d-`6On-6g5qWqIiR?Iz+rX~XKZf`P*P#Y*CBJj>Jy3s{Xu%N z9K9d*-(3;zeWkxyl(k>&sw+9MV4B2=ZdIHnCjdh?>X+bV@RI^R2{=n`a;zW z<|LEi6v;AsqaSOf{E-o0e;rN>Svu$@g(;N)dq=v+>gn|zOdK~E{ZyW>0@ZO)bpm|) z*MGrNeNizR+788)(Ig-DI}R!)D^~RhSxY-onPLZ8tMo<{AXTC2S=S%r4rUcs+kd=- zpv-YGWCQxtFpc+|d;>M=aibv^yDTcUkamnIty6#T@6q6JFY2bcz6~xITgyHWJ*imk zWTFGSu7yo=99Nr|%>CDd4+?ShA)B8M4`3Rf%0+T_*9#S&aT_LB?ubELA}@2hWk9SV;bMi>G|;L^Axyq16$3d=M{!B|*yfF5BZvNKi0ga08IuLE060Dj%RA_NSn z)a;)NAFdp4YYX{eu%JN9Cj7I^ovo>aszJd0ttf)@SKn+iviN~Ohm^>yFq;OgRg0kh zv0R{Vjh)VffphQ3LCZzs$EucM!&uyi36b5T>1~e)B;i~s`iG$xC%xvU&5y~57daVP z=?E8s^muN~c}N9{1ByFmn%|oiMqu&R?UN=zCoxa7SKe>=zr% z3=+m?lC9JzFJa=Q#V-RA?l(n#QXLWx;RoJ%z|TfuJ?mesVJAM?aewXwrtf*reK0|{YC{-^z(QsK1uh%%aDZbqfj+rDFO_|;kN07vnK@}%q<@?zDFWj z!`0zqwvo#_$}X$z=)+3JK6zBa(3N$>j~4L8R}ZD)+=3W)obJ=H}CaJ zUoR4Qm#Jd~x2|06vfmncF#Yw+$%)9!{88cBGgchgBah10d7}d(?MOXn9Nj?zzu;~c$u?-YFmn_N`}YvS_NQp%qi>w8Ti|L)P05To`oB(s> z9q^Tjjn^bDMyar}1YR$CgpoBkC&W@ejAeE7{!I>fWiu=_RJJ%mW}LxwM9M&GjityQ zf$WzKpR`P!iipui4Cj8r9N)E?x>H`kU#YuPn|Vdj9oZXOu9A z%|=+nrKahe7cY8T;aqp9`eS6LY|T~f@Rq_SsK1n6q=^R>WtymwV79O#>K>KXxQ!>D^4(X*!RjbBjtUKpdkXE^GvOgv!yV^ij&!{TN?m6CN#w%qVC}nM z`l4Y^Hf&^Rkf9*0kJSoM*8WJF%8mR(JyzPKL9e5Grg82a#;#d0kvOpqBiuct7sJZs zM6+O<%e5_wH!+g=*Qu8zt5z4tc1iAZmAOzR&{q0mTP}?xIN-{q-$|Vngv>hi$w5Q) zp|Br}T&mD^+D7T~^Ef_$liK|i+nBjwty_B!H=~rEFdco(`lBA^=^?Se;l6cB(loJc z@YM4-vG3um0p9QR3XwOkVF>>q5>iUN{b4J z^iHS=E`%URF98BXr345eQj(B}__vvSSjr-d0xX7i{!+HT2{#O^*J&l+(m-pi)8+gt|ud!*b zOWE6!UScHMCl_r-!zn>86;V@v4QZZ?VocA$&T{ooPR3E zL-a!>l96Mi^Z=C)5`#Bp(4T+gAa$7`se*##jdz>ptwVL~zA+LCv7{{Pwn$QT&cMRm zhy9NqQN+r&$aWcNW4>@uheYPl-KM#Cg@33hIfvEdK&)hNAY8MVP2_iN@87+~yRg^z 
zjB>uZqoIsCGh;U%wzQTwnbWo{nA1wRb91j6v;OM-d9xXn)KM2g9wYp=lgE_cyT2ci z@|K#BN|+AR2M7l)Z#Zba(Y-HxKd{r@%FU)QmX-u-y5URwbo6nd+~8VG3(QI7CkUwP zwxz7{GH>$r8_-L)2n?}4N&H%i%Az)W4>+DeF5M#3Yh&aHhy#4Na6TT3uC%ELJ49l) zuz7!xjL`p+!u;^>Dkm6j{{3+Kx7*%-`RV_pFu@=E%mPi!-x|pD4N%{J5Er^W7wG}} zd(xGS&^^$rF1GF!UN;T@HW!8l2f8=}+GzfO0AN}lf{JVkJUF+ge+!&XB>gfY84^PN zm&o%gyMg}6Wffy8M*dvQq`#Y3S04<>L>kc94eRg2lDA+z>fxMrTVR?2)O?A_XOnKO zuF$$;ch8syYRg8mBAnua#XJX)aimvwGNVJ1(j%6?kh3G^<(Y+mw!>}VN$ULs)uqh& z!H47SjgmUE2cRb4$YGtsj2O&IujHUxC!fsmI4PZm+SmQQ#!t%vS{ra)fri;nfrEgV zbFE+y1Y*EH0sII=o5%>a+I#$K+fFI6F#duc{58FPBK@t*t@^*Hgo=60DXHo-qzMOz z(lxnbo|+(>DF#V72mJ+o0}KLf$o2N^>|FRwj-b`PFuE+$uNeEozsG`slPV0A$@Ong zcCJ&qY=QEq*Y*P#eo}wtXJH!p7%ToNk#>g-s5QCH(e*SYn zmV{}g4SH-QO@g$j%>R0$0D@HP^+d{9kFN9s`n2YnlqGMO__OF~)M)K}XFBV>{gOZn zJ>eDTT+47gR+~}f?pl`1>%6`vm+(!P#uq-27LEW&fsWVJwO-QqAI*r$Ne@x~%$C<% zAkXJyd~Cu%yR;J0AV|T?k?>5#1wkZef04a55>M0nUh{>$8zCt@TFtqb&4I<3= z`h%7o8);%W8SxsY#tF_`aaBud`r^i_<^Vz4cytUeC#1h!WtS?B7jic^0&dcZI%4B@ zMzwx;Rj4;Lh;WSxDXcem>Z8klQ5LRznCz1Nrj9*f?lQ74KHNE5IWe67&JXeAsY4DU-LW)k3G0z{0h@(cpUj&M58!Y;$JGJQ%Q3s$-n!e!dGJ54jFsj(%(#`|FEvYYGM5A(~ z5L^e@b+O|_nIN#j;m%f}?+yZov1Kei?GYtVYi2JQfg+m`s%aM~w%T6Y(^GRt(^%oe zoD{n;|4(HiKo6wIY@M*pnfP9$CP~$FbQbw-UzAGhd1uDh!e*%KS`%zy=k6R5o+PX^ zo;#S!u1|^de<4*gLBtQxPYHwTS@8N!_EQZ>D&um5$`{o8bcChcnB?e%8ityw=}lM_ z?NfO4_wp7WCinI|l(&3&=mm%0dAWlu0YKc`FHRpu4+STF$Nw0h?PU05aEqhZfeP4> z@a_1N*RRumZ&$HiXcKh3lBQ1$@J4U-cMHNb_D$cM(?LxH-hTX{D2wq9rBxCtJ}gSwb-JrBOs2N`tSD3WH+d=%7fH zikqir+c^BwiUn`&`qI^JdO!23vUumquu}2gT&VeV+nx?S+Kfz*9|m4A zfnlw~wja@P-Hcoe@k%(0bcumN3Hv;I`~bUX(h!yVPfs4C7FJg~=6y63JVizioj=GH zX|82F(Yqht!unv){UNiOd|j0Noe^dka>_y3W@KsUxzF4n&~XPrEDA>C`(8Bg%)MgY zFtx^IonCs6FzCGz83d-7joeEDcj4D)QeL;eZXTgbG#iSgF5RCDb6N!Q_wUea{csJ) z71ZZx^{MEueFI){kk(ZGX?Yjnq>|k1_9yKN^^J0}Te>9g$fD%v7v~mHnc$Tr0qtDr z2~K~|hp_<~13d1Bdbh${K;b;}fZagVf9RYuJ`-4mietta&-)Z&{b9s8`)$1V$tTF# z#wX-`*g(B&egnoIq-A9{LIWUW+tk!_?tpxCBHH8j?ZZEPt`e!g0=uBr96+PidAHsE zxS~}o=j$)F1sK6L`YY0^?v9k7LjXQu`22O4?rxek^Hl?Pa9v;kl&P(Ymaw(4^+r4i zt_7#BtCcI?k;Kl6F4GG$Z@j}B&eSrnT-V?J{VBNH5O>VQqK&Bpc#Ov$%uy(RV2ISZ zQlRg#_a%XR&@EmQ^QK_vdJFAP7#d(@H;7>2RN1$-*O?*)Y7xb)R=fJUy4ZzRaoc}O zIC0~`@7w>nfAQ3V%_kZ*^hMYuB=)&Yho6&ZUH7 zFzJCIZ*HbMS`c3S&f`!QeFMSFpiTX%r&_W1HFmxvx|CW1dP|_DUSs*KuC5cQ1i8rV z<@2h+^HxyPFEohwioUNwE({w{Gt75QgL|`Xw?5woJIdAN z)!C3{;B;Nv6R9PrfF5wQZM`qHMa-KQMIX6iPdV+5pE-S!g*L1oBj!|0-Jc>>L5D=Q zaapUpmvT3jG^?Hu`26mS+3sWTk9CBwB>#HdsLWC@Tc}@l+Tj@6 z7TTO%9PT}*v$0?Zt(!O-tZ$KxFG0^<|AKH)wYrXqsca0fwZa4}mQ?{SM(ewNVDpnb zG5zO&S?fD)XWNiZ3_9fY%1PrD6PYr}0V9ooWoa*TWL?{l6sg+Ij4%+|>lfd4o%75o zvu0lOx#**o&sKZN>d58U=rKo?ETk)y5@GDkHD@?HcM+tVA8nXT0M!%Gnbi-zv#{ zF}GcIJZL1-aflXzucY?h;V&+uVfLddMYxpaWi-aVqoR`f^`4!cFA>cNV-RocRgGzZ z;bkj@kBcJdy=+}I!DA$9U^^$B`Z+^9`NMMc68W6W8GCrRZ_nM9vZ>GrBmVG@2+7!{ zd;IwM$AWEtX%50QF2Y#Yqk_ip#rwmDw#BLj2?~bbJ52ROa~9=`E00_s>Sea_Ca`tu{C=2& zezlZo+`H1{gY8?pG2UE_#e}8R{^g_(ktHi}=MxY)w+Or#yp-x5f=S~*K>()r?~B}q zHs4jx1V=07Io@*P)o)0wgEi6ed>+opFjKP6FV{fY`xuX=+sDLf6g!o8alb#Ag_{d3 zEa2A6+|J(S3mw<5>8ok0%!|95qVsSvnZo+7tAag?j`K`F?b&BWyubFFtYOA zQh02KzPUpHq2A-+aBlf%;2GLu4 z(CeFzz?oK)DK;Y}CEJG$PHP*x1j}~M_Ab8`s3cpv(BF;9mf8AyvnIwZTBq(ZByKuS z@6cX~rBlB%>Jv73TWm=s)R9YODa1>Y<*9VHcS{z=@670dhRrc9z8+zy;%|>uVxzce z=`#u{#KIxBBMA{9$%n@?GoQ{^y~;Hue)2hi!yi%C#22&&s>{mngI(etDQX+19J;|v zxHJ@hO?!6j<(E6IqOF?So`@slw--^9xor2WoTc2wlkb<#8JyC-x!9qCpPIES`^b@l zxh!WPEopz%9?p;dV4?9`qqO_AljlHwK5kzuRxPf?dO#Fqu4h-k>g50fy!QiwWDX}B zBQnut5h>hsCe{VTw1%2CN;yHU-`E(K!QMOyqm#nr{p74{GFXbS@RYF&S?&-NfU9X? 
z`x?5NU4qA|iaZ$3{w=%vO8S|KGgM!hw#;=*d==$&Ru8z17OItM0tV}U9#;RSzxMpV zUl)Pt3w%)9Q$>><(7C-)(1QcNLy*6W={Gr(+00J&3@TlyXs`qn&GnQl)OP!9Bpwzx za}{bjLzEF;2N8FLe#ws+3{CwIhn@=1hR%UZ732g=W^w4EQ?FgfWjGw^xUr)n`7IYQ zts$rm)ODy`kC=UXq7Vl`SJN~)O#m-g@UAkXncpq)wN27tWN$7Od5NkpR-x)=hVTTuk9BP}q)`AEf!fT9rDnL!3vtX)? zv|4I)LJR_%H*n@@kndI)GW$lKRh687_bsBEpEU9b)H@HG5a;_5qUM%)|M6hb#$Pb4 zID=sUuuy{3Rr|>j2pVzcTWypRYJ z_J(!M92t)IJ)8mh!l2VXd5t%_rCAe~JF&pddRC9esHhBk0#a>Ckn4}nG0=msm6@&H z6kjw$o#X_fpkV7M!QArbg|#HcrLnW3DtJSGu0`mN&p9I*p-($QtHDTeQ|go*1l4@| zdkX~pD^$JOZy|B|G-GHXsijfaaKD2fYrBUxcoUQDU1I&!Fn=k`X4!s_Tzi=+Fp8VD zqb4CLa1HF?Q(Fge&j&Hm|d_URTt#0Y%Rcr(hvqKY6rQy2-n zmAC3Dvn&=BnNnW53nj|R(z{V4FMj7W((w_l=@aMuxx3Yls%&s{Rr&1%<2g40?Crlk zYi#<#d5I>Z`O`17`h7%MFwsT@sypvgrB*t7<2Q7sq*_>2$w|U^j?2c^=}u{d)bl0# z3*V3Sr_W>=58-5MnrDD7)gbt7wtW4_R-=;-BEa=hmqOg^g5GUsusZzI@dQj(r8+C|Kh%Qo7(r!g}-5-vYKM!*+#tN0)ptH{(kdW1az5%pQlqA%3*bw94V`|_7h zf1Z0$6ZPAKmnb}6C5L}wb5AZK58t#9$E6P}v_-%U_3*7Wb(+Is7&a3nSkAL>isOfB z_lrZMR*w3}n(^TWrWyW<&!mwBw_qU?;ZpuY#=av$Bwj1oX>XXowoDa;+F?8vOl|~z zD9M+>39)ZKc)yPGWDS}fN^x&kb?d~w>KK9z(E{|PucQYG0 z@IFDmSt4$5G>^vFj1z5jR?PPg&8MrLn97KnDG67;bVE3EC``rsieMrvx^O36P^~Kl zSr`ZbG(1*2P*(M21(Rpyn48aY`8fV{`tW$L#*3wg@e8Xr!6G{}VRDo7-2-!XQl98J za&=SN^`DO|MdAu^ahE=bpFV9atxG@9HZPPD(EO*-1}ex`1HrNw(;oxcqFo%lr6u!TET(^ zh(pWAcd0E7xiyyL+(g-T`ZvcAqfOHt@LSCFH#)!)+HoZoEbHTQH5bcXlY|fTC_U`* z7$?kkY`HlvlPupiDS#e$V0WNSc^t`b=vy7js)on%bnCOx8WUHCJ~8m*V$f<-J?_B0 zxbujgx2NDzDQf?iAci|=)?#yNG~Al|;^y|K-nWyFdbiW-323I4cjbUlvGeF1Yk|#2 zg^eSp3qr~{Gm9ru_Y*FF`d~|H;&X{VXEMtllvteWl{zOpMsp;HJZ)e>5IC>z){e;P!mss zvI{B&^oZmL8z1vwsi-p5%z+;MNX@Cg6Us#TyO$=bORhE!<&I~kj)u!h{t?7~_vU)N z(lm1UQ_2P9ZV^C=&$`9ukEaOKX1<0w3mzva76`8|I3bGimlI!FOt_+AqHq!5b(*z2 zdMz%kO;D^cPPjxb7!0hqBamXkN78qeNG=dx&I|r-w-vmxIrmzV`bSIs|!!g2@jYY4OsB3z1GJV3Tk+pk95vnISAh9dqwP!3Z4e6iNfe$B`j0- zIfqRrL7Q0ga&-DQZuKjSOQ1^PMD%!z*O*HGGy#xqdX{&9i+8~xp`HOIq62HZ0P|Q` zaqoP|Ia|%hq9CeJCna?kPE!TeD3Sg87fPhZUiv>BcK+jP4?f`kF#!E(1f7#1c7^TQ zDp&4tb|19{C?6Z3&5t)gZ*y}cplj=!o%N*sPhdR(hS)w+6*1_jdHV0r<^ESmrJLQ5 zuH3}KfBXs`LNj0Ah(k4?3RJc+pF~3xc!-Yk`7;69xw5a(4rqE<2QGz4CJcqbjumDn5D^&V|Y3mX+1j15+I zp)@i}^MgglGn8Ul=LQHF9-hIl=Ym{5AngLDNKkQ_fj*#GLg0Ybf}qkt8%Hr{luQWILk23@$YBIV)mNAhl~FGzQpg!03f& z9P~=Db@JZ`xh@QsnH&6CquCS`?cp+b2;eBnV0Kb2X(t_o_=cjf z$=2AwVn<<7#%qcW2;2PHPW8TJ*z5ijn1zOFf)oIV(?7t9_|(YOjYm~Fa;L*7PuhY54@=WN)AE3z5qP&c#D({ol?vTDem>uqNwXvt%{&ZE^L@*_JQS^0u7ia&9Sq-Lz}Y1;o_7kn8*akO~2$voFT-s{DUGzb^om zra?~^N0~#cW6;D2INJhiNPOk^U15`78>wDyvK@l z=Y$@Z#GnUY4h8JpUsmEcP&}fD5!P+Oj1OOlywl%d^IHa^kl8yhH|TFe@;#@#YCIVn z%hOEc{P;wWe@R2>`~6du9DeFa2Jwu1(qw3Hiktn6N>(1cEqUObXCwQI&LbOp?#>n? 
z_DeEp?3CcSRi@;@Ico6ey^^E0J+uOyup(TP-o^5T;NQ#%I}ukO;~?$ewW z+^YWQoxQK^j?Gi9se0}^Znu_~ckOv1c9U6x>09RJC-VESTQWAi*>gAIN@n~HCgELM zPCmM(?P%qdol$j*sxC(KMOwh!?q)B~u;BYjz8at+q=D3B(vMHXkYqJ8bMTU7+Qqk=Ze78s zjZrb1)y5Ho8l39R!z%3xM8a3K;V&cns+b?%W2D>hM=V4<#7S#)E^nJ1M!#q2f-mc` zTdjyoLjbB7qmKqbs_ql-#|QK4Bx6ODaL2e&S=F5bzVd(0c?{9i>6*_P{*ID?hw*oV z?6Z8zeW4_*-eGD?Ie|#(dgUJ(zrg(~*m&8+A~1LfKARPc=Ul3)wp&T;0`!OIE5D_FqxGuVHh3ru4 z*ptEf5g(WE+X}PuCF#zGqA%s-`#)P$wMs&w)`Va=4&zZeAjm>rCeI_eh{B zkp#!)cN4o_Ob5Z{F*>@s)?J50*}HWoUmDS+dhT8p*do=1J@?uc9#=(QBO5vkl1Fxb zE~w^jzCKjO&Uv3)U(B3$c_of@LjFB^uqiLU67q4k+)=k%U3z?K#it>UKRhKh<<=LRO8@(C&xg>sDrZSH= z{E9q|Qb@MqR-&JI6QfANd2hn|;U9l#$DN2x=93N!7He;jL~52S>l)Vn7{pAT{NA4K zrZV=$SLt!+Q7hiZxsXfFI{YI{Evo%oN+)U&ECMpZ5+>ib1H2B4+<`w%lox?VVKl9H z`m@SDSww-lbXSex4qSRjf`*!0yIOk9L8~y9`uu*}ouJnj`bvhTIUVSTS`A!7oFbXO zC#*!ygj>&O+zGiBZ6)2~`&1n2eykM6khG1_!(blsCziM!<=Be{uK|eujXw`7=dnMc zU#V~Nd@aA7(}l90V0iDW-*Gsgah8rt@~l&mg|~ZSYCbG~c^uZL(w*yo_}IQgl=^~J zBnUbgYR(BOvl_j$8siDtu^{cq*F{fT#ZvKum^=8{kqUN zIQiUyKhlb5mALcSb948-vzfMe8o_#}14iZ7r>dPW0~YllXT>s>nwcmZ1iPiJP}BN} zJ-d=z+>(M81!tG8HB%Uf6z32z=(#*BGYP~`Z&!U{lT67{!xYkLenD|SF>lgsh(d8s z^M#K!$>vz)V70+Zo>7j_7u*=K@|7ph;*49)(>0dGrVL0LWQn zZ5+~ZeQ)L}I@}+;Bq?~rU%&L6kubmIsV{M|)eJlR&J0~C=qKp!5`*@fvOE3Jz%i^aiNLs!*J-fKL30eTt6ZD`%g)o*7&5=5pt1kj=>>#B0Yw}%Js1%(On^(GF zk`U+;7K*KJ2dfoAO$m0VbT~}+EiOL7na+F-FL3(QCae#@7V5u=XW#?bcTxmrt~K-w2zE>eAp z=bSVfRrNI*(E=tZBjSyVqj14902g70`Y-$thWi3LC=KDda?{t~b!TL07&?S9({UHz z)1e3cldW4*@tusl-Yz5es21R1-6x07(5Sx~GBM-Ep~-?}DF$lFNpaci>MEDnIa$te z(GU&H6fFXq^Ddo(1%Kc(O2e?0AFhf)V}Sb?=e+m$U6<<4cqY{-t#O$|v@=+55QFyG zk%HHke^50jBR1^tSm7!8x%7fo&vVPxRl&Z<#&mDI)!2yD^7z+zofgv^YG_q9m+hf&m!LEAmOFVS05N* zRVPn0m7rJz5W-GX4%xr4$|$DYl5X&oU?-z{t>2|Z(ScI8c>JrD#ht#-z8-mFdGpgz zE@-7Jgxcz*{y&T7P4?;%%?Hz{h&l&l+}MJ;g6W4mmig+3&c2GDx(+6-W@^&@CqwwR zwm*u?&=5U$ni{ZB8Voim4c0hByizDU-mZ-n>tQ;)m32~DKp3rl`=N^sB*$_wx2@nO z^pVcYciB%#R|Ii|WOHtJ|2rMLDQz#If?D>9f$v{kK%f4+VyL&qyzND?kf~=S^S3c4 zbd|bqOz%EL4y-n?9DLun>&A(Z#Ix}jl4L& zl1jOPSyhyBK056+Tod`(WThs;f%fQ=&zc`DO4>J_4GEH`IfhwYr#I-k{7wESnrQbKm|@>dhob1z5B zN&7NlMRBb>bg%oXA_NgLkMQIcDsY8vE`fy$@E2+q7=g9r$)*z}WG!>N?}gP8yH~3P zALOsk&fZj3q!&ymvIEOL%Wt8^93-rMJqi8EcX23a3v_E|B&-;iD9X_PJC>((vIJ5YrYbk%fDk*UZ9hGJ) z?SCiq869996{M&S?L8^@$Cmo1&NgjV_B59wpHdUL9K}dl!X?jIH(FcLw&!O^GM$(Q zT=eHl@|OrB2!-ck0?k5Qm#_1}^NqLSg=d`?+O$0~3+U(N?rzDEdzr6id+c&O-Ld>K z88z|8w*_7ISZVJPNAKtb^aMYtda_03nXDdr07%;Gi4aMUGhH#a7AY{)EkTsxn!v_n`uVWIp2& zD^@zD?zRi$Lg$?!c&Lw=7w zW0LaYB)!83;mh%25RP(mxyZH!7E&FX9OUC2T140w%j>9%f6OJ_xV~&%V0#tNL3Sz@ z9|_nt)i^P6WI3XGc0@jp&3SekzBes*r&r^i!T5=QthHzI5p0R&M$M`4FeW=pRw7oq zj*`OlS1a(2z6*PKjb_T!Nv4hlk99>g-$?QaKd{WD>5G7^gEg`K<8lr7YXJT=drDV< zbGLtq;gIuziPiDngqu1F<|b*3!AGVAA7$(7H2KdKmEWMNScCCe21EJ|fh}DPPm&b~ z7L%ng6fGXUGhFA#6NOE(CCk+2UwTxzh{Zmr9wEAmUa6xMOAo}8_4LWqrPAhYS*7cH zxCY^iL;t_PJ1)gN`JWHEePnY_)+eLY(|<}d$`Q{%2NDxRVt8QE5MMW)>OkkV{{}r+ z2UGnjWuui4R z^7K0(LI4bfQ%z4k}&cpohA8J*M>=)vNmA9T*vRtoy|Rkb^4 zb#V0E9B!_p3T{e_8iHu1k+0MKd~M$Ld8Bw82%&lBjv z)}F&TDCIi8o*?&5)l491pezpLgv20oow;m7as@Usseit6p$}j-zPI@wgLzWe{T19& z1_$ZNap(;}WS0iKmPrm7p9lSPIsw{()qddj0KLl))D06g^k+us&XJlygiHl$0LM%M)t;f9ql07Xt`q>UBI4hJ4hApj^?oc_89$Vz7* zPJx|fS;Qz0PxvoS-bClxit*iv#LmDf%6CDS4NI4wYrs4XAu=R22K07|>V*+|fqp8o z=ZEFFr*w8n3w5@HGZL|U5E%i%#h4X*rz(!)sL>60J=NmS^L?sO|0v$x9WRg4y}9)5 zd$jyEFeZKMq_U~sl~vs1`J8v?bnlfbf-t%}0a&>u0OEDnO$-A5BCwrUkI(q%^KezXkABA>;OCgaiF>tiDaa|i zp|E~bOxpS4kDK3j$0pF`+${ll_Ka~hwi25k4os>Db2e4_R?37|>Q7s#6yiI(jy{xs^3Rz< z0d?+M_g2{+Wis;)YMbeo?#mcp65@R+|Kd|a+mOOR6_L)-?|kIY;P4dxj=>$6{fC1A zeCpbCHEa(uz@eS1Pch6zBmGz}8iX_Vklz~93&po0HAyQTVo*+HQ!8vkW#xv=@79_f 
zeE)dxF!;HN&565o6Q3B$#I2p+I%?5aIGDHOke{g*BZEQFm2J-+kS*6l8(bZP6B9 zMt;dC?X!x8%ZuvsiY{Mf#smhZT`>{T;eF)n37OYOn z?Guy*d@ifcthf2Awwii}@~8suF9(vTG`~S~Xi7fHXi(YzA1WG_U@}Pe7OCb2%wb%J z(rR(Bj-!OK;QG${*CIy>1OTGF){Gtdphzl_JfkKh%0xhjQivq^A-`;zxrsZhSVh- z#r{^^eg6HDs*K=yCDx(jX?or01<7vhY2CB<@(Zob+q694UbfYQSdM+Ll~?;xvYJ@ZiUM|g)8Lqcc5P58d%DI?b{ zBo9fj3U(A;)}3Kr<_Oba99oJbB%)oUo;cd&{geFf>Z1vL0bj&lI0P!-FSfxh8vMOQ{Co@I*uuV&fv zkx_{ZE#Y;TZTr&(cah_d9#wN4)2ZH<{KuVbz5NfDmhL@4Ua$9@+?y9Z9uGg!`EXn94)Ii-@DO$962OxjP-dVJgie1 z*mA`7iU0{zAgVb$7DrS81eV7HH>oIWV)Nber5e*yNRDu>|AaL}P92L0OY zxB>*G`;o$bDjk&OzFkbn(garKlk1X<7?cXo)o+W6wn9CgYMTZtL~0*MBX@^-5giES zl)%vz8%N)bznEYU!~lWw^yT63wOaDZ?Us9F zUz|46)wrPh{5uF5Ue`2`RkRP7j%OuOm*8-)S_UN+00?vM(?(Ft42pE22N4F0*@6;G z2<5bZ)!{YF!;5~FfLUcd-Ub5F{y#22O_!v7r?L?gB`}uhE|WXK${+Yy_{$xja}|pL zPc7)T3CYAU9BWa!?sIRPQjUhg^x<(Vuq9=@ju3?4^NEt^M*A=`VaPa16g*@v0sU-b zpd_Jtb2%v+pmnCG;k0K+@Ug~99$K)*{6Qj&oE?h=+o09{4UwGxsj}vg|*eo|F;IkP-)<4VF~F2(skB5K31kY_YsUId1H$OI#%d$5>~>Ko1l%?*l*< zs2kU2qx376)6i=nEx2yHV40#RDAS_)zPL@ z+`wD`O>-3=H9dY*QOTp;*DJv;qL>yiVD6#t1{PO0;K2yTi9wb6$7t_+)LnKCz0#dG zM_=m?BYpyag9hLDoT&0d+^&U5)1v~ zv`_GGPH>n7-BqI;UC3%yQ+}<(gyxEagJ)4yDX~dzOtQq z7GL*ph!=}2iacV;h4pDi_ai(TCi;Thln}4`gPR@>g&I!nD3E zW3CeNrPXTqKCfBBi=)iz@U`O!A=G1c{dp2{3zTQe$R zU(Q&LPMXV)xJ`C$^Tr@YDFOaUNr&F(G8jCUpWCSw%vIP&hq<2*g(W4kFn?PulQWaJ1@qjX z{to#JOpKn?mlX-Qa_oF`T-t9A-7>JfnW8Tr1terf>Ck=SZt*JB(LXn}np7DuYA4Z9 zlXxf$ynOxQJJXRR1GgJ0U-$$rB+wZULjHwy?A60uhrB27a+CxTSywgYmi>BEPIPY& z0J`Q#E-E4B)3*ft)HvIj@*~XoLqw-CGr_|}w;i;a_&Bm1?@DmQnI)ELDecXH63&Ty zn0Kwfxk@U~1Ly$}leak;Gzl0dvwG!ewJ8a)HA}phWrv4_HL^EA2Bk}2ACN1bAP8u8 z5Tbd-EIpGI^N2I0@sab0R?v}xtwJ4Mz4Cr;<1YmHPtLp;0*2tv74-yl*|n$E57&S4O$JtiSrDHTSVPa(zV9K zrTUtq%z^s1lWb|K;hY`HuPKc{W&9hyjUZel|WsGRK3l ztAC3rVZB)DO3sd7+aroP>a70L_ibNr;CIQKUtCQs09oBMXh}c~vroZTz8Ux{S%bunjsJ=P>HsRuz?pyf$MUkz23 zH68d&sTz2xGxhdCJ-xkR(Cqwtz_RZgIc%Nny1K6T7le*3jtfgXUVRts5LDEy+*4dfw7X!vHdY&ud+ zpozk<%?!}Oq6j>i%ocKx~JHn11m0MIAwnIOGKUkOPI=4pe!Xa$n zzueERqm^FZ9hr4Wn==ZKMG~a;O*QY;bo<{$tLQ}4DdVs+)%HT0LE0K36vN{z?c^ZtSneK&}$vEgi zS67!|KYWzGy$f^X(Tm;B5Yf@)9Y)TYBG;861KfK)N3EhKJ8AL3)euYc|-^)#q7C`%K^2G`DZl{&Z zm!vIIncI?p1G@l2`AHS$(e9m+uip0kLz>?PDE(IvWH8B@?Mr_OV+><4`c^W8uTx`e zZns|G_)X0H6%23H&irt3_du|-kxN|C-(88xcXrs-j2i=o8J3^1`JSaV3!lAjd+kB8 z(p#2Hwz=F}4-6(9LPYwNub@Qt|tNy+RTB0-*3ywKr-ub`RP}9}a zwL7c(o?h{$KUg(ShDg`6mfu^%8DhqL+A+9gFr`kex%ZqB{3?B_N4M%@u^WrA*arUeH`y_YV%c zv3n}>-woKv*=x^ujjh z7`uhC@5WVn6--^-0#vPKw;PdgpkUdTBfGz*JIBb0{h3TKN>`~)rP-a=$2yV4p`SSR zn7Sv%^_!kdC3gEqnf`C%jY6d~Pt0irZeQEwMvcA(t}j`_a1nF68e9x~I{J;(6hG%B`!4&&nTt56|jyv{# zHjJ_3dV^zeN^r`Zsz`tHc5>f+1*s{u%%qUYj!)$VHJX;eJn7gZBZPyY$uu)+%7TZX zaAvxX_#v-OrF+fme0#4pe94k;q~Vtmbi-MSe3ag>rz6~w^@6!jzqs_5SEzJ}%FT%I z`c!#m#F(Q0R7XhzNQcmjEH*%k_-Pf8Tu@QE7JVc{xr9?3Jt}97&l_#7o&lc87_3MG z`qsV=KwF<(LbUMATVeE&(5;G@)1gY!Jdq~=x=Rc}p8Pl2}#zsDE6YTfxdAh9(C!5rnM2<5Vw)d#Sq--_D%Riw{SL3S0*LUtK1f?9dV_?EI{tgSJMn zybLO_Z@I~=7=ui7;d4i&utwWvi}8{}MY`yc?NQE z4X7zO%@+W}Ys!Y}hle5+onZQf-^GI$%;@or7~?V(l2Z<{cm5hIoAzB?mh`-C5N#5! 
zMSr9!2BkPH=|fFXj_ozKJ{%6g+mgf2No?;s zvbJ897*`%<7A>>SD>sdPU^0>BcNzr$S#^_Q5OCr%D`y;b1`nSboikM-{0ZGhb%C3l zGj3cG`oCCv&!8svf9*F`tYBG+NYmv~EEEe!4G|kE3MxtyhzimL0V#nL6%`RADot7< zqJq+;6G%{+f)JDvdVmN4LWm)xxAVib=bUHuf6tuRGtV1G9h_l=;r`xN`CQ?mG0cq7 za}oJ{yp|6#o!o|~dqazO(LHDz_Ko-#c31`w=Y!D7m&He~h&Lz|X{t_DoynKI)rf`m zv1}u)h9{cayJmiKzT?sK zTFKKsae5PVC@_7Nnm~C$kE=+y(sJa8iFJO%{;yN_`dGRo5YUj9MKx+*yQCgpl6!40 zv_#EE6b8QJ#oko4Rt(YHpzi&oT%7pPwd_jCE>P)vJagmdPyc5_a5GC{Si=NnIYh$Q z$j+-iPYCS_`WRh9w%JdnEQ`@PjZL73_S^Cfq#m^3@k>yaTZy_{?w7p3N&Thpr`VZ=oP0uQCKEs|+ z|LX5UaQ1xJEq0Bb-6a%_?inZbCJEjrrtl~forwbOS2jC(H#$Nv2c8<3np5NV;%mzo z5f?D{OX92FV{t=>=rUGKucbv1h2d|h9p`k{9Mp>*tSi?Wyh!rZyRG@{)&==XE8Q1W zj&wXIgqf00NoD&3;~&FV^>THJ90#+h7Y(BkQuG^GyqoZoOx#!UQ?EkWCy^E@|;f&qWnP&r@U}7>u)=SfFJ*S4D%Yi&IXga{1+D?2Ch#@VzkWMGb9Tr|JKYK>6R_ zqq|ng*8?K|qC}RrBsES9t58?!iV#s_0@cu@DHV1)Z0MgIm-KhXM^YWz0 zx=X)mJ?R!37Dq}$;@z8`w|gvs)Ya92{Yj0;b$^;*M%*orvux#d0il>`)8voJ?+ugj zr?bb96p}v`aEcZObEc*kpe_fp+kXPq*_PPQ`2b5Re64Bq%*Q-6C_oAJ`2110oGU{w z?@v;LZub`-F*OdGbB5F}!uX9M{Fw~{!+%oa{|YFZ_nf~oe`{LE8>U23WlrdbhzJOT zGugfjk`X!pE-H$Hb$%6TgrBL9S zqTFT6A%Q451$b8@y>kO%J6Ndyv;)-1ykLkptoPTNjz~Mr_c>sw>A@~MxHlmX*NExe zmLojeJGao`S^ld-ZV(8Q$+VU_Nd+ReAoj#=d@`Lk(K6GpP@SFa))_tkK2Of%Cj%-S=!m`3=sb zpN#Qm^g%GH09yqS)GL^e3P4i4jzy9PKgdw>>a3r;9O>)VnhcX zOQld0fXJNNuZ$U)zf-~zaXOhp@kcee;Bp22DBxRKt4ZGJp}rwH;QWaqg<>Je+(Vg( zfWbWI63)cCx5JOq_Sk-Bm|@O{48AT~@zreP()xYWyma+7kXM54x-^Rd>?Gqdc2J&@ z!MIBcdaLPmRv7u|FqH_sr-jMnGbg%c2ypWkt(Ka81|hCBt*U9R+X(w6(oFP5aIu>% zz_|`|V+CWEoaYXOm*HatZojs|vKcFPL?o3`^fUFl(TQ~mjy4Hxt;nf-bNdGirH3ag zua94;JLy8Qealu^sk6l|y0yHgXs;9IeR9)FyW;(KUEtb-A;s&4wcAoiZ?`Rx&o6E60MG?zn|%{~OhM(cmloKNE!e9a2t$pg7B zUo5oVuu%P2*wT$$a9XzRaCqtGGk(xh`CBr(xL5Au_*b|!w|Bl}{B#bs{7QJ)#4T5Y z#YJE%G&$sf!gg%MrX#ujgln>TZf2&Kk`jvX{&lPF)nof(r`v9Y9?X_O6S>`adYc zg1WMEnteBY;Xu0gr=TmZ2Vz&fESk0_C$bwNHSWypOldP=+rE4vQB8GC${rd<)71g_ z%yS6-$MB7 z?X73aLXcg}#-$&z`{_q)j6p-FG|yeKYn7X&o;0LWhTajn&;q-ODt!I}wRUR^)SRB~ zY5G{}f}i~5$rjDl2p0-v&)@iYuNMuVw$8HYzoE*k^nn>?i`RL<%=37tEBb3dSvMo! 
zhU%UGntgclE4(hml#H|#LJaHWyGuMfkW9r8Z`g;)^7OANctl%``1;#*XBve5d%iL* z5i#G$=p~Twv8I$VL=EF*0nv8-ldc%vJ*9qh6{AC^sQ5r+$o6ZfQP=I6;K(JwuEjqC?v!&#N%!57*>{H*07t$;oVqry(((NLjqTm~6xt0UhvUiFwzp~&P#UUU2 zGNTleM6A6Z>&`K@Yv^|WE^-;P>{3zsPG4g!w$}IW_~$a~RuAVNAo@9*cEdD08@38F zXg0TzM^jXBG66$)A}Qe=zGq=ALPgWJdnmAHrRWO&l&;I>f6!*^D?j+RVSkHr+}G}x zUujvfX%N$-Ztj4Qma9buSH-hr?6ko~4d*)_%1T#!QaWKbXb*=M4K!plZ>*8q@1K@X zbd=Dwd^ZuUd9tYmB%a_)N*rI_Wx@)QcjdIgV~cyzUEg@J)nWj_?zmMx=Fh;&)LrKt2UFg(U#ooEgw<) zzQ%H|;0~Oe)Z1NGlKD3&q<6q-mU4gI^xFaXOQ{19HNppx7xTRUoUp~zp@5Q6Rp~r) zcYLsNE^2GnA`)Sh01%s=3AF}`T4#%xbp3ADkhH!{yj7GJI@hv{ zC^wx*v@O$=KH%UqF?~o9fF*>bHpebnkx5G;cK;6++EIurwBjhI=v3=-OZOBr(P!81ZRO662QI=a9vc%gazmQdq}2k5wU+uE#v z_?0wm?Fa*^U@)$2B4a)wObdVzJm2#}hZoux$P0JhE;KK|7JGn&we#obgkHt-RLf`E zfep$m1E8A4LL8{`X`LNc)2YiEC>gE{eMzP-fh^LUAfx}%n1Os2_OtxY0PrY?L@)g> zMX|ScQn1X5GEy3(loSAygX5?p@aNmJQ3Gp9wRZ3m!d)bD6oF1}kYp4ohf*;kKf%Q5 z@HN-(@s4#uY6&vU^%{CsY!^UO(}vrj;e|YPI2MH}r!&QNR9Xw@JOZ)o^I1=CS>sPW zn@P}w3%LFfQfp!kAJjr|hY7=IqJqAt z3p$BHsd8Koe%iLqHSFg!lNs{!NOd2|v=ZzqHhoKZ7a*Fg^b{9DsXaXgx$3Ox=|H0A zv@ocQu7j~7+3AXjRN{H_&|zGqBu)bw(HY8QBn4o!p(K=(3@QFn1y-H^+YR+~<{?be zJr@xpGXtOHBum?=Y!&49Xq3{@Z35}=qN;V3R?l6g7 z-a%uf5E%iz?@VO3ZmimV9T$a---3_&R`q;}=oO9?IN)uCULuR;Uwc~8CiTAi%7(l} zQ*%iyD7PLI3h`#L*U9#UVIMMJH4Q-7EHFgsL9U-aTc|wpcW43i3O&|$_0l@U1m97Q zx41Lrgu_8~Srj+vu6<`n2fCi$y|$vqUW$IQ^;yT61$?R91t&~Qt3fw+Menh+kR#hb zd*bqkC;eA5qXBC&ZNxL@3}MQC%7Ien75Q~VVqM>gB-q=`lPb?R&%1a!Ew_maw{9B^ z#2??{XJz}<%halgx&b4|b5%)jNoN$F* zoJw>%rFsV}($FzdRd2HQcwg*uuFWd{@IEtq1?*a(JG1sjPN)HQ0M)Smx@djG3@GgH z3W*17rk^844$iMbfiJl>!r63lTp-hiWriXeoJ1$wIzavzZ*{=amT`erjD~qLNSmN` z`mh(xeChe*8paz01hQ57z7n4BHU7i0O4#nI~NMUG3LX=QoBw^`lFrs{YD{PC}>9+f6#U&JBlj9F=wb46lC6yCpwi z4u<-;H6)`zi_$EqLP)1EGhhe>G`M@!!IQCRg|@wD^^poqlkNytf@=T}m&*Pz_og7P zlv62}o`)1roWm0IqnmCjSt_?$V|G-?k_Xy8;LXnGpZna)ZRPKW4m%N(+=q^-)_+0# z#F0?F@^i3iU3OwY*!shp=my(s;*Fm(k|b)7dheVJFe%%(Q=BJxn~|DPTUl(0rhcN9 z{@f8V;ApNF>Ju0egCq^T4?QFKZkaQ;2GVzz!CwZ6ht2@mO;vBf+boCu@o4ws=JYJP z&QhB?2pH=+1@0lFFnn5zGFE74=vVt1jyb*-)N;N#GGWaZUeE&2AzMJv=D{P-?kTn+ za7_w-c8AQUAc(0?GEP5BkuFC|#a5?Sd%hwvrh8-`;!R&IiS={2>%u)de<_w3c0xVp z!YRc;RCw@NOii3vjsJ-DRufH)ovV7vO(=&789asNDAiqV=4I5a-p(8K{{du{WRYlC z@0OW~GF{;|>Av0hmm7`auGu$}$kNZCe9SdWL_K`ze07WKi0@R*^l^7ukCSUpH16@& zy4Nyvf{r^rT2n4M4pM651lq1J^Ignil!TPQr!%r1%#=|FhupCeVs-}`7dcBL|5Z?w zXzp$#>gtqU?k+sWl`n@F zO$x|oVByiv-qnSk5hJs==WTOs2bYn4$4pbsO%bHNJv+tYv3*?SLai z*Id$>a5%okp{1b(`#t$fU@rRjVBnsj(96=#fm)jDA+I;<|$qo+^k`D`G-Sp?xYMoHAMaliP`dIf?Oqj ztFJ#i2;>dO=q^pFs{}D4x*xZGXm&ptY5CSDPv*NWJ9#->t=%Mw#h z{!~g2{i@YeBWTI|i%* zofDtktP+E5F3<~QT9PE-X?@KT)Z?n8e5G8MlBz-8?ow%LH}S)WW9?@t2ZUzA*OkL? zLIq6>=3|zqKxXb&q1W?B`5fsMfD%OUg4;Ef)*04_KlBUFFg59IpMUeiIQ+F`^a~ml zWU6ZH*|1keubnqJi5){k4gbZ@33aCsYL>%vhjja()Z1OA_uKnklT zw=c%p)(>=%+CI&0q+!acoDmWnURIwFG#zvV3SAG`o6-h?C`vE@h_0d3={_N|S!N_0 zCdIA9n>No3nhe_5R!s`4_C1=;`Ke@Pj-}g^L{~eNB-MN@Kxb$|iUGH3IlF9-Y1v&` zSfs9phw;T#)Ntuyx9?jmZWS)goXPaeIii;HX) z^7&xFE1V{P$^%g?L8JT;S)eE|9II84?kcMufCYr5*w|2M*onU@_-uZOIgf{B==sa5 z@ZrW#XKgZ$R5d&w&nwMuO(SE^c*q8JuzXm)IC#1N zTeRzU2v|N8pVi7-etnpG6&!X)b;d*T{Oyr-v)(zGz$N`t7XSf(`Wl7~N-F*1Rtu&q zVPH+B=C1dK5tTCR-xa}Z5y6Ob{KWuS;+|0JS`Cv&U^!)c*Zmq3+>#KCmn4)Tb z9*@0~8j$lfq~5P1bnw^P%RFwkc1~X8q~Te|xs~`MgTtRzAZOZ?0Gz{iY;AMw&SnO? 
z=oi2IqbGQEnGD06jrtt-K0kU>*KMaSd^GKNUUYYFRL+Iu$}ik839&8cprOMn2lHzO ztMlf znSQL*l@0j;ch6Jr_PIpo85lbBYNDv=gC(CTvr(NMk;xyT`KYe+Tv&23{_uD{<7p!Q zyL?8x1Tu(N8fQ37r{eRJo?b}7x|lY}^uNrtEB+WQ?^-9>_;wcNg0|n=q&WSZA0=t( zmwdHgx6f^Z%?FbdpOw_s7L_`epWUw;cICtB*j-s8OSm{OReD!yevK62NR4pPlAp&s z6>5ycO?a)4sctPEejh;RBdNE9mhE~ceIHOkxp*j0q&w+fZr^WEI@NyPj=x=_&DRWVCB1%Q&h{pgteyiiVH0Oj~K%vL4O+QPgK4BzS7auy6mx6 zXFwk!B?ow=1I^91@-*{X6IC@G{J2VY*=45)KQ(WM38N{D zo~ZD{edNaBo3MaK;!4gPd3XoUY?^r#9uAs9{ysUL=$j#Bt&w>tKYol|uborxZ5cCH zavV3h1D2PMPQfQ*no1`^lSxO&Pym>1wg>O`uaHl~O^j}<6JxTW7xPPfTY~jgEP5Ix z_tAWIE%`vBxL+)ua#Y$K-*wI0wd1k2jB~;Ij+Jcjvv|<_IB4+^0X(a@0Ty* zQL?L#^zDxc^IymJwR&!9v|m6(xu$TS>s!_N%F0Ya)}yU+4S3+ljg~7oNedjkdAf{4 zBaLLM-<*BNSESQ9zl^lh-`i#xm$LbEbJTrkz$c7ICXz{>q3plv;B9qEF?iXO)^`?l z;;fo(n&hYv>GF4N+;&E(m2j4^Z);au`W7SfFG<7JK?gaEiyv793NO(CYkoD+e~83F z+l1I3pT04bJInxhiEo9wK+4-sJEZW~rx%xy;A@2Sl6~tn^#|~~g(*;3OM(AZ_hm&y z(|)X2@b$}Z*xPGVcM9{ATe;^1eNCqNjYf6 z2j47AIl-qlOjY~1-m1E&S%aI!Cme&lXY#yrxA!HjHyqLq_)HFXC9F;K*Xr(S$Mx{3 zY5}2w+brY(`U*tHi64bsaYat59`aGdDi_?c&w@x717Jccsp+ ze9Hg&9_--{@5i)mQDxOh_4A6pMe4v{^3OI=2ox*FS30TPX;r8RjwZitY~xGpnlyph z{>l?<#0@r!Ba@|5C42t#p3k zE$XfWg)}di{RnnbTQ&{2{9hT7`@ZYc)qm&DH()W~(wU2_1BIABm~Z6hGEE4?-Ek!( z_sV|Az0=vC496SrT>?=u7XVM(&u<;%puYZJ$P{|@`hT7tP5gsrIBp@2lIY#1V9r-N zgYM?E6@1M=-mpKp&sVw?t(3DKJhiI z3P~7>BuZx3;vT%xjB?Zh8;p{9%o+xYcJ10_VdffuD~#r#=f=QC2zy8z8IxS+l2J7j zC~llkyMiWxIbe1;x=vjZi5c-b8pTI*!^eY!oIaVklNpMfL#d>cw!NT=#eEtr+)Z>- z>lOxeVrOz?6~2+$GJhmjorYZRQ)$7Z2W`n=v1s#=&>o+1l{N;u$|cQL+$24$%i19f zyJ^hLtF%(n<%w@bf*-hN{Qn$HPZ*~DT1A#hhEOOpJENJ)6mEMfDw|M<=S^^48&acVn_ zURt2Xrbd{F_!bNc2JG(#;dEd1SBu|DhHR7XfACPh#Zt_>MU|z$HwIjVu`&$3=V!em22y)|e_zdGLN;*A1 z0lE)=YMA*1MFUqBLweLzZUPszawZtTP)(w4SZ9qqe7La4whl~>FM<5KQ~z+YH3U)% zKE?H|o9PWSE_Z=#e%}G#zJ+?~VyljHWzX1Oc)ee!Z{0du4-OK2YTG8dn42h?{QS(6 zUm7=m&f^Daetp`q_SvULMK@nf%w4v2DAaZe%NPiT7Yt8wY)L^&Afp_33485nZS!tF z&6Ph77rDiytZ?|eNp}*R;(d)Ae|YVz$Icr9kJ#0mQ%&PkADAyS=aOQF{8tMoJKnBq zY;E$|?>eTf97M3&1itVaEZRf26&ByKFU77k8aih(a`2}`@S8R%Qi=-ey$S7Q7?WyW z&NqZk2Vq`|*ApeRrtOa_R(=d5Smd0+9$(u3VH45;Sg=?V9aW0jUcs3zJ&`UzhUjxn5z{rx$!0EW`~=oBRB;22)cL z+oheZui9_U$v)KJHthB5kTu|1r6vO2ijXsGRkub&}1P? 
z{w~50_Hl}IBB@U^S8ke`+`Z&(6eVr>nB;n}@OJmKJWLxGfMOEB7AeUCaWG&5 zIlYmS!A3JTCi5**`i#Sj`!nAv86O*5#!CCak3E?O&^-Srp0f1sqU8iu{eKiw8{mAuXWSEr-Ub z;2iO9HCEK4Cy%(uLEwHgO%r~LqiIlT$=t)Y&AB0;`vIw&)(HS~n5xqLf^T!%iIsQW zjbGe>F{WeIxZpI8!E)M#TGSS(vGLhzMeg^XTIuSj9(MFHNQd}~eg238c_L`jK)9PDFLw*PuTVi6@Lltv+C>3l*R$&8J2Jhf$w}+O?*2&|R)qg4aOIz5rkxq5ulg*8KA;&}XWhe@Sxj4c8-> z6pCaeq?pncwss~gDyPWFjFRQFcGB?hOzBMVL|Dx!1+#LI%W#?4$y<9bNeuN5YnC7r zg2HuAxjm)dzhj;^K@a~+NA@kbpKxO)FM;aEyelPRe7QgNDe#)H^u34clJd@DLbu!D z?mpxkEIZ*ilB!&NpsOQnP(8WGg3v6=`-IH&ZcV*)x00TY=Z0z96MRD}ti%_sj~ma5 z5Qob5GbqEgzn$YZO}xd7DEzD+K@hH-Gb2CZd`KOF zHQ7;%1nKt6WfxK+zH{KCX1Agr6P@yZ;7^a*dt|`cH|LQ~?=nkq|9Fc(ojW1_Wtb`@ zJ*H2E>_O+ufuV7bP5F})sREgp6Q+%UwvxyQ zr0W%AVOowVo^Jqe6vmQcVx_e3%r1Z-n7QLzPmk(I$z*)x@j9Y4-Orc)kv|FY-tNdq5v5M49;hhHfa_S=6bbK-cj(BLDHyJW+>0j8dvbhyXp4E z@$7#-`u@9<^WTh}|KX1}HTHd6^Y4Q*T(SuH+_AU^lLi1o?MDYe>gr`m?eag}JiS%$ z0=p#uqX0PIe}=#oy91E=#>VCUM{jYExpMjmOb>yBjY}8TQF)*sXkp5afG+YHR*ev9 z!>iWc0>dZIv7rR@x0QFsfC#)ORzSjg@?5t(UTPDU@jBY6FWWi(gQ#fk_NZxF}E(%#;U8o9DN--S$h75~|E=)y+uO z`LLU?PUggpx~b_%NuapOtQrG;2Nm;i@zK~puo4_d3UMxUL(!)NVh1GzafU03<8F!O zh(b_*@mc+nrw**9=|<)@#%?T?VTCUH4=eA9*3!}f(i4hJqvV$sY)NsbOHz^Y6AT~I zTZ&r&3P}X!1OB2)bYYz98rAD7BtA*^uUQSb!)Z5E;5AY3!kWqtANE1&_bvJw;SH*w zDQYzKaI;65C)=bYT-~?U6;a68`e*~Au;m%Wt(ja$tpnH03J8*Mjha@15sAp*C@%@J zQ0!m^C*pd#RMgei|KxjbJJ?~6?iaw5Y1>KiGZ4;2VTl2Mq>jTK5WN{?>X#g!|+SFyP-^ba16Yz=AE)~tdr zNvO3E5Wo5oHvd}Ivm*31Tc}Ud8INebGaAniJ8bMm))_vId)M$325Uz#( zc~jPg#MQu>BB|8Wi!h2q{Jud$r)jhPH~rH`A@x04x80hw2L&+Aa%7mJ5;-&CK-V)) zQ~25g8}_45&g%yo_YZkoWWX^_^a@Rhd?xm3brtmH@t*iJ$!L$0UO{kDWAED<}>5FcfN zS3y5!U%_lu50ef~zfwc84x6UDIcaeRvokO-DNZl$B&E8qs#_Q zj2-%cgvn)B?~mwpvCgfn+d(?9t3i!FAf*x~!jq$D*F^zDo6stgO=TxAm@s{%OY^AC z#ORvs+j9&L z)4!eulF~Gr97e0wr@E>|=(D<5E#`i3ciI<{F>gbJ7Sjv&l^=F3GA>Zf z$iSa@(9DN<1bU0>lsE7Ea1Z^)Y31eI9_AzQ8?4EjU9r4?NBzzQvX9`47QiQGa1iOrt=HBrRoJ=j zZolg(-U;89#E3xmSJQVtkOz|Q>tR0s;z@!pD9U^&ndf!Am3aLDn!$#KM}&EXIoqgv zxkK)+mGjDfBM(sUXn#v2`pe^&|BSItdcP1=s&^NYy6c=kg; z&E2p9*&1U`N>^u)>{F}o!W{|qUFP{u|NP#Z*8bc1c}Dy-tgLyCTp6+v)b@6}Wgq*8 zxkIiZjW$qUMjF$6Lm5%Fr+~KU(bHQaXBg$Ajz4U9&Q#gC!@d$ytFHQS+&KtnM%8HL_p@9doM5I&SiM|m#HR( zrF1RL$r-eW+**{j~S0X_H>ff)CaXYH6N8peoqI?)(+a{Yci`3Kt?@b zRUPBm!+KllG6@!he;z%E150<`?+f@sf%8X!*}=*L|1c?^O~%LoBniZVV?$qW7hGTq zUFKlT^r0rYIl_EAxMGqaX%Ej($CI{-E7^$aZ7F({h}X9(QcKKf2z;%$-Nf;Q+ec1QP zOaFi9;r|b}&i~JK^nWNqHqM{4mqBV}+#&_Yp9d!-4vPiDnmr%auX}n_Uoc}&0u*hK z%z6+Gq@yn(gkcp{ljJ2`7{_OGiU2Gdau00O)oN;J;Mmob*K)d+fO);cImif!u9j|? 
zm_-6FCx4<(+Kb65Kze{c7KsL(3@!xn=z&yEJUuOe+kOUa5~BIWZ7DLU5x+q2B;(guGYpZ%R$fg)Ty9|II05=?XZ_BB6axu-Ma z7NSlPAK4hXw^u7r0xly$6L)I<*VuS?t*hD4<4qK7TP$sYGvCeNstoqi*rCrRn%a~r zNzr*%_8)1zIuwudA5z3Ak?F5Ryl~oGXePM%8i7^BYa)fgnwjT78DM^b9bpnkgl4Y4 zbyT{s?Y5eEvKxjq=4PtSS5l(e2iUXVfzoOu0&1*lkBg@;{Dg7R&lW-Yx$2o!88{rh zVdC#(Rm|K3FAv5KH$!HSt7ICtOZ#z<&@-zAr^ zrnG4bcqj3!#hJNQn!&5&GZBql94-lm4JV~U^SLadZXaiQBA#zMgcdU)_b9cK29SII z4)au0*6DOcX<#v$MPwX<7%LOJZsV55ytCbR5nF*8)}f|U>uw7zSU(u@fMsbbMjwju zjOnp~juBtVjv5wbvot?^4&w1)caWdw4Yv6d?E?Cfq_F+T%NuB%u)d{mcOju896z5{ zVwSXN{nhj_A*xk!NB-CT6SaJ`aCEgfp*3R0`j7^(NE`TUlyU(cllv^4$WbYUU(+iI zUmNpCv+Y91!K?T-q8LT*(VoDvAFK{Z~K1DUA=fM zg5Qo#ock>O6pxk6&HnNI%d_HvoFU1=al#KBhseZw>DduH60?#dpI5?srk9UGBfi_r ziK12?N^L^q>Rz^?mZrP=V0P9VFg})z)A=#CXQ+h|O0at5us)20~#|pK_ z6i~2hW>H_-ljjx=&m<{$!~G|vlif$OG0jUo3(hDNdR7l#nu6IZm6{ftF$AfXB<8d4 zVNqX>6AIsCI#;O-rcN-Ngd;&|{ryqN1zGUY^rZccCn|k?d&wst_n;=MPvV|X+0w@i zvpb}uW9K_Fs|8X~tEX=U}ua^$G-YIR?15P7-m(^|c9M81nnS|H9B)#ABAq`oEI zFb&$;kT}7g4?qsizvzD8Pxw;WUNcgH)^A3=98PYs9~XS>8tiip z>sfDCxRuYCoP-=Dyl3sdI>+!I6qo3H$Bek)Jskv3l?LBC7Wy2t>Gb+-OqWI=^t|MS zc>b99^|a??V#`8N=EK9IP;DuSYR6m3aT!>dN`OGB}W(lUO z8jvq9QxAJ9TT5Q@gfDIuyIfPlyGl8}I;0QJV5;p`7knL+)Cv9-shPVzaTTvEfrKKP zL$}&c#=i}Y33rsI^~f7%&W>DzdZ;Im;%Ks&=RMT47=zDdr=K2_CnO|%HnS=5;IhAv zMnIl8Dz@ml!1}k#q1F7-Fw;kf7o_7irP+sWhjMdZ0`G$b7jFt{GW7Odt%Bj^1s@X% zhNG2pGBmSlx%8MSxUnbknjQJXE^~6v&Dvz_KNGBSC_zLFqOxY^M>&^ItYy~l&Go=q z7>AZ_B)uWbUGFHmz>B)vG{AZcA2bu)IJ`6X3&tkbrgvF2{Gk5#(3_N{Hj5 zsC{HJBkr+1ll?~3V zvqaE#3a0R-Fwg#(Ev6H%XFq9&xy_)gRZgpOgdS26?n-9up(~3+Nzaav6_q#{t2#W( zBttKKc0VaPRY!|Jxc8A=#)O+t?D7Z)gBzhY|U)w;oBU@h`uR*g2C_ogwN5I8<47!`Ee=i@<=;OPwOhND*4?F1=#N=%K*gd z8MOt08sLXU^Y)u*e#H2%T9NyexjLEdOP$n`YpCa%J->M zD&!BxSHTUYkoxjrGn{>@9_(&s^h^PM{%^!O|R0ivrD0n6usY5Xr#z!%P-WTTghDXA|zzw zBxu`)4Fy_0kEe4iVIlw?Z_}w<4p9StXMqev*J2RHW^y#k9*Y4(%#eF|g*5=~{{oA2 zZ)VXVBEh(c=E4yr!^4L+b^H1Sj4!x}X%jKjme2-j%hdK{J#p7jS7I zo&Ae)`e&fY(GKz^sY$Yek}@N@YS@hc9<`t?i)?ZKsIEX!=sq`N(bLC?%V(E%`$ z;-@1THrj^&{rCajA+*%~-qc1$tb`u|HymetP?Kj4Q&i%fJ^RC)&)uORcX0(D_CBVl zVV-*{s<)_m*sVN;f^V_9X!=NI;3AU&4F-`ha%-8|b=h@c5G|+-E$UT4!^=KB*Lh$T}A4VgV zR<-^{u1cy!NBs4-e-5TtvCY-66h1~6;fP^HaNfk~E`>+{t9>P;mZr`EuG#D@y|v>7 zJw8Wf2CH9gMGc!WvafT8&Q_Z%CtIhsg|($h5A|TksC7t1*EME`-Qs3K(kNNqaeBWe z>d%gi;j`NA5U<_j+p8^s2yAaoScW7%j)pI!X|I}i-4)v4PTAD5Z}(%HZNWEJpg}xu zDTFs5ZsJwlWK^+Ss{uN!zX%S3zTCR7sP!F7?Y`U%e&6N39QSq(KTB{Dk3$pCh zSA%zi+=jiQL-rR7t51Q833p+D>17b^>S?;(^SpgSu`xR~4>aHVSj#F{isd)#3A~n_|itt%s>Oq4GB_ex#8vM9F1%wfQW6^V8K-N^y)>Gk@fkrJ5i2&}}K` zLrpR(L_i5#xexwSofpaQq`070v-;b+h;ITLmO;E47J?7)Q4YMC&*-DyWfC}E6oB2^ zeLyPVH0Y{U_>HYu*Sr4}@x$B1JbyU*BWLv`?_ydB>}^@}tl`&T^1kIMXv}F1)3;G- zJ>1ASK0Bl$yq9`I(Lc=ZPd|sBE-%?DPn!Nba0ypm%Nn<`U3DtbIWBi_ z?Oy0OKZ;jieCx_9rLHbVAEx7ZX6?h{6dvOJa6x``LZ+jlsb1FLA-PBHji+qf*nX+R z6!2_QjrMk@uRRfXtc&ZtfV^&?eO*L*VFvx=(8TjO8hMn#`QEO;FX2v@tzC2F`w5c6 z!?I!QekT;y#HBX}OG9FIx7=7U;CU2;%bucypTQ5RLRtR>JJ#7Le)a3W_U%YBI zGh`EuDlO$v+w`g3Txp!dW#VJrZMpEo0Y8{%)#-k{{r9h>g)y-W*ob#YrM?qt<5gJ{ zzfzc;(aAT}OT3VFlG(Z04U_5Eo5o#w!tJ0M$btH3R-EZB-Lh3;uw_qm~0C_wRcL0JdJH=-cAdw*88OS$_=?tVi~+` z__cF&HFv?N1E02L(xp|66^g_M&xm6f`K0+cA(yvp`x<)8z-{r|y=8tf-b)VAQx^7l z7QT}$TrDDa`$WzGd!cZyuL>6N`m^x;CE#_nb9UwG%{P)SWD&lIsQhBo5=fh|{O(9K9LUK+ zwbBCx`>2($b9Kk6y(>gGN&_8pSaiR@8y{A_T=~i@KY66e_?$9^4R8$;${>LeKF!e9 z)KUwe=lYer%RJFNBdEUmXou9b2&%^^EP+ikRu2FkTVgTnAxS}}zg03;P*D7Od(!64&9dMK*+_S2{Y|mNi*D?>%Unr^ zcVs*-#aA_+64rk!MTMg}SRJmG&n*QHuA_U{)o<7}j+Lc`LwA-fs;Oj!;!yQMjNz2C zV`Xq#vd%IH5}>mI-Kv(;apb)+zQJS)c@BIw<0TLC6aVAI|9>wDtvZCAqLSABIj7jm z-wZO-vf=Aor2m`+@euVe@GykQIj|*=i69lo#l3y<4dhS2GW)YZTTF3{T)~3M7RjLR 
zF7Fdq`)xzu$bE_<@yt-uDi${a6fM#Ge@~&Z(iS@N*O+u<`mTwEX|phf1;qi#R%6&~ zNy}2mza)S^hlb(y^^ju)cIk)Uy6}>r<}nGHxa}X(h=wZ~1Y~~G1M4DEfwHn!v<%VEK#4&bOT*LM%HZwQyBLj+z zU{M2lMC_nmhm9I*2TFrMGa|)T%6CAEiP9a(6oQ-kfen zjZI4?fAhh--R`=XF!`kQM}X#Is}qRhJJd<&JuSO-Q#Q{Q&GMmW^9Og) zGq7ypzC@SLzi|`$os5&?(_L?uE^QkkzDS|Jw=tE5>X_FKW?gxbmN_AtQ0F_CX3X0` z>cd-Q!!D~JUWr}BBxS1`%TFci@%f=HjeMJ+L+ASC`}RRA+|nGN&%#eEq<=nKTZIR$ zxXI8TTO9ok)h=Z^l^~y0oR^uNvj2Zrd(WsQ-)`GGii#DiAkyrpfT$>)L`46nDA*{{ zqJs3^TM|2B0SkiEpn&uyy(B@AUP2E&LVy4vgb8Q8=${TrUJr<=Q&x8sSQ_9pRV9uAUArL|_$yEI65>lY-5D0w$z%NGCs=1N%Y zr{Dk!Z_YvcqU$)Y!fZsESV%7d2Dk%L!1#2 z8D9ZA?bIzO$4qHd*#_f23_~D+f;d{YijMZ#rRR+v4AG@S9pSRfMnT|d27LixRiigV zDvE819B6frvpxi~zu(=|@FZr)LG!8m>?{`IIzh5^=##l8kTp*HVQW_Fq{)^JyX*8KIYw0Pho zR>|RYu-%*54fCoGVaI*1dqku%N`!fmz8d*NB`;XD^cyop8P8lRCk|G~5`7}pIoVh+ z%oeE-LREPMl%yex3U3$mg-lga`C!8j0hkYj%1w^ax@T`BPubSJ`!*@nyERxszruNj z%IqQoOr3yry-%+0ezRfyZ&zGTvrh|temz8{ScMp4rDBP5KYI^_?%_UlX<$^n&@lo9pf zSf9NRT{Q%c9qtJ_dC}?c-n_FGw|^{XH0>I!x>>1B7tCiMx%^{E9!2Xam0vpy$+kwQ z6@_SSJ5%qibRTmY=eg38IfN*4_twyHUF>ZMDOR07$*I0|1mlB>*7dJ#~sNiYvGH^r|9V0P`;CU{zc zC)Z-Xw$x) zf8LEwPfeV^VQi{lJn#TE>{K?Q>Cy2mF}uU&&QFfn6k$@T?(hvq>-?hxgZFYvi}j2O zchQ9P#v{W?w~3}0;e?65WxKYWUweoLQ4}kOkbN70={tZO$RX*{k;!<`3e2uN|ix8GbYMV9IY&u zu^h4yUpT{4?9}uYA9nCQhw*;J^7$Z;8k zWG3A`C3aG>mT`&CZqUPdG>~n%v_3ee85sKIdHoIcJ>;k3(r4w+)PyX*xy463P99E= z>=Y~r9eIzI4F0)EVcq1uJ7@~eYW4TkCHts5$mZwS3Tu71a#T)mp)7V)#F}Fr8B)7V z{TbtIC)d0T?Il>z8SOTPjZ7IzCF&r!sG)a3u`KbhLW zmk#_p-AaN?1i6bLJFQmytNwV+^vXd1Etm_28;7oibWlN7V4~5#!j1p3v{x@&ps85B zHm-`hFMn9~clGw^>ZP}*#CLCgo}7fSUa5fpQG$asI`N-u(cmbNm@POhxzVAa$(q`r z5TonhLRVj3zqTTH_BI;$7>K-4(bWQeWjk?c$#*|0$tLdGqbXd6mG>+{RXuDRQ#1m9GT8H5Zlav&Y7`!W#Op%Dxn$6g6Bd)Ugfvty2 zF4e1JK?L6$2OWS#Y!ASy7MNiLrC_dSUM1I0Bm#gY4rh*!q4j^e8i;x!Pzqevd1yL~v!}-z?|^BzNX9ZuS>Nz{kK))f5=P1*`HKz^>Lq zes|~t3)f#Vm~DjS(GFBzn3V?y`6LkYP^O(}q+=9VyxTi6y)lrJ;GB;`Z)Sac<483A zkrc^R8n!V=yhp$01HTx~EpHC>4^6IpwrFy#aokePjW_GQ+y@-e>$lFQMB5J?yxc6| zvED~fIB^7S51sqownNRvqPu`~v3WVx#=_Fhf9B~?vbc5V8jxv7z?YiOzdulAq?&Yw zw(E6z_Zalkt;+Z7KXbmxUTR`BuR4K}otwJ`L2~!A}v5G3Vpv;I726Dg7zh z?jfHclm*2+1VQ@-1g7lh&tJqQONTb9_D`L&{u%s$FPZYD$|B!nAmWX}!{;|oN)#*q zmRI3t^`fSFn?omt33dL)zZr*`beeG5-qAugN#)Y$x8mAI4yp(2Y+{D&#yxbG=IMi=-5uCQ9MEJT#(Hc>iO8snH4WM3p9x{D;p2l zX=)X#S4=Vn#Rqtqrm@HdAZ^&QzQrup1t0c(U)$`dk6-x5I)rKRCaso?6xaqa7y+Mu zdGp6sxXP)J%jH4^!?hU8i;2iblc=H5HKv#%x2M-jCz(bzMX^+CIcpNRDJ3UN4=KkKTXahiZx0cCWypWZ>LrfXO~LY!$$J{j z=J$Lc>mgeAz}REamhK=qG{k^ZEc@W-O?^Nz2sY4Xn}lJ?$wro+OV|G2?-PI+WV*_uM^Y&E237wLuB%x!9Tip zF-qN4<$xo8HXS>W;pL1kl&@Skssok(5Y%ub;b1p%e53*q8eUTN{y38 zp#0Fes98dm=az{NRM(%wtpjFJeW5G2>MCLknZ!E~bVh}bqV9Q&c|}llEn@b4kwd&` z9J2_C%8$W(R(Z9@MQD_-k8xjAZ3-&b#|+K2*h)rdigzmJe1%^SKH_8E#q4d*pY*hv zU?|Ci_1rQnBa}!ga~0l3PuhOJPtUuH>6*xm-XLhJ{|Xbf(M?>%wiKsWx-nDtSF__~ zq|F3?8z;xTQp_9q7O;Pon!urMU^pP>`)!}EZTJBa>JLHg^(w26k8S=U)OHf;eEaYw zZ2N;PQnScL%QAo`iQJL+7M?jf-pkI%U3{E*&8zG)xxwU~m%ma>cVFdLR8Q%hw;1^t5>3COxsgBVrVio@_-vh^H%S96Z z&WQfrO#WVLPOSu(HV}4kxrc<9UV=6sn5@@Ah7s%$NMD}eK51!@0#?94ZrSL*KcTek zpXY7;WWTQ3l0@Cnp$Nfin#cj}P+e?3EJAxq9J^n%U~x*u~DwgcR3Itvg; z@~un=sOLmpB(E%zEg*66>}}=fc{Wx^UQIcJfF&ka?yTN7)dsmqtgM690iSVVxCiW^AYx?o3^N4bxo*S*|@xIv!_ZP|x?0MWW4?lQuDPu!6C7 z`VfeaRSEju5_k9*_hntZ!5%~0kMvKP(Wq26&TN$Os3;yL4_PpdA_*&(L?sHGj-T5i zJJrH^?^c1HtWqzI)m1r-+x9~7(_MklG4-^D#`5es#L3=$LnYG147>*}h8n0qc|5sS z>b|b0{MJc1tb8LuG^8RdD@vvLQZ2X6UUyXCjlv{*6UEE!CH=cxys~Vh!t&Pl=V9E*JBA$iUKNFuEde2YwI6BSZF7jz?{NyhEUbjLP|VrTYr4Z< zkTdLV{|xzHPV!>{R~P>+@ZrkjO zx$tDSNW6#YPxl{;V;@XU$r(Q4ffam!ODX^~sLE{ynxE#4Lz4_WxJC&z$? 
z{qT$|WjErVoF9F1B!O|_k~5$6Pm(Y>XtF}V(yinOa_@$BjZ zyvfiBlmHx@c80G#s@;%Yb!5Y-3>d*?M`J8S7wt&*DX;9=Quj9WER8$Z2_tYWwM8)5 zHu#BDQ{_&ZMXM9-^63fcifQ&REmyWPw!fO9eVsVEQUylf^Oa2Z*-JcGw_bhH<0LCg z@Hxo6$*kUlQl$;d!cyH@`p@Z?1L(|iQq+$6UIj`cn&M`2JK6zE(Kj+fKHxQ6IfjfH zzT#(N)>ECtyHFpPeA7yfa*Q(a+KgU4ijB?_lyZ94<58&52%K%|J|U=UwaoNLZ7%6r zUiT8c7^Wgo>vI}L4>yKDPL}_EvOj6r$`Iy33-?AQ3I!zQZ=^$LUV6U%0`r1}@C2x= zP-;LZY^eI{kPm8}u&hHF^1y+&6Cm$E{8TmlA4tu0^$|PL@+Uq9% z4 z&-F9B5PUiemBU1r3q(g6fEQSgsyT!S3>{yb3lz%8%LB>LpG2)cs{|yF=K^@=s^=Cm z4^rZjiOcL!A_++Y%YQUnme2sQRgK>XBDClCRt?G}W3x4xbK)?rhM=wcQue(P@O4B^%Y4wzi^7+eH2*Ef{l*GS}jr=_o@38@r z5I7zVB;Fte^$#XTXL*!zZd7=Rw#;q+m8VPa$fRh_7u|#2@wM6m$9vTRH~QX-=8toqjc0u?N@4eO_|A z%9ViP4H1CEBea4lqP@_A1GeU#he5}1n8EGq4{Tvr-Yb`5#1@>_EsCitUh3ce+q+l$ z4CSLi+HUiRKD+&*83i)bbFOeCd_3UNeAwLgAII6BvsC5gafZ1gre^y_cfP^R$tpSq zoIR@lWBLrNZU@w%T(0}LD$MP*(CLUG(Im$D!NAp@c07)Q&Lvjan>A;q(mzWa4`ZA~ z&Yu$5M9HPaELeO=I9!U_L5hyNRKS0ko#oToaF{Hfb#5wM@~nE6{(Wp*;(4ReX$wB= zl-1Uu<70@nL7&atkVho;FqlHjpaSDB(I#6z-VnnKg#>)H3Q|16ND~i9Ovb_&s>%PX z`Q8D)wqxq{%j<({Qn5x(qQO{P<=)vf1kx4l0`zp-T;$ejc*7$DUJM3pUo3y*+sBgO z-?xoVTR9k3MT~3$B(@NV&z#TZcKIK90K$@2NNe7bQ?0FIM{2I0?lVmojhe}HjhYOG zc3b}fkmL&Nz-y+baCnwT3V1F>Fqr3=4v8EzucO3uK%Y`KMJvE}O%Z$HrJ^lf3TUEa zn!L4kXUVJBTHAll={yxgOnOBx2UB9@?3=>kGkYe|hG3!xukI&v)=s|=lzcX`h@#0R zmF6-D>34C}Y-91liX0(p`bO#!=~aOslyeCPGpgzG#;4S?&$Un4hOmYRJTHxi>lrp! z?;q-|)cY&FA2d9n#WURVtnJ33^ZkoTf3J>_|o!` zk~8Auy9#^5BzCm~`c%rk@hDOfpQ4?K;GLP}=avTF+KwZ@Iz}29l4Ba9IbeQ@R&sygxvwx02ZS^3uNg+|i6UoB%(LKKOv;8wDpL}|~V34Z423TqcJf%FI1 zijexngV=UI?pL>&d@1yAovlN!BZ8yD$oUg@e+nF#&|Akgytr4&vIyWFj<*zSsg*IJ zuY8UpcQ?fMh{#AfxGva8w2N1Saxr0sJch=V?I)vE3Ln>!o5JrQ^G4M4Z%pW1u_&VS z?51v_{^<5EA|Cf2>8+3V5b)I3LXgWNpbdwH)JuZf_J}<>CCb|c1w$W8w^25m!wwbt zN;B1lMy9sRS2kf`yjGtz5Dz{IrJe$fSK+-sJQj4df~Q}Rre5lKR>m0iPXvC@oN_?R z_gMBhf>1YDs_wi;ZcyV=Z;9DZ#Y3W?lrQFA7l72)-Ce4mNfdV1T>Hi2C_djDb|u=V z%MTMH%1}|}u6gV9k$rWt5RvM=QuR|zTj$*O_!--SXxa8MKTVXCRYTq_Vv+9VV)fcB z26BhaQ8{HR%63A3?K{Vh>iIcf3PLYzEX?d8(3To_c4(Oaq?jgBl28X2WHdFInI~MAd_nq}Kk3dd4QKSUWj{ zkM#Ebz*@0^)G!Hf57LSY@cRu1Q*zbjr-ILv4LxO{mdh2P3Iu8@+Lp>;`Z<8)XC27B zrVmH-s_B(QdU#OaR!gRb-2Zjl``=Fd|LMzLI=AlZ`CEJG%jq{Z3Pl5x$dEtLoOh$w zkJ+q@OM(^mpp(L%ioUy=y|8LwUHFtAx*o)mo^AjPuhp*7DQ?3|__kWg5T$!47uZ=lZ5&5shJMhjzAc|&rTjv!N6rKvcgID8U8~TCo4<8Hupt`U&`J$Y+yzDR^i(J!p za2npmh@dYBy&+ts)CqR7)LGL(VqAj02kXt$kI!pk~Ut=CHl9-QvL(pBV!I2x_2GS%}i{`{T{U?;k7`PZj zgFOsh29RM&dBak%$=BA>+V_m%x9rlFvj$@2Rk!EIxpw2~dG%5J7!ASC&u25U!@9MO z9q41})5Ni(ME*6JN+F7Ou^W{qI*gBChSL;1F{Cjl7$yMSUB9h3qz2Sf`CsanarH?4 zhFu7>0qP+F(aOJi`Kf0M3-(ixp*-R6Ex*&r>rt)OKTS(YgQfDM@4`S8P`6)4uiHd! 
[base85-encoded GIT binary patch data omitted]
zuHnU(5ofk?bNAddUm2%ekYS{O_l5Ri*1JK7lJ-Fe!#Tn6fToGL#}q3uV?DK>rtiOG zgeNbPz{kOn(={NID+6RQvmIbG9V^s+z39}w)Urur!-U``W7acbvzObth)b7&Lx$59nd)*cG1xyP&uP< z_YlS}`ltcrMeQgy(WBx0e2*)xw{Ne)P1HnS@&lP{#z+Og{9d9B8R?_v-kmf)X4F|L zlEoBNTfMciFnJ=b^nXzfS7KST=rM2~s|s(dAOKzp=)|rI0)3E})ZZ>BmpWTqjT{_GTm`=b9*GX7|9aWrMzBH=%Re-D(_6$ z9EON6>rISYjE)P7QTp8S}i<(yvc1k0k z_JG_asiC29B^B0l<-sotacWg#O26u;(A*#6h~BX(4H08ZK=KT1yO&e5j2Vu3?G4KZ zQyx5cpjg$2FyPam-L+z3a6=RSZ;zEZa{F)187A3n+PoPwXYXcG0`JM)EF?rkXM~QF zwmyQ(;hu_79BV%SF?U@m&M7m6$QN0;Sjg*dKry&$i^R zmm4aaZF|<5c2o&z>?7|G}+k=!6%h=7ro;!?PIsSvktKqCHq5_4$v3&VQ{|JY6I^41k=SBx=Nbw+3re} zHWWu6`VmyAG!~NL<9-#Ac|2`H5(Z|gVmE3EKf@dY^|_bgd4a=_eN=XE7U3 zv@D8w@nS!Kq6K$7@I)~Nz8F5!v@r1aO1L=51MrD#Xv4@1oO=_J;KQY$dpH4;WGy`P z04@|>70qOBF>LS^uw-6Oa{+tS6S#MV zqA8nUN0#c^38|p-Dl&|#+@Oal{2xjaN9LH(7u9zd0reAKK(=Z^vJPA?)Zj>s^45;x3-3n4YA1$Ex& zCWB?ei%1QQn!70Rz-O8$4s_OyDm-3z-RjwwE(jO?8x3o zE`K^;ui%c^@{^}Hh99l4_>569!)1r<%v*s6_x0hQjeJ~yn>AvoN2M}Jt6hx|pIp<^ z#-D-sh6Mr)e%jv&4Ih|YQyvDs{V|33 zF>cm+^LvI;jWHs)s@-VJ#1IX=%T!x^uo;QUP@p;Ql>{Q~6(e!Df{VwY^eI{x;Z`5#*<6tjRqd1pRdAL&9^Jr`_|Fib69JtH<0cG6i^&lNPoP+5B>+5x zi2|@}M^x6-b^JH*OU||taKlXhTiwH$a?wAnn0!KAgQ{iYQ!3Z8?QBRy{*2`S5W5Ewsi60ldZWVrh{dybc(Yo=23_OGq`_I zBHjKEeOMz}&1kZrai#$VaASX_(BRUY7}1fd)W>Xn)YT6=HVt6s=;Rr5*OIU!y<7%~V;~ zuK@&t(9VVm(Cul@)J&UsSoYci;H_BG1Ov|#cWaG>*56M zW+!(=8Tx@Ca-SladK6Q)SafNyKc6TX5^JlnG5^9C*#&r&jyUi@PnyP|?kXHE0pqC< z=ZVL9;5Ksrgf)P1&kfmBGlj1<}3ujZMJOidiJ?J4P zmV99UAabtB((EX&7EwOx-4e-w!>RyhAldq1e(VV(yIvE-Pb;2rcz-uh6ow$o1i(Mk zZEzYhbWJNjYYsyhvSq*nxHpE%LH_gQ4i5|uy={lIF|-}-873O)rJ1a^d%}&PCrov* znFbt1lr#&lRQxVjSmp;V{UiodgPx)jrexFS_?u^CrY2 zP4DI(2M7%;B3LH(^KrYOlSDYG9{?j(mcULij4Nzh4S&wee}vSX%=5W_Z~?TM{!@4b z7oUgyD#~LWyj)(+Hb5mfvEDNGr4-~oA3pEwm8oi&RY#9>7NW9&J#7IeN8$ z`jNlmd4Xi-!3Kh@k(|%)uwwrXQL9=KC+D)7M+(r*(5sbWk8qh4!mn(V3CY7;Yu#w@ zt+U{bld&>RZy|`1#8*Skz)$m~UXh)@Q3D$arwjVquyBLB;x^{?)l zK!npWQ+Qx+sBj#5NgEI{T#X@-KP)lFO@4g&F%C#pT1q1Pin}o_lL4C~H>?s3Jv&!A zM&=;OGC$`T)%kf440#&Ak?$_rJ3Zd>>&yS`cXyj%F-bu?8X~gw$jb@G34|2-TDF9>@>p@8dfizG1p5zV8`%2c52lYKo-2z!f+Nv2XnW`uqpvC|0kGyU zw@ZO@NmgSaYF}(+q_*Dgg|ySqF-&Vde@I zmqk1oR#DKZPTbdF%a&(>fHI38eL0S~z*$CnZS8}|2Nn8ep+0Zz*T89w21Jkx1X1NT zFZLYnL*+*Qbzn6j4X4yr7vhmjtfC=v$YTvc#C5cVUaJ+rkR*m7;cl(gAk#PH+Km}| ziPix6>Lnxj9Ati=)}}h=tG#-whm}0t-J1)P+Vq(EF?l8Pw}G~JV6+NR8jcMHm9iE? 
zIzQPfKZ2Z=;J)9lg|vpOLBX{Yc>Ca!zkGMs*46PqfU1yP-0)||@J$Tw02n2ElSw-i zEK`lV;6x-PDOuZJ2P`@2*wu3sKX3?{-y598h+qih?*L*o;DdGP-Voc~k&WQe1J7DT z*T1nahl)P^^Sbii5N~l!T<%p5vI?ITx1oh3sF?Ovf#Y4eDmPzNL;y88D$V)V>VnKw z3C*>`GwVgjo!Ki0I!zCd9%T?g29`>faT$LW1Hu@dCSr$HqWl7`QUxF7U5z}UO93S^D{zn@W*b&@p4BJIl+TW=&~f?aV; zWV=!ONMjD3ms6|}RTw0vj@@OVgsLP^tK~X*aG!G8@oytp#*dBcFkuf*8%w0EfQWcf z6vI$uhc{Vu!ybXArmj9-oN5R6go)^(LEOA$%R3Vd7y_V#(CKN7g^L2BT#QMxWD4*^ zGLIeNhwB`L6H3Ma`C9MUSVl1bosZEj_#9-R2^vJJi>wN2J)0$8hrgWXBbV=HvNFYd za76N)hnt&Ru;eM#!^e*oBZ&~RUk1bRu!7wjfchbERm`CY+`LGwo4vyE3GsmRUDunE zG6AQ;+Oh(rpuDU2#3ryd@zkU)5?uV$ioGb@^n0z?iw7Y$-=Ym${(hx<2MQOQvH+wy z`5Y9k#oPR+!~j5PETr|I=mF4dj@Tai#*VNRc0x7<)25}T6o5fiEwgIYn>Ts1;S`)v zMQ~M-!&6?qoHD#|(iIB>&_euZZIP?b3eGcs5n?kV!4-Q+lu-Hv6o19&6E=-|*%$V% z2lxdc)@aR4;XI_ho{Z%NT4)L;?;1lR@xv1f!8{lgnNJ0yR#6ORH8cI&US4rJ{un(inR8fQ4%h zZW0hkR2eRVmjqF~EmnZ=ku#%TFk-BjgPaU}8f!7qK=Bh4A77%zK8<66W&Hccep z#wbCXn9o1RvOlRo93yG}-EaIKS4Rs!)iHI_r` z75>@VvSo7&I>%LwpqQ)4bJWa1sx-CLPaXWc+{g3EX-2RKC6Gs8*W(9bGb$Dt)Qgzh z#vtCP$CX2O99U9P@@U!78Qnio#bj^c!MDfHG9J95lm(+GSkVMv`?8fMp3(^bd`K)-geJ-sDhAm4A$lo|fu$t>AzD%+V*qUCvAF}fR zbb?5uMX=*|4_+f=vg)fS`O(<}oUY=9Zk1F~GoY!a#6<9KM z{grTJ({mQlsiL1@JMe0y4)tJ1k9NQ92WT*1yE+G));wIQP<+A}{VWsTmRa+M^u~me z?Q`*MSyqfWsE$0&X~^=%ssQv0iaB6v^E}cWa&h|y2oqm|IlV{LKfb1a{f*=JOVm+d zSPZzV31A7dhk58KX=4%nCU&5LQ6H)hBMC)4qJtl1(oa!JpuBNxyD*$K+K+f_e0q62 zs>qHnrU<71z)c~UvrFoL8bu0*^Z4cpefQ6#@ABIO5G)0{bd)G-X5T@C7^pNO{v3WR zcw$d5fyIv{>=`Kw7&F|)Wf7rK`vl@bV;AEIybMHtp%1yWYYVysY)9hBx=q228@4tG z?atK;CVd|@|2(|se~%a9q3oH8263ep-Z^XBa(iEzRWWZrHEG!#?9%!H2FEY>Js6Ca znO2NFXFW6*$`QjD86;XIHoW+EEr8av-**pqfs3nXIa3Vk6X)_3^Dz!;T7A{t-Y5cp z^n;|Z@b^qp{LM+1-(dKTbtE2W4l>;#hC2p5PR$vxfPXZzYyU$*g%_EYk8Y5Nj?U!} z51U}k)_LFm{x*Kkxte$zT4=U#qT<_jLdgfQ_NEOR?B`$^&<{PG;~&2tVBkd-tRqAK zwBKgKVK@l~f&3Zxq2Msxv_yd+|F6cu=e-oSIhK!iVfb!sy~oE zX{O;^?t>l+T)%&L8w-EW9GQ3;r+}y_M4U-ikE3{FR%Nkq8GzEC?_u+Qeh)7&62dB} zG}K~aX?pgjo7^?MVfV7+Up~wKgOHwrvFF( z)hu8AdlSKbc@^txkN5n`Z~X2T*4H2JshN%@E)oSiEQlG8L(ID2Qq7Lhwv_6AA$g9}-@G6Bb-yJDMIQdGPsiCi zkAC;b->ljDyK&xak;}j1fvs|w_B)y69)?-JldfK5Tl8C*(*J$%U!RKq`^JC$T>kH< z|N6Q7-yQ$;bNT-p+<5YSwHY_)F{(*a>MOy_a)e8q8?S*1>ds7nXLx z<->PhlYjYeTez^a(j10YXCMRPgoO`#8SkCLuQ8WhiQUMeEVcum?qDj3qDtaWs8Yg? 
z`McVlM^`UL^_UzFxc9M2iY4>J>weU>or7xS-~W4|fx>(XNbIqEH5aGWf;CrBd;7)p z>t}Z`K?SJgY4mP$C%7mAG!RAsDRGEhIguNDT-VsoSOTzQ(Zt5uwk6D3s0;t`U$vBy z5(_3H&am(k(DmVmPqtRx3*jp>O^;yGAP7h9PN4f>nHjQ}rJ@CT2vPwD?U{mIHneFB*9Uc)#57@K}y1rRrE|Pe0f$+H9s>$`(MIKNQtBG^CrQ zc6$lLF>ne`*rs@_{Z+B>)G!=ddLX#n;Ik6UWr0@4KbpvRthO_JkqmE4nfav`wlLI?SzLgQPP8WTwV9H2j>^nmgA) z1Ixq*V}aVi3DA}45NA{uJtwaA_@%(OpJYnszVtRT$^u4O|BgN(46r zmYJ*39Am~UEG&$l@Bg>Nq4%ErTx#2oAE4V4q8P_!$#r9CYq_XJhR5NA;Y&^5QW7uHpPo}J-?u^D~uF)3y)M>gE!gcl)QmIEW>=c8!V z)O-vWwaTKOEk6+YFzv(DX$KEj-C+FaAp> z5}z6$U}G)N-SHjgViLiM+6 zbJ~s!Fk%-fbff~9+ch3Mu@yfju+sbRy2oHD$LO=4v7HE!v97-=tsfmEU_JsCH9Eq_ zFjDoy(o$v>5%^z-CF6V7)?IYt0NK?V;*QBMFNl8&58Q z>SC+{I#z;-0#E2{M4?)8sZ+X8s?M`0U|1X?9>X&2z;v2=@O8dkT)E)FY#&T9Z!PEL z!2Y#}SJS518dsWPHZXhE6PavvjCaKp?;(zW;S{6s^8>h4cm20ToIm0uj}Ejum~=WH zUbjeoybT_W9N1OZXO3O6wQ!Ay(2&hXWU9p!AX|20>#%W zD-Z15yO&$%<0gvc*!{8X48)3@_#AG?1Ps}s`1&{z^~~}8(7o1QV44F2x%SQ>)*GOT zocEYAW$}#z)`-2%IL^X=Y!|{O`A{2Z7ZG6eOuQgUh=}u_UYcXXeJ0};lcMP@;~f5t zeI;st@;WN0!Tt^D=wa;V>ua4#K6fnU`58w`Ysa7e_@e*Y|BrP5j(ZRT#JZX*e>ws1 zc#YlPEKpu^8rHs!Zw~PI@MRc(l7Xf4by_9QeGlsUAyJy+q;t)XRzfz2$lX1x#n?89 zWFvgogy;uhN|w+1MK{c0WTjI-aBal}Ib_Dbc~cWn+4!M2(wCWmN}i!1iwsreze=i^19O>Z1n zGoPb&^C#p9HAg_m(7v${D5on7|qAd3RQ$~A(Uou zrJO^k#%|$Pyj%C~F|B;$_?G?d4=%voE46TElFvvyux25|1j-cEzraP6@3WiQV~jYM zu3W}EoE2_`H>rlgZCY{Tz+=%yQy^&vKsZ=_zzbl|&G?$jb7J|{!HUPJlfG5BX0D_V z=xOrdNIo#pEfl%y>+4@bpu8A_4Ng>IjBqeygPUmpyy_mPSZe!s)NixxbjWs!c)r#{ z$m&8y0$YJu$_n+LkIVlO!#^L_q+K{RH_gGlD=4b}{CPslwDl-tRZD@F9aS_glGjXE zv1<31e?D~p>SLz549DT3T&td@g^1wKBD(oL{k8ZECRJ2RxM2w|R)X!M3-=o1B|l8j z_DuyW*j&+c%KO$Cbw zERbf{`&_X)5*ml4&U~0ct6`iVU9}Ne6w2vwowCsDJb?%HWcPbV!N! zuFbgr0DZ7JY`MG+KB891PEmhJLohBgNX4!#jxNgm{p@H_krA*`An-kHGE9#5}0(_Wt>A%RJ+t4~@q#zYY_hY`Jpi7U`}&#_#Gc zh$-r7$Wp_+pc5oCSqq5P{WK}naL`&pBYLW%arf?u>urw&C}4|d5Vn;Vurd^LYph|JZaZErC0 z7=MkD;$AFCT+k_5WnW(L0>q=FQtCHw*K{PVy;&Xe#maM#Y2uT}{0bMlxFQ2x-dcCDfKUtc; z{q8d22?>|V9%5GW8oAzA{<#;FE$OJZqDu8p*PoD(cyuI24tKWp@*Ixq@T1ucM)Cqg zCkt+zas?-1PE$dWk%%q}Az6QZj3oXY6%m~{X0p$Sdd7*x5R6#acwt_1IShm+(#k5+ zE-NLBJKz6!{>{Hko~I6>=(~RbMf?JL7ZDtT9LA>nVA*Yd-+J!lFZs+I#wBOKrtzq( z!ZAmya`j-L5!w=j>f^7`3Cgr*3V>F&7Pkb$rGg?dZ*uf4!gB2Eh%3T~LCXF^NX?w& z-GlQ}ELb9nU2Uh!+VLUsWAzlfqrN@V3j)2-zlazm3vupN)3+PVg9$H#z03M%Qd_D( zH;Lv!v=uOEdW1)sjhwHa*c>1Vx0m7S@H=0bgH**UGbxXtq}WkJeJIMP3Pv3lM6b;p zkhpIt#3oANTVy-DZ_4TAX_=rjuDDF!qop3#@^fTc(#T%qq0J1)nbsQ6Ux{0=*o_w z_4kfsKbDcl4Nx-47m>ESfL^+Z%&~P$&6(OZRo78gMR_vR>wuB@$*6B#!}EfLzrs)3 z7Eo&n!x%9|7;SYtmCY6QbDak=l_Tav0ipaYPyMD@zPQjwWe>etb03lk_*EY_#U2K| z3Q<`jq#QgyjT5@mDCj3NxVrJ^%4C$m6+Rvx!w+$cd1kp5?gdr3(LBi0T|zk{bLqR= zwLXmr!P3mzhovW6dHwl+uLSOG==ur6a-?TN0M^ zkSTNBFGsbKZRu+_Hlp zO8B9i$j9|5K{HIq03LoCwfU$p6nZ&8*523&%NW`fn+&Iz6Tm^}ir`G~L3l#{0JF$t zAg?$Bpb^3A^(M|w4S#!f2XbW$HDrRAFlp;zMbL{9oJg}4{(8g_C+H%P^^d@RDIJr2 zHS|cG=#{|_+DnBIs62jPzUAXBY88dJ-{4&X4aD^??bfqhE+_!x5>2$(l;XG3W2dJCE!t~5y))oD&{Nvhp@*>bnBE*j30rLXKq&m6 zpPQSDak^sCEO~Dq%9|I<_Wr7qd|n9+?`{>_!G>ran$02vXpWXp)9eWqj*Gs}4s}tg zt&a~gA&%gp`MGgxOVako6EZTfrHOFCGML^2p*$~jo0+Uu=h~2M3ybPVNlo7pt*NV{ z8V!Flx))yp`Oh}wcHpcegBU75CsZ!Y)+QO8q)y;tufa(4?z3)u@4gt)veB|!lpiBa zhlFoZKZ`8CoJ#wc-aa~g`{(nYjrw$eP8ufon${PfbYZj`F0VdQ|K^U1J%pi4ZQ6(6 zPkz_sc|h66*2J+?4!P&6$gn)F!ssY02LnZiM|#8U9I2mCv<~smPuxt)47OZ*;cu10 zn^1dmE4>uYA~snS6;^OGju9|J7fQ-fg|Gn76l~iqQog>NQ%8j+khBQVj!bZ^d6$>S z$;mOoB2Ixe5ENx6;o*V4?G?en%Nv(7eYz`h8d!XFXsI3HjuL2d0aaaZs1Mc-9}{SP z!q@ivRji_frk;;gSM*W6Q!GPiRW}#KCy8Bu{AxNCn}nBz#OjD*L7Lro32?Y5%L=MlLCpV9bar;C z;`38`D^S97f{PaIyVS*fw~eJ3wj&d*7AB3xRB-72%|Jl`JIGhTci*3&)gAUz^&QaT zYp&J;TK%#0(zH6_~?oeMA;+E#3`CSF~dk 
zsg?ml+#r)`)R*RPsHo#j8x;4yWa>eUJz&PlXO_yc)x~Hj(s2?d04*e7yWkvMT`GL@ z69RDrn(<6xW_D9#r=k_b+s(}<1I`t8TY|7kNLFoZxxRzXNd3lq_*{#&@-A~qc)Rcr z%bmW_synC#+=T6HGZK;53f*HeBGt2M2{%LYb=o1L+(b=;)m22~DKj;PvJsATTPpaPoz3`0Pwce4_OP z!!oXA*Pb2jw~#k8$p{6Dn8iI&)OyUjGKSWAhrLN(N40Rf@rm-HHQ(yYQ%8SYXIepQ zoy~ZPppB53&NeV0=mb6_n~9hk{TyVtdCi z3d7pNV+cx8L$-auwiI3-(tU}{QU{XKJM-g^v@5q3Ga~HLb>g<{|FQyTd^ONJ19X)t z6g~}K&f54IZki|5ilykgZAq^NIbchuF-J$`8yQY#2p?ExAmu2?<^KTw(Jxozp?Du*gQx^NiLj&q`kO5bLCLhq8jhovH7=Q3rJ z6A-w1**O{DHfz6XMTwAHx{6k+r32#nhRIX_`_n7r`S@9-IHunei%AIeY2Wb~K1XYS z>fDKG6kM~0mzNh1g*1C(5@O##8n#}>Bm72}K6@X>RimJ+6^E@X1<9u6!$;A9vcC?*?dYd2q*HA`&WBB~8S%i=Isn3zC;-x0o<5;uLn5-7J} z29u`L(Ze+xuxC)U0(iFS@;d09?(AgbRP?=2Zu|pXh`stNX!=wVRobxK=hQ=etG8y` zwr#DMW8f?o=y#x|A$(DFY!vav5e81zs1}gXJ&r-b87$80L|8J&w{^K1E)@q3BHF-o zhK_!Gg7l@bN(=FthGiG;oD;=N0ElqN)jIgTuR{$e-`s~AY{MRcc5oAc6R0JtWL9#a zSwvxbAyK8+zn=%FFy@a@l^K;NfYy=vgf0d(;w6uBLvDg^%pDeAdov!(0E0#oXg?v9 z2dZN>xOtQ$M4df+d{%-48SK*i1toed=1}kDcu&2!NSk&;lTxL&ET!QO3nXVetll0- zx0oZ|qV5zeVNg3?jQW>n`dBu9qyp)lsTGB>8tAXPMjs||I6qT`@;Repap6+IF=>4H z&eT5ES)8>RGsl;ph95w!w~n0?eYp$vSg?@*wU&MBm3;ug! zG-=YQB=EB)Mt^$Q0DuQ$COtn_g5d|`E@(#4=J=uHU$~GH^#mVkHC;B7CK!_(0@S(O zu>Q^=nP4fV*Gs6|s!$eqU+W2Tkr|@{UaJh9m0)qCd3xLPcMmngi0SMl$XKN2vj7X8mT&BJg zxd^Z<8{7kv9Fy>K-?bRCE~GvpeEL1T;E6gJeC(YFHBsrql%1VWW>dREZvarjV)~J| zfsiScYV>1}L_z-msA_&FV`v&0eOidHcxt!aS(qU*5WF7EL(LyJlf{X;NBt!U>lq-| zX7>5}T2Xsrw6~qQB-BVZ(N+!MAw0%jbyC7^xz@J+-dj39G{K?4u~e36k$pno229D< z;()qD&h=%0s+znx2HJ#z4je&IcY@6oJMrSl<2Ej#wz#T{*Ort1Z zH0!WOcLCLV=(k?O{bB29vUq$tXJe-8!Q70%!eADq z2biixW^p)RW0(je7iS(QUl^3hhP5^8O6#wCafmh$jNZwGmh}@K1aWKc96r};ig_*F zkb)w5LTlF0M(j*rb!sS>TvY0imFn&iE9Mb!M%nv$wWVm-fEjqX?E`; zj{d~3lyLfpm@tHVXh#o3qq7XyI^6D4K^$XGMVl$p%BbwpD0t^EOddH_7%%Dr-jj|OTp+7#Dm`)Wnxvo$(p6{7 zN7+zdZ>0xbL*Ut7@1pxq1z&L3Hw7v;Ml7&Af|>d@&^#5JOQ?RP{(S_~up5LaF$oJJ zu;CNe;j_UVV1(IjV;u07#o$IPfHcHBBN4yJhmnn0OH5zR_5j)yrY2Ey;?N40-kMH> zEhtvaSxY^Isgy+dmcwv$nl}J?L)5KAR!C*pn%ewOH|z#1t=;$x&rmw3X6^>aq=v4| zuuxi7U1_8G+GK56M$MFgPc+x?__+YibpB)h`pM>fnnwr2x0)b;Opgt$6)as0R0YT`^mLEp3UyB_~ zqQh-p0OVk{ulgD%oDnkRigVd$R%&l?--G88}{}fWUI0$-}3B z*HcG+Y*%}92@cYZB3`U5w&5P) zf*Cvsg`IGy>-LbKX`e+7U}x$Y42Tj-^-y^#C69DvQ4QGyor=rli$rH34>Om2W`qs_ zZjevD0IVX*cjHE_NlTL6j}x(K*tXQU1^HXGc@2N~?;2Tc%@$GWbC_Vtqhv zbyfy)2{n3nH{gAcKO~4&=fI z*d=Jmq5)~NHxAI5@1P8|iH+^O+&I*^tFcc06|>Dmq&NwL;f~=s(RGVB&)2*NnZZ-E zbJr_31^5>43<=v%6z7qn22UoYwBoaBV^lI!peUuRxTVks6)_EXtE1th^{rfi*se z`106+Y>zTvf=!r4&Cx(@pxP3QOM{1WbwZ_K8;RYOAt`Lc2M?{V@ z*nLu+_!-ontx)+q!#)#(n7_CS{Z%?{b+lpywsm#E3(bu^8$NqUPqhRbPx=|f?fwnAfl(nTj%pfv=% zn3({BPX#*{L%J9bC12{&Y3@f*IK^!;F*+pY;Pwp|J-2vbFz{krts|!{h^>3v*>1u4 zG{ebGw`ZQ%b3%f^qef;LBq6{HqEapd80xhGRldyA)`DLPn9=2)rQ z(WmYTqBkE!{$YOc%9@%fZ}|H=oGm{3sg@?_*Q6T67I$|xIp4|UFSb$ckKwfY`YS@> zq)E)T;Npjt&4&jeiBo89EAF8El&c&|5Bq9AB$+iUK9=QlRXq0+iINQPKEzu4@qgDr zsH!Or>HV~sGd;$_uz#=t#m-$kOdr#S0dE0cG$6%<$X*9p?z!W|Gi+E<`6Lc!lihYO zF=nHR)^15&Hv;G}O#C+>r!tOFR{M@t{>y5#6qj+cWIqnJsPUd;Y2J%5tccBqcu-FCoOxOnU9Hc2S6pb9J+ zGREW0+Lp#fAoBWEs~)N3E?{Qff?YfB5>d^%6?H(S1;5nYx*@Y)452$%Ymn0GYGkd7 z)o1P~L{iG<%$|K07|c*er@5f%m~D9RR`fB!y^XlH>yHcd-uFMvldTt`SZ0Y0-gh59 zY>C2pv%@H9*sA>;-y37M7MzfJ^=Vb>0~F$$S|KTd1$kuAUa8bn19Ss1Lt-$iYHqw+ zsM!vwughsu2=BT`0tzHWz%C3LIw=&xoDIrrV3kapQ%>&h!XK?q8%W>wy4hqX1k1?o z!b+ticEj?PwuqC^G48l6eqi0^&Cia6N^`|L5h{Gsu^2dESika~^{^vN$SsNsPYlb# zsZ&Q~_^u4`>K$f05R@-jh)sE+U}8kc_;9ZMSdx?UN8<#*jI_D+>v-EQM*Ke$s@4L$ zCUp2VMOqIvjxfr=jluV#`%1*88x_{w*#UoXgOOP#aq32KD^F-Oxkk)*Cb#e+{BGG0 z!Q&&_(Q&-NpjuOO80+ETCTv6G*N7ZF)XT@9K9UK}6@$MMpE z_uhrMbJPa-O@Dk5HT2#%ut2^;bMv-s4=9j>t*AY5fn&;8u@bzS#$*CK8O 
zqqDJ>iFt+XVAa+4@85qRO#?8I3Dh}PJuFB|w6v=oJV3|=Ai;&ziF8_QB}+``O>=xD zlnar}Q>Ra#2VjzzyUJOkQl}W*i}GOPg3h_)iPU)(K6yf0MEBTEY1pq|4l;^=+O$8d z=EAMY{(>rk-<;*RKJg2W9a@PPOIGGB2isSB_tq09ez@_JEC zt9|OirAyiSGm9H(z#9z(cRx4ENsEjrMgQjMg1UAJ46Q}b40qduzCxA_>$wP2xt&%G zNce(4P)Tudi7&vlDxXP5*600z##9PsYMYoWA=X0RzW<=~L3X!|6vGkk;bE;oT3V@9 ztzJX!0xE8aqU=}X`8@X1;Sl8^##>PfLK6F$;|*cL>;ZF!d_t;h-n)12nGCa(2$duT zUR)Z|vmJ0Wfp46M^z3xYWP#5^>!mJ76?DEV)N93kjr$cA!kAn;hH~|>bJco< z;p+u!sy`;a^mVa$_}+jDvWKphE|1R1=TSO~bSm)|+CglTG%W{Q?vIVT31q<*_p_LT z4MXxB9LtbB>h5q5BHr?Jc|;rjIdg2A4mLD26c9_G=C|O67b+-}B|3x9mqhxhYK_!+ zTSF^5FK^qSq3*sEw3jymJc_g)-sMJdu>Hty6P7jiNwlI4&}nkAY~pP1rlNti_Qi(D zTPVTsQiR@Nh4icTKIjN`DIB)|3_!y1J9ZhqkdLd1T9+S~Ux5NK4|ly7;8xA1IiLbI zU~uV23UP$G1ecLWZY1hw_+NVxRTJ9iPf1%pf1i+J3oPY4Mkg_a$b;%zZ&v!e>jj*V z75Be7%ntGKnON|X3347P9!5}?JbLWOwL>+AWh&J@4OHc3STgvp4#jA^ zql$cKysag7XQ>nQ@U7gTqj}A{%)=2f(@kvl!HolpaT?)A|3y>d+0J*Zv}|v?ZuTQ7`STQ+Wr> zO-6od@5$C=fWzKaF!0=`evm%fWm?g&W(muxD>9b;)!5@ZLTm}K`gw#=9@ zLmgN5>t6WdYNG>Wi`Q9VwluW2{;Riq_@zCXQ*`l^yxNOq(HNm&jfxC08pPQ8cDtCo&-t)0o5ogo#4ms&UgBJs z{x8Iwi04_!)}=+-#uylR;m(=;s$T@jw+(pKeO=-)KDld~7OMe?D_ih$y1+;YyL?#+ z5V^P2+c$5_0Wt<1KVxoTA!Uu^0lVema}$huVKGqVGP({R9m#jLcye(*Z1tAK6hf&* zfw=+u_6*J9TJ%E4b+}7QO9vuBm~{&`X{_|U-GxphUy+uO2*8`_W1jN$WjcJgR^Fj3 zCbj`Cr`(s1F$9XNfgWku_q>KBu{dAK-N~UiEx8m4&qhpITnt8PWugg+-}0M%9XhzK zsYv+q(}o=$iZ0thoV^)7=&aa`jF;{5mfhvWQ$do@NGxPj(GT6kAX{RpPZ`3}@t$l> z`ly0aMvNs{MZR~HN*rz;)!cIJaXSSkuV{py1cW3dw63hd zUTeDio(a3qKc9yTLTh+iWAfg@AAb0Ok$5Q}U_P{JhNlD?SNu> zc|)d8+l+2~pu7crh8QWpBw=m(#L}qUItp-o`DWz5tuc*Rl=`Ue+H+g%jtA zyN0`<{#xt5e^!RsMfyPBJvqL!SLu>#$toSm9Xu^8Z?1qd0%X#3%JU&yo8?)a>8vGi zCqTKV*V?tj=*}+ji_f0$FfqTnJk2SZ9RfoO0T|awyzRki*&^DjSGy|1H{)*b z3)mmSa6|-wEgSViyw4_>#c-!5Td2XoPxfh&>TWZ$avkfoJ~d2pwr4M2aJwiNgc>>V zRL?>r?=N2LpjH9ET@u2e4BE2ktK*n5QZLGyMrDMnzMT=?bq1Y~?;j5o{qmgl?Yg>R z&=1S+K-Jz2OAsH?fu-=y883R;2Xc~!G#MZ?wEOqh(&b@p$QZwz!C*)pQvoD0AJREQ zTSz+Ffk@_(A`&do%8!?DnW^KThE=|BG8P!WK;#X^1%OgZ7QGWeme`P#6NnieNd3iR zWMqQa5(_$visg_I-*O<6ZD^#@l}GXnAkR$0Yo~ie?$G^tF_Y&;N8>6aPW;db8w@Ws zNm5aetSIkW=j4>skBeTns1UA|m4d=`Z3k)comF!MD-liLqqhild@JNiV+^8U6yg1& z5chtgqdF3*G9Ar+&-=`~N~YlDXs-i08@W)RN+6)-yTCDaUqLz|;?Ci%?LL<^I+FbQ z?IQEGF^RikD^_qmI>XRj0`Y%1ormr=sB<++D}hgc*3Sxs(_eWwn%7ldG1D#bY`tg|!1J%)@1P`QyO+)vHaj`r9?=MD%zb;8I1`7W_7lHZ-Rr zM~)QMH(QFsY3Txn?+Pi+WL8K)d-Y2}Zz+6sC-L!7wdC8nPvMyx%n5ac`#$xcxpAL? 
zj?MyN*UPd6ra`fIFS4T?Zoi*<+?RN!Hj_s&Rvz9N?_34*f` zov2l#yYGa^vnTM*ng?gdsN!y*VNyRWS+5ogeL`DzcS=S!UQHlMpB^U003;t@CIAmq zDKdtTn7dJg5WC4-b{xQnyqfv2)uP=CL)n-#?&qJ&dyVk3J2{C+m&!v``09W$;|w#H z2~7j|GX?N%k>=I_7H*m@Ue4dW<1+@K6FX`7k6e#uPR8Ju7IUourII-$`Dy4vDQ~w` znRAYnjiH-JTV5?PyrR*KCiKqXpQ$`f)d{K|jO2c5QkyyDLjCKLb@eZ=j|$DL7!-Auv} zET8%D2tYOYBNXU_H;}>H8PJ9>#x1R{tCM)}67oSQBq@p1;iUT%w!;ZR$&UxLkQ?N4N9ohS+S6zkgcA(Dk6ZVyw^8rDJk2aX?f<9$)OLR zZzGELS1$OruzFkEmk7>FVa47?G1J`@wiFpcd(F^bDJyFal`2u1g%LtPe1(9;=HLwj?)3y+N6YO$2Q{i#ddn zk!4-Fgj#ic(r*#a<96D{Wn>r$OrM21(_q>4SS|~#l+-HdMX3Pwg4iiI*7;DS!Vmr- zt#uU!v9ap$NRcBhZZ#wk=8nno<;#Zwi{x=v1THm@9pBM?&W>4e1(4 z?%*`Uip2pbeA8^X33rTl(~~=^jZb6IKqN(R=p;)cNq&PZ{JGt&qz}=xPfY9Lma=Ii zlK=8cAU8qPQ=UIW83TGMt79r#W?-gDf|#Nr<;NeJhAiS@vPPxczkJgRgl8WPXEg5= zj85{0Q14PjY-P)e>9r0Z8ZXU|bRmHqd?C#V1wX_0NI(}HMC}ZgneE&eNI(caRW|sk zTIe@}pbovvuPY+*2FC(Qx-QCf%Mc8IEa$K1gc96DkDOKDuthP@j1<(|c;;H2V4+pe zD=8ui_ryuOyouCb$s${i=bg2yAgzl4a`xzhQ->QU%Nh3yYxkqchV{P2-~vSz6%|#j z(Ft1%0z08I6_Xw?TV+$~9>}#`E-ni9#=KbS%_3$(?qZbc`OC`4sPn?AptKwHwYB12 zbZD!YpZfX?D@t($>l|Jr#Uad+piZrVVn%a{es_uKPH`$pUNUD=JZHZ4_yU6OS~OON z$k&j;Q`Q7-JwF73!OPXQaOk`G!5bI$16)n$ z7E!luod=vP$?ffKwJT8WI;hODsa#RF3S&8JpC&k9*UOqc=B3UH^vT88_j!bHZ#m+s zE45_F47l?okYSXhel(;<T@l#0 z%~9h8CN02l&=fgkol>XrbeA})Cu}X59vJZJ-D^3)xRmciH>j}aXQL1Ne?T0`0G3(s zE*;K~khV73%!Q*m1w&{TVxq`%83m6|w^?{?JIFLsORe7{c02RvqHH~OycE-a@-Z65 z98F};V+>bnR3Ne2(Ph(Ko98sS_fMu_mz(rRrU%yX0;Kl^XeV)1MbQEn!7MCm2oE0P zIBBgMri%6>*qtJ)3T(ZD4omw*g3Y>jqZ9 zQ}KD0uC8uAnY%+V)lgTDfI(zA8zW*6J`t>HK{4)l@3iOmtN&>yGVm|~s=rM@PlGBy zh9X*}lTA(nKdN%3==>8k80Zp!Ja?Acugf3b-Q!^UDr$u#W!%WATVgTX#9}u_ifd z>mrD@fBf;s#fAh3;>&xNE-xs)cLIXRgkOcVIS}**tCULFxe)Tvja^ zz6wrEAEpG0V4NIu@~1ayJ4;~6Emc>^<`3}^>+Es>*9EJqyj;4hYQFf{fz+b5%WHDG)+fTbg2y zaiBa&V(O~M%n8tPUIHQ3ny*LUIa~ju3Oj+c9^ z6+&D7uHTmD+}-4_4sOlzPix?7Krvaiz@}v(TNab|`hrxeT4VSCFph`Goi9EU9pFOB zQ|#z$L9j>-Qza4_T=6FqgwHCm4Hw+o2>2?@E%m4oD@NDHd=Y~;Ma>YVAQuQ$zmf$pb5-gB8QilA-Uq%&~i>^x$7&RUpex!C? 
zNY1kA447F~Hp9f=!sYDG`H{yks)I1K&o#VWnImnJDR{|H&*;Djm;fbjd(r`=q0;Iz z&t5H9wU&)PjQ(a_0fjxCMtxa;t1khIygKz7E|7A#=57g9h<|i^s=%bFF&bqCp2D~R zhf#mr3QFuoa%|<0L?F@4Ps5-X2B@Uquaz!k# zHKfE+T-x&9Bz)W3@(Idq0=ruxUf+W~#+7ILjhhQL$<_4S>{Vi< zfV`TzC#M5uIeGA>G^Ai?6w4X+JqvRhHgDOoPsU{;h(1*O`6}$1ni`G;=2YyAy@WQn zlAct-x8=qDXaq2S7k&q}?f@Swr(0AYB+&Yd;qEbvQ9<&#=yT}@^p@%gXMObkt80+Z zD|`H7rTm{s1Ym{}HVeH32cF}H#SZ)rnIc?VOa%_gfXA3LapJP1DstK|6EL~x$juQg z0LvCqn4s|oU{!a@Zjvy7z;}tKJ#G$pLkNHea6L-x% zPe#20*^6LC01M@(^&a1JNri2;dv{Zbx8$qsYFj1=Zdb{{O~j{C0HxjHDV9$_%|af( zlZ;Db!b_ilZdi51g;af`qqC2M)Ijv_Pi8b(_@Pz-#J2Ks!VTy>_UwI+)LqD+Mt7CV z`_O$gS&(?(SSf^U#bp9f1P#Ft`{4PN+M&Vj?DLl{S-P6~jr%!d+LBne6rmOS`~fXc zZ9w{DcJEsCUL^?^6X zuln;PgrkSqwA=(#NaG*&$dh$B2Q#b1Zg_!A`^{BOH12a zSOYk%0JtO)x>?JvnEbKY*Qvnhi}rwFbOTqj-)=$;8Xi4%jO#RG)};%3CR{v3N0@-^ zkHF-kiEgox3`RcGX1rnN(7AH~&6rz`9zAN%Y@j_yXH?fn0c@sVlUN4KxMAuHfg&)I`UqHapYV|;We*}O~Rcq{87!+q< zIY(qSXsNfcoT{ZUi==MKiN$0s3>LGA90{RZ9Gwli3jf}*wSp{2AOJFUvAv_pl4GEg$HkW{8%Tm`u%j{{2 zo#{IFRw#m&Qa90Occ?{XT`h!Q38cRF(aI-xG)w_Joh4n!$zgLhRL6iFoG=b&tJd{Pt`2mVpICIjR0)GY{^xLIg_kGIx->9VYiqyBIA+tVX3d9E_hf(s$pg!VaodNN(UlaJJQ^=?S+b&Ei z(9{usLlEeLD4C4K4uRo6q0<^YV^E_*sk#|kQ`*zUs?`}+1|u}sd6WR4%Ef8tBnp>yOe@I)(8}i}hMAnz7J*^^%Z@}&87Pm+i;;qM zqotVoIDA-t`_0U9$idz|f{6PXF!S~}OGWCazIx($4(={NO&}`vi zUziW}_vB|*KFJdFOo4WrkzI+@H4kn42GK6okE@=HR%NA+X;Ja@UcS4XuomyDxWfuS zbMuJLK#ssC2o9~peXgq#KYRH-<4rN2L);pjBW9aT7l??^UaJ5&KM$Fon2Z;SBPjGr zEi`d&%VwO8-;+7$k6_zeCPSAeB?Q$L(ClX@=Zag>+lB^4P$)8ft0ISL&PD4fWzqQ zgb3Ik-S?Y-0LFOA_u8VE-`$j6n49a54(FWx;%`z0T7VH=NF7zM2z?u-UF$GJf%3;@daK7B{q5}xp{1{`Az<_#z zl$@I}JN!C5o3N()FTx`1h19!~@wS(^5R`2|>gK^2H*VafUAuN&zt}(;qV+zMr@$E@ zib-mf&{Zx)v@A%Bh|U7Q_ibJJnX_jx%hum5#niw+5(EluSb11ML^I*A(L|%VQ1D5O zcL<6wV~o1jl>v$>AFA#^C$MY5XsD8}Ot>NQp}RYnxZrso<;O;-Bg4DE(i*Z8hvA!N za)q>~^+Aa@1Sjz*gSwF2a(hKzHyN!6~bIpH1+IDD9=pw|n-ta>0Z z33MW&C(#c#u(+sifXNe*?#dZ_o(F57>r4#53~W~vItDJE3KjIeW6FjOg5<(S(kc6d zi8V)9EyIQHd5oP^0yKttrxbdxh0@Y7)iz9n|T;R08s6l_2p`j`Vi z>y}%DF)Ufg3SHstvk*o#XC7$5ls3=^?Fsp5ByN6GA$Ar-8L3WErZgP;{J2A%QtOWF z(d=xtTnzsl`<@m%FYBAm1U1H7SD<;x%9pXNfD=MA!p9c~bgt`#!9-qO8VWiodK*Bm z&iB!%Nr=_}0-fS{p1)56Lam)u2^lFzsjPK-7T+W}`=HqhrZpb=5x zm?qLJbGXTD*Vf)u?f_rYeCRD{f9_jNNI6t&Bj;k{v6#4e*a;oT6r9BGfZsWgkkuUC zfZ^i=Xs-+PgdqjVeofq86S)Q2Kn9<&q2iY$tR|b?-CC1v@~0hG30VXVSB!Y|fbdZa zqtN^zJShEm_hmi$9{$9+yAiq?o+bX4?$z)spkldLUPFiK())|)g9{-$-5it0keoMf z4GdPLx85k$9>ASBA0CD2&^#Fq?)#!URNjc`BxO3*R{_kT?sTWiL;nF?=2f>OY>EY# z2XF&Xu4$unKP<+yixijlO15Y3vRlyV3`E{3%j=X6eq8}3jm*b|`e_@W%Dx(@KB2jD zOTk+y-@I`zgPAP~aWlY=@E0nhq& zXNl3{G||}7yLZ>Dn|}^EQ&u8-L%l1?b6IBYP2duF*o=WrM7K_STAZvFNK0!XrJI2& zz3|+j88c@tLR1a~^Y|t(O6tU^kZ1~P6G=1_2BLOcN6Ch6irB9}rpV{nB{u_zE;)d8CcQC8m2x5AuO+&v z%%decfT<1>p_{jET|uo%Ij+*#SK|PO0tqfrhbrQ%#bnnYA4W60Is8gl2!(S8JkZl? 
zk9E=j4qJzFMMkE6InE~Tnkz6e=!W}Cz%X^UqGQiPSo~q*YC0q&q{l4zW9g_X`>^+# zZ-==82&r)ubW+|rDv64S-2V#>W1KKGJ{`*)Mz%4FOIG^BA~Jjr6GY7#-KQW9pmC-^ zdp{d;r8;jRTYo@P5VprST!wfE{i-uk;k`*DhQXxW0--arcbmSxI7o9Z%7bGZe7G*EOR53WCI_9Mksofbf#H#y7m zFg#KxeVonNXwcv&)+Wq{?~5_Ug9kc6@)Hb3b~C8KGX~x4I{Q4>UDTnW{sM{L1L#u} zN6)j2sP-+1P!&HLRpR#O%>{i-#%_`4R)83@g^!mBqJ@+SxH4rOSE#C*;P2u5&c{iI zmWbwr-m$a9WXQWF200D{)?m%glII+pQT)AB!W#vu2u}WwOyjM6>gPNhMUi9Ei@X=k znI>@%1#hGPkY9|8AdxwQkvtjZxAUiHUR-vMc9-tWyQlvuxlIsM{=B&Y2ZqQ~35SGl z&&9i;(J{vrcCE_9Oppr8DyVr)1yJIl17y2Ek=YK^negN7W0Lvucp|sGD&nHRdK$-( zXO09%oQp)g4$TSO0zbO7y;+aA5`ayj7PQ1z5)|bN*YJxM_ckfd1+xRzS%Ql~vktU= z5ShSzcJAs!dAEh3!2#P)?b&TCKve&*kO&S92896HTNI8t{79!@JKR=Yg9BBXW(u42 z0K9S&S;r_DWHl>9Z$DXr?}iQAZt;0es6S;7yip@dOqK>e(tRNbT+I3K@Fiq;Ixo=6 zLG-b+pUw%U(H454^=wOefm~q1jd+;DClRDb7JJ9Q(-ul}S%|Y!(b+hxuEPu!ddJ)C z7J7K5P+=D!Ak@P8fQE(=4Qyy-SX-&CMkj~b`t|WQI8buaeT5aDYqzYMqOuUP(gT{C zo9Vm;QmA#w+M}md3OExD`9aTlCnDB^kJ)C#&IqI^;64_&a=Tj(BCXoVse>jTMR+Yn z!33bim+p~5awqhv7od!!!V5jvBZVZw%5ZW5TF=TR(HcyuVOM7Fs5(|Ig6G@NyMUwd z%#=!L5aNWpAk`CThY(;T;3_1{95L;Z#nj$L*xr!PKk;TCU4y(IL>37y8PR0B3-`V16V=FCVW1!rTCa0NMXov*V7xi#MV4#Fo?$U9a)-4r)tHY<8Q6n4`asWL zyeJMC=LH}Tr;*`6dWmd@R2b!V4qTVjj>tk~aE%C8T#m(MGE_OzPMN2DaCywSWte>^ zlgJKP;T*uHd9YC0M{m#_S3-t@0e1E_D9l9LX+Gc&>kzURbdW<9DBsU@yb;GuO@% zJx#tAYb(^hglL6I^BOeaJBjU?lIOraq=FxWlWH71BHTBLz!SgymJr=Xw@oz)`Z z5XkGb2i1B0;S3G3zX?GNnPOI73N}|yzwaf_fkB7IoB;K97VBqYzb2EW)qi!*dP5cHPkNL=&pWCO0HxXtE(Uw4YmecT-8hc~|(mPjH3Hk|=Z?tU>Upx#)cU zIP|l@DIdJ94T!i)dIUZ?0fygn{ZsT{cQnubZJt0#NQUPB$dT}S8gONuIlcqrO!E@Yko0^$l%(Ip7}CiMQ$-#rI7hky8Kv1RR5)Rl#~0m zbnL}uXXh?0@1qwb#Kai7ouoGa?Uqb3$(;IHPecUA`Lh@rm+_;Tn(XS966gvmf0;5R zWno#a4|vTyqHzS8mYlfq3v5X2zMLlSz^2 z?${_o+JB*2uXw)Z8KxumknUl)mLA?4!r519?m#*jNe%)ta#Q%7*;EW}p9>5WnUn~a zS6sun;Rad|fY8XxAaG!X%ZByq^AOI>Nv4Q4u(<<9rA{y!OUaC@$^M0SMKOL}NWO>E z$ABu*YaOC{A*t2Z>9y|6${A^uzH+r~=;OCKb#KZe>BAumN4n6)Kd0(W zXn|~|m$&qjE_Ab>o9OcL+9Fhr7#O! 
zgsI5oar7g$b{NW^l+(1Hes7(oCkYq>SD{NOu#U?VBLxoi2Xm*e2BHF5T>y_pV(uD* zga3l-eyHPeO)hXdQsk&UID^{pP`d@9kJIG(&)qe#VlE`$L{naBU>uD!CgA!*ewT}8 z$}Pql@r~^;q9o#DDpG}@mi2|N5j~&^!Eetc4FEWN;Sd@;<8+~tkR)ED=9qtAk8<7> zHf!XEo`XK>+VLANP%{+A3z0Z7yOrT(91qciCQ*8WwZ3B>23C)-n43(qLf&Lti(o@T zepvuWXA$xf$Czq#Y!BBFq!J)-vHfu47-?r9N?>Jp&6AEM!QM~?INJ9{WLr>SL6)yl z#O+ik$9*sZ)UK1;An=b;R~z~1h>#*8*C#~DbowFD zk*wKCrAD%~)uQ`)V-Pa)q4iyo-CGh@i=68Ma=WtqUI1RjK@ta}Pl=jKzm*M$*OtF; z&Pf0KVefkMVizs@+r$3HKh_j?!G1R_yJ%aCf1c4Q47B1;kMEefP8l|YPB04ll ziwTYgTD7$x{X($9wL|%<1wph2K=WEzDyW45nu{LRR)IQT~oJy?3v8MMcG+PphYw*J@ooJu^#7iJ;(MvjYcY@UQEcnJODLZoHli1G_+V z%mRai{cLRP61!Q0ZJB6wF!S_OQT6yV_44J*+oDT$ySuOAl$KtPiIE;ZVS<^Xqa0ig zr$=s=@IHC+EqVB+jEak>$X3nmv zs(MylE`Th+ue!SWT}z8!Y-}v~5FQ?$t)lAGL}R~L;JAPPR3RauZ85sn4PrLL=$^4k z6129q-XG#;K-~bQ2D}4A+ZTgw@W+Rt| z+>x9DY{b#}2PnSv1G%yb<2U4^gWx*AF_KR-@P}&9+&pJEIk@(ozDS%PK!~b2`e0;1q zp9WE?#((;z^NPf2lkS4CE6^3TV_~<$bi7+X*!P49e@y6v+G~itbEdMBdam$qi`l# z+&5f*=3}2KJbU)+p|(!46^x90q4~%`HF>DV4cMZLtn6<6FF$p7!Mw5jF+K?C8you} zw-TRm20Vx#-l-3|0RV-n;kiG?W`7I2t)UWY3@f=95^{R&)m{Jj8_O#z&wysQNT(0c z!%Y{@cnkTLjDo`6krQ}$f&G)QN;I#t(4Jik4V4@D;@xTdV>xtxTK6)|NotRU#apna zk4IKTVeOcE-s>=K=p3{-U5kz`7&2wEjJB0M6-+}t7gtg6aJ;mhwhT;o61`T6=67Ao!eR!4X4g0Y_e z*PkBy7>s;*Uck5i^q9|SJ(gy(y5 z^6xeIo}Bz^O}-~5|5}p&Pm_}&+t#zg5Y6}=L#^Ewr^fLt;~xu47UH+{2uS+H&1 z$ajwM(N}FO{A%PIzyINpmH7Pe`%?dXJ$SxvuD>tH_v7^U_4tnfBA)41^~V3O=XUZ_ Oo7Znui(R|x=>G#?IKPJg literal 0 HcmV?d00001 diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index 80561c27..c2b27e2a 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -13,12 +13,16 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf -To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) . We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. - -To use TGI on RoCm-enabled AMD GPUs (only MI210 and MI250 are tested), please use the image `ghcr.io/huggingface/text-generation-inference:1.2+rocm` instead. For details about the usage on RoCm, please refer to the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). +To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. +TGI also supports ROCm-enabled AMD GPUs (only MI210 and MI250 are tested), details are available in the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). To launch TGI on ROCm GPUs, please use instead: + +```bash +docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2-rocm --model-id $model +``` + Once TGI is running, you can use the `generate` endpoint by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint. 
diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md
index d7d45b70..34775139 100644
--- a/docs/source/supported_models.md
+++ b/docs/source/supported_models.md
@@ -41,7 +41,11 @@ text-generation-launcher --model-id
 TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed.
 
-TGI also has support of RoCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are missing from the RoCm version of TGI: quantization and flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm).
+TGI also has support for ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future:
+* Quantization (GPTQ, AWQ, etc.)
+* Flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm)
+* Kernel for sliding window attention (Mistral)
 
 TGI is also supported on the following AI hardware accelerators:
 - *Habana first-gen Gaudi and Gaudi2:* check out this [example](https://github.com/huggingface/optimum-habana/tree/main/text-generation-inference) how to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
+* *AWS Inferentia2:* check out this [guide](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference) on how to serve models with TGI on Inferentia2.

From a41c1a6bc7b763a4e190da79a19a0dec08a4103a Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 5 Dec 2023 14:42:55 +0100
Subject: [PATCH 002/153] Add a stale bot. (#1313)

---
 .github/workflows/stale.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 .github/workflows/stale.yml

diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000..a5e50a79
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,14 @@
+name: 'Close stale issues and PRs'
+on:
+  schedule:
+    - cron: '30 1 * * *'
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v8
+        with:
+          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
+ days-before-stale: 30 + days-before-close: 5 From a7f52f3812db7c2ed35bf8f05e860df888733631 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Mon, 11 Dec 2023 12:46:30 +0100 Subject: [PATCH 003/153] Speculative (#1308) --- Cargo.lock | 254 ++++++----- docs/source/basic_tutorials/launcher.md | 8 + .../test_flash_medusa_all_params.json | 98 +++++ .../test_flash_medusa_load.json | 414 ++++++++++++++++++ .../test_flash_medusa_simple.json | 103 +++++ integration-tests/models/test_flash_medusa.py | 59 +++ .../models/test_flash_mistral.py | 4 +- launcher/src/main.rs | 15 + load_tests/common.js | 21 +- load_tests/tgi.js | 4 +- proto/generate.proto | 39 +- router/client/src/client.rs | 4 +- router/client/src/lib.rs | 8 +- router/src/infer.rs | 95 ++-- router/src/queue.rs | 38 +- router/src/server.rs | 1 + server/Makefile-vllm | 33 +- server/tests/models/test_bloom.py | 4 +- server/tests/models/test_causal_lm.py | 4 +- server/tests/models/test_seq2seq_lm.py | 4 +- server/text_generation_server/cli.py | 62 ++- .../text_generation_server/models/__init__.py | 48 +- .../models/causal_lm.py | 34 +- .../models/flash_causal_lm.py | 188 +++++--- .../models/flash_llama.py | 13 + .../models/flash_mistral.py | 63 ++- .../models/idefics_causal_lm.py | 16 +- server/text_generation_server/models/model.py | 16 +- .../models/seq2seq_lm.py | 18 +- server/text_generation_server/models/types.py | 40 +- server/text_generation_server/server.py | 24 +- server/text_generation_server/tgi_service.py | 14 +- server/text_generation_server/utils/medusa.py | 51 +++ .../text_generation_server/utils/speculate.py | 12 + server/text_generation_server/utils/tokens.py | 100 ++++- 35 files changed, 1511 insertions(+), 398 deletions(-) create mode 100644 integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json create mode 100644 integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json create mode 100644 integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_simple.json create mode 100644 integration-tests/models/test_flash_medusa.py create mode 100644 server/text_generation_server/utils/medusa.py create mode 100644 server/text_generation_server/utils/speculate.py diff --git a/Cargo.lock b/Cargo.lock index 04d42397..2b537e09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -88,9 +88,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "arc-swap" @@ -128,18 +128,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -288,9 +288,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = 
"79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecount" @@ -350,9 +350,9 @@ checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cc" -version = "1.0.90" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" [[package]] name = "cfg-if" @@ -397,7 +397,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -675,9 +675,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "encode_unicode" @@ -687,9 +687,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -839,7 +839,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -893,9 +893,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -920,9 +920,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -993,15 +993,6 @@ dependencies = [ "ureq", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "hostname" version = "0.3.1" @@ -1204,6 +1195,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -1358,7 +1358,7 @@ checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1421,9 +1421,9 @@ dependencies = [ [[package]] name = "monostate" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "878c2a1f1c70e5724fa28f101ca787b6a7e8ad5c5e4ae4ca3b0fa4a419fa9075" +checksum = "a20fffcd8ca4c69d31e036a71abc400147b41f90895df4edcb36497a1f8af8bf" dependencies = [ "monostate-impl", 
"serde", @@ -1431,20 +1431,20 @@ dependencies = [ [[package]] name = "monostate-impl" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f686d68a09079e63b1d2c64aa305095887ce50565f00a922ebfaeeee0d9ba6ce" +checksum = "bf307cbbbd777a9c10cec88ddafee572b3484caad5cce0c9236523c3803105a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "multimap" -version = "0.8.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "muxado" @@ -1662,7 +1662,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1878,7 +1878,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1919,12 +1919,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" +checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" dependencies = [ "proc-macro2", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1953,9 +1953,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -1972,34 +1972,33 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", - "prost-derive 0.12.3", + "prost-derive 0.12.4", ] [[package]] name = "prost-build" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ "bytes", - "heck 0.4.1", - "itertools 0.11.0", + "heck 0.5.0", + "itertools 0.12.1", "log", "multimap", "once_cell", "petgraph", "prettyplease", - "prost 0.12.3", + "prost 0.12.4", "prost-types", "regex", - "syn 2.0.58", + "syn 2.0.60", "tempfile", - "which", ] [[package]] @@ -2017,24 +2016,24 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "prost-types" -version = "0.12.3" +version = "0.12.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ - "prost 0.12.3", + "prost 0.12.4", ] [[package]] @@ -2055,9 +2054,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -2310,7 +2309,7 @@ dependencies = [ "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.58", + "syn 2.0.60", "walkdir", ] @@ -2406,9 +2405,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "ryu" @@ -2484,29 +2483,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -2689,7 +2688,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2711,9 +2710,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -2728,9 +2727,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" -version = "0.30.8" +version = "0.30.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b1a378e48fb3ce3a5cf04359c456c9c98ff689bcf1c1bc6e6a31f247686f275" +checksum = "26d7c217777061d5a2d652aea771fb9ba98b6dade657204b08c4b9604d11555b" dependencies = [ "cfg-if", "core-foundation-sys", @@ -2824,7 +2823,7 @@ version = "1.2.0" dependencies = [ "futures", "grpc-metadata", - "prost 0.12.3", + "prost 0.12.4", "prost-build", "rand", "thiserror", @@ -2903,7 +2902,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2918,9 +2917,9 @@ dependencies = [ [[package]] name = "time" 
-version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -2941,9 +2940,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -3035,7 +3034,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3131,7 +3130,7 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost 0.12.3", + "prost 0.12.4", "tokio", "tokio-stream", "tower", @@ -3150,7 +3149,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3223,7 +3222,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3464,7 +3463,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3563,7 +3562,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -3597,7 +3596,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3637,18 +3636,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - [[package]] name = "winapi" version = "0.3.9" @@ -3687,7 +3674,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3696,7 +3683,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3723,7 +3710,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -3758,17 +3745,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - 
"windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3785,9 +3773,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -3803,9 +3791,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -3821,9 +3809,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -3839,9 +3833,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -3857,9 +3851,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -3875,9 +3869,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -3893,9 +3887,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winreg" @@ -3924,7 +3918,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md index 62abe8c6..9590e463 100644 --- a/docs/source/basic_tutorials/launcher.md +++ b/docs/source/basic_tutorials/launcher.md @@ -67,6 +67,14 @@ Options: - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16 - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for you model +``` +## SPECULATE +```shell + --speculate + The number of input_ids to speculate on If using a medusa model, the heads will be picked up automatically Other wise, it will use n-gram speculation which is relatively free in terms of compute, but the speedup heavily depends on the task + + [env: SPECULATE=] + ``` ## DTYPE ```shell diff --git a/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json new file mode 100644 index 00000000..d8a298eb --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json @@ -0,0 +1,98 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 338, + "logprob": -10.0078125, + "text": "is" + }, + { + "id": 21784, + "logprob": -15.515625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -2.8847656, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -4.140625, + "text": "?" 
+ } + ], + "seed": 0, + "tokens": [ + { + "id": 13, + "logprob": -1.1582031, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.23083496, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": 0.0, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": 0.0, + "special": false, + "text": " learning" + }, + { + "id": 29892, + "logprob": -0.61816406, + "special": false, + "text": "," + }, + { + "id": 607, + "logprob": -0.7089844, + "special": false, + "text": " which" + }, + { + "id": 508, + "logprob": -1.7724609, + "special": false, + "text": " can" + }, + { + "id": 367, + "logprob": 0.0, + "special": false, + "text": " be" + }, + { + "id": 5545, + "logprob": 0.0, + "special": false, + "text": " considered" + }, + { + "id": 408, + "logprob": -0.3869629, + "special": false, + "text": " as" + } + ] + }, + "generated_text": "What is Deep Learning?\nDeep learning, which can be considered as" +} diff --git a/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json new file mode 100644 index 00000000..413af1d7 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json @@ -0,0 +1,414 @@ +[ + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1724, + "logprob": -10.734375, + "text": "What" + }, + { + "id": 338, + "logprob": -1.5488281, + "text": "is" + }, + { + "id": 21784, + "logprob": -9.2890625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -1.2753906, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -0.48046875, + "text": "?" + } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -1.1845703, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.5727539, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": -0.00010967255, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": -0.1239624, + "special": false, + "text": " learning" + }, + { + "id": 338, + "logprob": -0.04510498, + "special": false, + "text": " is" + }, + { + "id": 263, + "logprob": -0.018295288, + "special": false, + "text": " a" + }, + { + "id": 11306, + "logprob": -0.45922852, + "special": false, + "text": " subset" + }, + { + "id": 310, + "logprob": -0.00020992756, + "special": false, + "text": " of" + }, + { + "id": 4933, + "logprob": -0.0046539307, + "special": false, + "text": " machine" + }, + { + "id": 6509, + "logprob": -0.00025844574, + "special": false, + "text": " learning" + } + ] + }, + "generated_text": "\nDeep learning is a subset of machine learning" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1724, + "logprob": -10.734375, + "text": "What" + }, + { + "id": 338, + "logprob": -1.5488281, + "text": "is" + }, + { + "id": 21784, + "logprob": -9.2890625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -1.2724609, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -0.47729492, + "text": "?" 
+ } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -1.1826172, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.56689453, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": -0.000108003616, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": -0.1239624, + "special": false, + "text": " learning" + }, + { + "id": 338, + "logprob": -0.044433594, + "special": false, + "text": " is" + }, + { + "id": 263, + "logprob": -0.018295288, + "special": false, + "text": " a" + }, + { + "id": 11306, + "logprob": -0.45922852, + "special": false, + "text": " subset" + }, + { + "id": 310, + "logprob": -0.0002104044, + "special": false, + "text": " of" + }, + { + "id": 4933, + "logprob": -0.004711151, + "special": false, + "text": " machine" + }, + { + "id": 6509, + "logprob": -0.00025892258, + "special": false, + "text": " learning" + } + ] + }, + "generated_text": "\nDeep learning is a subset of machine learning" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1724, + "logprob": -10.734375, + "text": "What" + }, + { + "id": 338, + "logprob": -1.5488281, + "text": "is" + }, + { + "id": 21784, + "logprob": -9.2890625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -1.2724609, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -0.47729492, + "text": "?" + } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -1.1826172, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.56689453, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": -0.000108003616, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": -0.1239624, + "special": false, + "text": " learning" + }, + { + "id": 338, + "logprob": -0.044433594, + "special": false, + "text": " is" + }, + { + "id": 263, + "logprob": -0.018295288, + "special": false, + "text": " a" + }, + { + "id": 11306, + "logprob": -0.45922852, + "special": false, + "text": " subset" + }, + { + "id": 310, + "logprob": -0.0002104044, + "special": false, + "text": " of" + }, + { + "id": 4933, + "logprob": -0.004711151, + "special": false, + "text": " machine" + }, + { + "id": 6509, + "logprob": -0.00025892258, + "special": false, + "text": " learning" + } + ] + }, + "generated_text": "\nDeep learning is a subset of machine learning" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1724, + "logprob": -10.734375, + "text": "What" + }, + { + "id": 338, + "logprob": -1.5488281, + "text": "is" + }, + { + "id": 21784, + "logprob": -9.2890625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -1.2724609, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -0.47729492, + "text": "?" 
+ } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -1.1826172, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.56689453, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": -0.000108003616, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": -0.1239624, + "special": false, + "text": " learning" + }, + { + "id": 338, + "logprob": -0.044433594, + "special": false, + "text": " is" + }, + { + "id": 263, + "logprob": -0.018295288, + "special": false, + "text": " a" + }, + { + "id": 11306, + "logprob": -0.45922852, + "special": false, + "text": " subset" + }, + { + "id": 310, + "logprob": -0.0002104044, + "special": false, + "text": " of" + }, + { + "id": 4933, + "logprob": -0.004711151, + "special": false, + "text": " machine" + }, + { + "id": 6509, + "logprob": -0.00025892258, + "special": false, + "text": " learning" + } + ] + }, + "generated_text": "\nDeep learning is a subset of machine learning" + } +] diff --git a/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_simple.json b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_simple.json new file mode 100644 index 00000000..15754b14 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_simple.json @@ -0,0 +1,103 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1724, + "logprob": -10.734375, + "text": "What" + }, + { + "id": 338, + "logprob": -1.5488281, + "text": "is" + }, + { + "id": 21784, + "logprob": -9.2890625, + "text": "Deep" + }, + { + "id": 29257, + "logprob": -1.2753906, + "text": "Learning" + }, + { + "id": 29973, + "logprob": -0.48046875, + "text": "?" 
+ } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -1.1845703, + "special": false, + "text": "\n" + }, + { + "id": 2772, + "logprob": -0.5727539, + "special": false, + "text": "De" + }, + { + "id": 1022, + "logprob": -0.000108122826, + "special": false, + "text": "ep" + }, + { + "id": 6509, + "logprob": -0.1239624, + "special": false, + "text": " learning" + }, + { + "id": 338, + "logprob": -0.044433594, + "special": false, + "text": " is" + }, + { + "id": 263, + "logprob": -0.01852417, + "special": false, + "text": " a" + }, + { + "id": 11306, + "logprob": -0.45922852, + "special": false, + "text": " subset" + }, + { + "id": 310, + "logprob": -0.0002104044, + "special": false, + "text": " of" + }, + { + "id": 4933, + "logprob": -0.004787445, + "special": false, + "text": " machine" + }, + { + "id": 6509, + "logprob": -0.00026226044, + "special": false, + "text": " learning" + } + ] + }, + "generated_text": "\nDeep learning is a subset of machine learning" +} diff --git a/integration-tests/models/test_flash_medusa.py b/integration-tests/models/test_flash_medusa.py new file mode 100644 index 00000000..003409b0 --- /dev/null +++ b/integration-tests/models/test_flash_medusa.py @@ -0,0 +1,59 @@ +import pytest + + +@pytest.fixture(scope="module") +def flash_medusa_handle(launcher): + with launcher("FasterDecoding/medusa-vicuna-7b-v1.3", num_shard=2) as handle: + yield handle + + +@pytest.fixture(scope="module") +async def flash_medusa(flash_medusa_handle): + await flash_medusa_handle.health(300) + return flash_medusa_handle.client + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_medusa_simple(flash_medusa, response_snapshot): + response = await flash_medusa.generate( + "What is Deep Learning?", max_new_tokens=10, decoder_input_details=True + ) + + assert response.details.generated_tokens == 10 + assert response == response_snapshot + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_medusa_all_params(flash_medusa, response_snapshot): + response = await flash_medusa.generate( + "What is Deep Learning?", + max_new_tokens=10, + repetition_penalty=1.2, + return_full_text=True, + stop_sequences=["test"], + temperature=0.5, + top_p=0.9, + top_k=10, + truncate=5, + typical_p=0.9, + watermark=True, + decoder_input_details=True, + seed=0, + ) + + assert response.details.generated_tokens == 10 + assert response == response_snapshot + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_medusa_load(flash_medusa, generate_load, response_snapshot): + responses = await generate_load(flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4) + + assert len(responses) == 4 + assert all([r.generated_text == responses[0].generated_text for r in responses]), f"{[r.generated_text for r in responses]}" + assert responses[0].generated_text == '\nDeep learning is a subset of machine learning' + + assert responses == response_snapshot diff --git a/integration-tests/models/test_flash_mistral.py b/integration-tests/models/test_flash_mistral.py index 63cb09b5..7d21afd9 100644 --- a/integration-tests/models/test_flash_mistral.py +++ b/integration-tests/models/test_flash_mistral.py @@ -21,6 +21,7 @@ async def test_flash_mistral(flash_mistral, response_snapshot): ) assert response.details.generated_tokens == 10 + assert response.generated_text == ": Let n = 10 - 1" assert response == response_snapshot @@ -55,6 +56,7 @@ async def test_flash_mistral_load(flash_mistral, generate_load, response_snapsho ) assert len(responses) == 4 - assert 
all([r.generated_text == responses[0].generated_text for r in responses])
+    assert all([r.generated_text == responses[0].generated_text for r in responses]), f"{[r.generated_text  for r in responses]}"
+    assert responses[0].generated_text == ": Let n = 10 - 1"

     assert responses == response_snapshot
diff --git a/launcher/src/main.rs b/launcher/src/main.rs
index 0a023234..e814b833 100644
--- a/launcher/src/main.rs
+++ b/launcher/src/main.rs
@@ -157,6 +157,13 @@ struct Args {
     #[clap(long, env, value_enum)]
     quantize: Option,

+    /// The number of input_ids to speculate on
+    /// If using a medusa model, the heads will be picked up automatically
+    /// Otherwise, it will use n-gram speculation, which is relatively free
+    /// in terms of compute, but the speedup heavily depends on the task.
+    #[clap(long, env)]
+    speculate: Option,
+
     /// The dtype to be forced upon the model. This option cannot be used with `--quantize`.
     #[clap(long, env, value_enum)]
     dtype: Option,
@@ -377,6 +384,7 @@ fn shard_manager(
     model_id: String,
     revision: Option,
     quantize: Option,
+    speculate: Option,
     dtype: Option,
     max_total_tokens: usize,
     trust_remote_code: bool,
@@ -435,6 +443,11 @@ fn shard_manager(
         shard_args.push(quantize.to_string())
     }

+    if let Some(speculate) = speculate {
+        shard_args.push("--speculate".to_string());
+        shard_args.push(speculate.to_string())
+    }
+
     if let Some(dtype) = dtype {
         shard_args.push("--dtype".to_string());
         shard_args.push(dtype.to_string())
     }
@@ -890,6 +903,7 @@ fn spawn_shards(
         let shutdown_sender = shutdown_sender.clone();
         let otlp_endpoint = args.otlp_endpoint.clone();
         let quantize = args.quantize;
+        let speculate = args.speculate;
         let dtype = args.dtype;
         let max_total_tokens = args.max_total_tokens;
         let trust_remote_code = args.trust_remote_code;
@@ -905,6 +919,7 @@ fn spawn_shards(
                 model_id,
                 revision,
                 quantize,
+                speculate,
                 dtype,
                 max_total_tokens,
                 trust_remote_code,
diff --git a/load_tests/common.js b/load_tests/common.js
index be812e9b..5d71abea 100644
--- a/load_tests/common.js
+++ b/load_tests/common.js
@@ -7,7 +7,9 @@ const seed = 0;

 const host = __ENV.HOST || '127.0.0.1:8000';
 const timePerToken = new Trend('time_per_token', true);
-const throughput = new Counter('tokens_per_s');
+const tokens = new Counter('tokens');
+const new_tokens = new Counter('new_tokens');
+const input_tokens = new Counter('input_tokens');

 randomSeed(seed);
 // const shareGPT = JSON.parse(open("ShareGPT_V3_unfiltered_cleaned_split.json"))
@@ -19,7 +21,7 @@ export function get_options(reference_latency_ms){
         thresholds: {
             http_req_failed: ['rate==0'],
             time_per_token: [{
-                threshold: `p(50)<${3 * reference_latency_ms}`,
+                threshold: `p(50)<${5 * reference_latency_ms}`,
                 abortOnFail: true,
                 delayAbortEval: '10s'
             }],
@@ -28,7 +30,7 @@
         load_test: {
             executor: 'constant-arrival-rate',
             duration: '60s',
-            preAllocatedVUs: 100,
+            preAllocatedVUs: 10,
             rate: 10,
             timeUnit: '1s',
         },
@@ -48,17 +50,22 @@ export function run(host, generate_payload, max_new_tokens) {
         return;
     }

+
     check(res, {
         'Post status is 200': (r) => res.status === 200,
     });
-    const n_tokens = max_new_tokens;
-    const timings = res.timings.duration;
+    const duration = res.timings.duration;

     if (res.status === 200) {
-        const latency_ms_per_token = timings / n_tokens;
+        const body = res.json();
+        const n_tokens = body.details.tokens.length;
+        const latency_ms_per_token = duration / n_tokens;
         timePerToken.add(latency_ms_per_token);
         const latency_in_s = latency_ms_per_token / 1000;
         const individual_throughput = 1 /
latency_in_s; - throughput.add(individual_throughput); + const _input_tokens = body.details.prefill.length; + tokens.add(n_tokens + _input_tokens); + input_tokens.add(_input_tokens); + new_tokens.add(n_tokens); } } diff --git a/load_tests/tgi.js b/load_tests/tgi.js index 93a0e278..1db4ab6f 100644 --- a/load_tests/tgi.js +++ b/load_tests/tgi.js @@ -1,13 +1,13 @@ import { get_options, run } from "./common.js"; -const reference_latency_ms = 30; +const reference_latency_ms = 70; const host = __ENV.HOST || '127.0.0.1:8000'; const max_new_tokens = 50; function generate_payload(gpt){ const input = gpt["conversations"][0]["value"]; - return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "temperature" : 0.5}} + return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "decoder_input_details": true}} } export const options = get_options(reference_latency_ms); diff --git a/proto/generate.proto b/proto/generate.proto index c7f9f3c1..0041b907 100644 --- a/proto/generate.proto +++ b/proto/generate.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package generate.v1; +package generate.v2; service TextGenerationService { /// Model Info @@ -32,6 +32,7 @@ message InfoResponse { string dtype = 2; string device_type = 3; optional uint32 window_size = 4; + uint32 speculate = 5; } /// Empty request @@ -135,43 +136,27 @@ message GeneratedText { optional uint64 seed = 4; } -message PrefillTokens { - /// Prefill Token IDs +message Tokens { + /// Token IDs repeated uint32 ids = 1; - /// Prefill Logprobs + /// Logprobs repeated float logprobs = 2; - /// Prefill tokens + /// tokens repeated string texts = 3; -} - -message TopTokens { - /// Top Token IDs - repeated uint32 ids = 1; - /// Top Logprobs - repeated float logprobs = 2; - /// Top Token Texts - repeated string texts = 3; - /// If the tokens are special - repeated bool is_special = 6; + /// special + repeated bool is_special = 4; } message Generation { /// Request ID uint64 request_id = 1; /// Prefill tokens (optional) - PrefillTokens prefill_tokens = 2; - /// Token ID - uint32 token_id = 3; - /// Logprob - float token_logprob = 4; - /// Text - string token_text = 5; - /// Is it a special token - bool token_is_special = 6; + Tokens prefill_tokens = 2; + Tokens tokens = 3; /// Complete generated text - optional GeneratedText generated_text = 7; + optional GeneratedText generated_text = 4; /// Top tokens - TopTokens top_tokens = 8; + repeated Tokens top_tokens = 5; } message FilterBatchRequest { diff --git a/router/client/src/client.rs b/router/client/src/client.rs index 486e13d9..ca86f330 100644 --- a/router/client/src/client.rs +++ b/router/client/src/client.rs @@ -1,8 +1,8 @@ /// Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. 
/// Single shard Client -use crate::pb::generate::v1::text_generation_service_client::TextGenerationServiceClient; -use crate::pb::generate::v1::*; +use crate::pb::generate::v2::text_generation_service_client::TextGenerationServiceClient; +use crate::pb::generate::v2::*; use crate::Result; use std::env; use rand::{distributions::Uniform, Rng}; diff --git a/router/client/src/lib.rs b/router/client/src/lib.rs index f334be21..c38b931b 100644 --- a/router/client/src/lib.rs +++ b/router/client/src/lib.rs @@ -6,11 +6,11 @@ mod pb; mod sharded_client; pub use client::Client; -pub use pb::generate::v1::HealthResponse; -pub use pb::generate::v1::InfoResponse as ShardInfo; -pub use pb::generate::v1::{ +pub use pb::generate::v2::HealthResponse; +pub use pb::generate::v2::InfoResponse as ShardInfo; +pub use pb::generate::v2::{ Batch, CachedBatch, FinishReason, GeneratedText, Generation, NextTokenChooserParameters, - PrefillTokens, Request, StoppingCriteriaParameters, + Request, StoppingCriteriaParameters, Tokens, }; pub use sharded_client::ShardedClient; use thiserror::Error; diff --git a/router/src/infer.rs b/router/src/infer.rs index f108a1dc..c1b01211 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -11,7 +11,7 @@ use std::sync::{ Arc, }; use text_generation_client::{ - Batch, CachedBatch, ClientError, GeneratedText, Generation, PrefillTokens, ShardedClient, + Batch, CachedBatch, ClientError, GeneratedText, Generation, ShardedClient, Tokens, }; use thiserror::Error; use tokio::sync::mpsc::error::SendError; @@ -54,6 +54,7 @@ impl Infer { max_input_length: u32, max_total_tokens: u32, window_size: Option, + speculate: u32, generation_health: Arc, ) -> Self { // Infer shared state @@ -62,7 +63,8 @@ impl Infer { max_input_length, max_total_tokens, 16, - window_size + window_size, + speculate ); let shared = Arc::new(Shared { batching_task: Notify::new(), @@ -533,50 +535,63 @@ fn send_responses( } // Create last Token - let token = Token { - id: generation.token_id, - text: generation.token_text, - logprob: generation.token_logprob, - special: generation.token_is_special, - }; - - // generation.top_tokens - - let mut top_tokens = Vec::new(); - if let Some(top_tokens_) = generation.top_tokens { - top_tokens.extend( + let tokens_ = generation.tokens.expect("Non empty tokens in generation"); + let n = tokens_.ids.len(); + metrics::histogram!("tgi_request_skipped_tokens", (n - 1) as f64); + let mut iterator = tokens_ + .ids + .into_iter() + .zip(tokens_.logprobs.into_iter()) + .zip(tokens_.texts.into_iter()) + .zip(tokens_.is_special.into_iter()) + .enumerate() + .peekable(); + while let Some((i, (((id, logprob), text), special))) = iterator.next() { + let token = Token { + id, + text, + logprob, + special, + }; + let top_tokens = if let Some(top_tokens_) = generation.top_tokens.get(i) { top_tokens_ .ids - .into_iter() - .zip(top_tokens_.logprobs.into_iter()) - .zip(top_tokens_.texts.into_iter()) - .zip(top_tokens_.is_special.into_iter()) - .map(|(((id, logprob), text), special)| Token { + .iter() + .zip(top_tokens_.logprobs.iter()) + .zip(top_tokens_.texts.iter()) + .zip(top_tokens_.is_special.iter()) + .map(|(((&id, &logprob), text), &special)| Token { id, - text, + text: text.to_string(), logprob, special, - }), - ) + }) + .collect() + } else { + vec![] + }; + match (&generation.generated_text, iterator.peek()) { + (Some(generated_text), None) => { + // Generation has ended + stopped = true; + // Send message + entry.response_tx.send(Ok(InferStreamResponse::End { + token, + top_tokens, + 
generated_text: generated_text.clone(), + queued: entry.queue_time, + start: entry.batch_time.unwrap(), + }))?; + } + _ => { + // Send message + entry + .response_tx + .send(Ok(InferStreamResponse::Intermediate { token, top_tokens }))?; + } + } } - if let Some(generated_text) = generation.generated_text { - // Generation has ended - stopped = true; - // Send message - entry.response_tx.send(Ok(InferStreamResponse::End { - token, - top_tokens, - generated_text, - queued: entry.queue_time, - start: entry.batch_time.unwrap(), - }))?; - } else { - // Send message - entry - .response_tx - .send(Ok(InferStreamResponse::Intermediate { token, top_tokens }))?; - } Ok(stopped) } @@ -601,7 +616,7 @@ fn send_errors(error: ClientError, entries: &mut IntMap) { #[derive(Debug)] pub(crate) enum InferStreamResponse { // Optional first message - Prefill(PrefillTokens), + Prefill(Tokens), // Intermediate messages Intermediate { token: Token, diff --git a/router/src/queue.rs b/router/src/queue.rs index 6734c6a6..6227d70c 100644 --- a/router/src/queue.rs +++ b/router/src/queue.rs @@ -44,7 +44,8 @@ impl Queue { max_input_length: u32, max_total_tokens: u32, block_size: u32, - window_size: Option + window_size: Option, + speculate: u32, ) -> Self { // Create channel let (queue_sender, queue_receiver) = mpsc::unbounded_channel(); @@ -56,6 +57,7 @@ impl Queue { max_total_tokens, block_size, window_size, + speculate, queue_receiver, )); @@ -106,6 +108,7 @@ async fn queue_task( max_total_tokens: u32, block_size: u32, window_size: Option, + speculate: u32, mut receiver: mpsc::UnboundedReceiver, ) { let mut state = State::new( @@ -113,7 +116,8 @@ async fn queue_task( max_input_length, max_total_tokens, block_size, - window_size + window_size, + speculate ); while let Some(cmd) = receiver.recv().await { @@ -256,6 +260,9 @@ struct State { /// Sliding window window_size: Option, + + /// Speculation amount + speculate: u32, } impl State { @@ -265,6 +272,7 @@ impl State { max_total_tokens: u32, block_size: u32, window_size: Option, + speculate: u32, ) -> Self { let default_threshold: u64 = 120; let threshold: u64 = match env::var("QUEUE_THRESHOLD_MS") { @@ -281,6 +289,7 @@ impl State { max_total_tokens, block_size, window_size, + speculate, } } @@ -365,7 +374,7 @@ impl State { } if prefill_tokens > prefill_token_budget - || (prefill_tokens + decode_tokens) > token_budget + || (prefill_tokens + decode_tokens + self.speculate) > token_budget { // Entry is over budget // Add it back to the front @@ -457,13 +466,13 @@ mod tests { fn default_queue() -> Queue { Queue::new( - true, 1, 2, 1, None + true, 1, 2, 1, None, 0 ) } fn default_state() -> State { State::new( - true, 1, 2, 1, None + true, 1, 2, 1, None, 0 ) } @@ -667,6 +676,25 @@ mod tests { assert_eq!(batch.size, 2); } + #[tokio::test] + async fn test_queue_next_batch_token_speculate() { + let queue = Queue::new(true, 1, 2, 1, None, 2); + let (entry1, _guard1) = default_entry(); + let (entry2, _guard2) = default_entry(); + queue.append(entry1); + queue.append(entry2); + + // Budget of 1 is not enough + assert!(queue.next_batch(None, 1, 1).await.is_none()); + + let (entries, batch, _) = queue.next_batch(None, 6, 6).await.unwrap(); + assert_eq!(entries.len(), 2); + assert!(entries.contains_key(&0)); + assert!(entries.contains_key(&1)); + assert_eq!(batch.id, 0); + assert_eq!(batch.size, 2); + } + #[tokio::test] async fn test_queue_next_batch_dropped_receiver() { let queue = default_queue(); diff --git a/router/src/server.rs b/router/src/server.rs index c2eab874..1830a879 
100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -600,6 +600,7 @@ pub async fn run( max_input_length as u32, max_total_tokens as u32, shard_info.window_size, + shard_info.speculate, generation_health, ); diff --git a/server/Makefile-vllm b/server/Makefile-vllm index ddb648ea..c9c1d520 100644 --- a/server/Makefile-vllm +++ b/server/Makefile-vllm @@ -1,22 +1,25 @@ -build-vllm-cuda: REPOSITORY=https://github.com/vllm-project/vllm.git -build-vllm-cuda: VLLM_COMMIT=f8a1e39fae05ca610be8d5a78be9d40f5274e5fc -build-vllm-cuda: BRANCH=main -build-vllm-cuda: build-vllm - -build-vllm-rocm: REPOSITORY=https://github.com/fxmarty/vllm-public.git -build-vllm-rocm: VLLM_COMMIT=ad9b7c4095ef54419a0533d254f2ad84bd2dfcae -build-vllm-rocm: BRANCH=rotary-no-positions-split-cos-sin -build-vllm-rocm: build-vllm - -vllm: +vllm-cuda: # Clone vllm pip install -U ninja packaging --no-cache-dir - git clone --single-branch --branch $(BRANCH) $(REPOSITORY) vllm + git clone https://github.com/vllm-project/vllm.git vllm -build-vllm: vllm - cd vllm && git fetch && git checkout $(VLLM_COMMIT) +build-vllm-cuda: vllm-cuda + cd vllm && git fetch && git checkout f8a1e39fae05ca610be8d5a78be9d40f5274e5fc cd vllm && python setup.py build -install-vllm: build-vllm +install-vllm-cuda: build-vllm-cuda + pip uninstall vllm -y || true + cd vllm && python setup.py install + +vllm-rocm: + # Clone vllm + pip install -U ninja packaging --no-cache-dir + git clone https://github.com/fxmarty/vllm-public.git vllm + +build-vllm-rocm: vllm-rocm + cd vllm && git fetch && git checkout ad9b7c4095ef54419a0533d254f2ad84bd2dfcae + cd vllm && python setup.py build + +install-vllm-rocm: build-vllm-rocm pip uninstall vllm -y || true cd vllm && python setup.py install diff --git a/server/tests/models/test_bloom.py b/server/tests/models/test_bloom.py index 1f70d000..303e9e71 100644 --- a/server/tests/models/test_bloom.py +++ b/server/tests/models/test_bloom.py @@ -135,8 +135,8 @@ def test_causal_lm_generate_token(default_bloom, default_bloom_batch): ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) - assert all([generation.token_id.item() == 10264 for generation in generations]) - assert all([generation.token_text == "Test" for generation in generations]) + assert all([token_id.item() == 10264 for generation in generations for token_id in generation.tokens.token_ids]) + assert all([token_text == "Test" for generation in generations for token_text in generation.tokens.texts]) assert generations[0].request_id == 0 diff --git a/server/tests/models/test_causal_lm.py b/server/tests/models/test_causal_lm.py index e467d291..e9c2cd3a 100644 --- a/server/tests/models/test_causal_lm.py +++ b/server/tests/models/test_causal_lm.py @@ -141,8 +141,8 @@ def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch): ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) - assert all([generation.token_id.item() == 13 for generation in generations]) - assert all([generation.token_text == "." for generation in generations]) + assert all([token_id.item() == 13 for generation in generations for token_id in generation.tokens.token_ids]) + assert all([token_text == "." 
for generation in generations for token_text in generation.tokens.texts])
     assert generations[0].request_id == 0
diff --git a/server/tests/models/test_seq2seq_lm.py b/server/tests/models/test_seq2seq_lm.py
index 2b59f731..60be77c8 100644
--- a/server/tests/models/test_seq2seq_lm.py
+++ b/server/tests/models/test_seq2seq_lm.py
@@ -155,8 +155,8 @@ def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch)
     )
     assert all([generation.generated_text is None for generation in generations])
     assert all([len(generation.prefill_tokens) == 1 for generation in generations])
-    assert all([generation.token_id.item() == 259 for generation in generations])
-    assert all([generation.token_text == " " for generation in generations])
+    assert all([token_id.item() == 259 for generation in generations for token_id in generation.tokens.token_ids])
+    assert all([token_text == " " for generation in generations for token_text in generation.tokens.texts])
     assert generations[0].request_id == 0
diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py
index fead2297..0817aee6 100644
--- a/server/text_generation_server/cli.py
+++ b/server/text_generation_server/cli.py
@@ -10,6 +10,7 @@ from pathlib import Path
 from loguru import logger
 from typing import Optional
 from enum import Enum
+from huggingface_hub import hf_hub_download

 app = typer.Typer()

@@ -31,6 +32,7 @@ def serve(
     revision: Optional[str] = None,
     sharded: bool = False,
     quantize: Optional[Quantization] = None,
+    speculate: Optional[int] = None,
     dtype: Optional[Dtype] = None,
     trust_remote_code: bool = False,
     uds_path: Path = "/tmp/text-generation-server",
@@ -39,9 +41,15 @@ def serve(
     otlp_endpoint: Optional[str] = None,
 ):
     if sharded:
-        assert os.getenv("WORLD_SIZE", None) is not None, "WORLD_SIZE must be set when sharded is True"
-        assert os.getenv("MASTER_ADDR", None) is not None, "MASTER_ADDR must be set when sharded is True"
-        assert os.getenv("MASTER_PORT", None) is not None, "MASTER_PORT must be set when sharded is True"
+        assert (
+            os.getenv("WORLD_SIZE", None) is not None
+        ), "WORLD_SIZE must be set when sharded is True"
+        assert (
+            os.getenv("MASTER_ADDR", None) is not None
+        ), "MASTER_ADDR must be set when sharded is True"
+        assert (
+            os.getenv("MASTER_PORT", None) is not None
+        ), "MASTER_PORT must be set when sharded is True"

     # Remove default handler
     logger.remove()
@@ -75,7 +83,11 @@ def serve(
         logger.info("CLI SHARDED = {}".format(num_shard))
         import subprocess

-        cmd = f"deepspeed --num_nodes 1 --num_gpus {num_shard} --no_local_rank {tgi_file} --model_id {model_id} --revision {revision} --sharded {sharded} --dtype {dtype} --uds_path {uds_path}"
+        cmd = f"deepspeed --num_nodes 1 --num_gpus {num_shard} --no_local_rank {tgi_file}"
+        cmd += f" --model_id {model_id} --revision {revision} --sharded {sharded}"
+        cmd += f" --dtype {dtype} --trust_remote_code {trust_remote_code} --uds_path {uds_path}"
+        if speculate is not None:
+            cmd += f" --speculate {speculate}"
         logger.info("CLI server start deepspeed ={} ".format(cmd))
         sys.stdout.flush()
         sys.stderr.flush()
@@ -119,7 +131,9 @@ def serve(
             logger.error(f"{cmd} exited with status = {proc.returncode}")
             return proc.returncode
     else:
-        server.serve(model_id, revision, dtype, uds_path, sharded)
+        server.serve(
+            model_id, revision, sharded, speculate, dtype, trust_remote_code, uds_path
+        )


 @app.command()
@@ -153,7 +167,7 @@ def download_weights(
         logger.info("Files are already present on the host. 
" "Skipping download.") return # Local files not found - except (utils.LocalEntryNotFoundError, FileNotFoundError): + except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError): pass is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv( @@ -161,6 +175,42 @@ def download_weights( ) is not None if not is_local_model: + try: + adapter_config_filename = hf_hub_download( + model_id, revision=revision, filename="adapter_config.json" + ) + utils.download_and_unload_peft( + model_id, revision, trust_remote_code=trust_remote_code + ) + is_local_model = True + utils.weight_files(model_id, revision, extension) + return + except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): + pass + + try: + import json + medusa_head = hf_hub_download(model_id, revision=revision, filename="medusa_lm_head.pt") + if auto_convert: + medusa_sf = Path(medusa_head[:-len(".pt")] + ".safetensors") + if not medusa_sf.exists(): + utils.convert_files([Path(medusa_head)], [medusa_sf], []) + medusa_config = hf_hub_download(model_id, revision=revision, filename="config.json") + with open(medusa_config, "r") as f: + config = json.load(f) + + model_id = config["base_model_name_or_path"] + revision = "main" + try: + utils.weight_files(model_id, revision, extension) + logger.info(f"Files for parent {model_id} are already present on the host. " "Skipping download.") + return + # Local files not found + except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError): + pass + except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): + pass + # Try to download weights from the hub try: filenames = utils.weight_hub_files(model_id, revision, extension) diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py index efe9b62a..f7377c6b 100644 --- a/server/text_generation_server/models/__init__.py +++ b/server/text_generation_server/models/__init__.py @@ -1,10 +1,14 @@ import torch from loguru import logger +from transformers.configuration_utils import PretrainedConfig from transformers.models.auto import modeling_auto -from transformers import AutoConfig from typing import Optional +# Needed to properly setup habana_frameworks +import text_generation_server.habana_quantization_env as hq_env + +from text_generation_server.utils.speculate import get_speculate, set_speculate from text_generation_server.models.model import Model from text_generation_server.models.causal_lm import CausalLM from text_generation_server.models.bloom import BLOOM @@ -18,10 +22,46 @@ torch.set_grad_enabled(False) def get_model( model_id: str, revision: Optional[str], - dtype: Optional[torch.dtype] = None, + speculate: Optional[int], + dtype: Optional[torch.dtype], + trust_remote_code: bool, ) -> Model: - config = AutoConfig.from_pretrained(model_id, revision=revision) - model_type = config.model_type + if speculate is not None: + set_speculate(speculate) + else: + set_speculate(0) + + config_dict, _ = PretrainedConfig.get_config_dict( + model_id, revision=revision, trust_remote_code=trust_remote_code + ) + + use_medusa = None + if "medusa_num_heads" in config_dict: + use_medusa = model_id + medusa_config = config_dict + model_id = config_dict["base_model_name_or_path"] + revision = "main" + speculate_medusa = config_dict["medusa_num_heads"] + if speculate is not None: + if speculate > speculate_medusa: + raise RuntimeError("Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, 
please make them match") + else: + set_speculate(speculate) + else: + set_speculate(speculate_medusa) + + config_dict, _ = PretrainedConfig.get_config_dict( + model_id, revision=revision, trust_remote_code=trust_remote_code + ) + method = "medusa" + else: + method = "n-gram" + + speculate = get_speculate() + if speculate > 0: + logger.info(f"Using speculation {method} with {speculate} input ids.") + + model_type = config_dict["model_type"] if model_type == "gpt_bigcode": return SantaCoder(model_id, revision, dtype) diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index bdc0b4c5..e8e63586 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -35,10 +35,9 @@ from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.models import Model from text_generation_server.models.types import ( Batch, - PrefillTokens, + Tokens, Generation, GeneratedText, - TopTokens, ) from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( @@ -48,6 +47,7 @@ from text_generation_server.utils import ( is_tokenizer_transparent, ) from text_generation_server.utils.debug import dbg_trace +from text_generation_server.utils.speculate import get_speculate tracer = trace.get_tracer(__name__) @@ -647,6 +647,8 @@ class CausalLM(Model): kwargs["attn_softmax_bf16"] = True kwargs["trim_logits"] = True + self.speculate = get_speculate() + super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, @@ -842,12 +844,12 @@ class CausalLM(Model): # Select next token input_length = batch.input_length if logits.shape[-2] > 1: - next_token_ids, next_token_logprobs, logprobs = batch.next_token_chooser( - batch.input_ids, logits[:, input_length - 1: input_length, :].squeeze(-2) + next_token_ids, next_token_logprobs, logprobs, _, _ = batch.next_token_chooser( + batch.input_ids, logits[:, input_length - 1: input_length, :].squeeze(-2), self.speculate ) else: - next_token_ids, next_token_logprobs, logprobs = batch.next_token_chooser( - batch.input_ids, logits.squeeze(-2) + next_token_ids, next_token_logprobs, logprobs, _, _ = batch.next_token_chooser( + batch.input_ids, logits.squeeze(-2), self.speculate ) batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, @@ -1017,7 +1019,9 @@ class CausalLM(Model): clean_up_tokenization_spaces=False, skip_special_tokens=False, ) - prefill_tokens = PrefillTokens(prefill_token_ids, prefill_logprobs, prefill_texts) + prefill_tokens = Tokens( + prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[] + ) else: prefill_tokens = None @@ -1027,8 +1031,10 @@ class CausalLM(Model): clean_up_tokenization_spaces=False, skip_special_tokens=False, ) - special_toptokens = [token_id in self.all_special_ids for token_id in top_token_ids] - top_tokens = TopTokens( + special_toptokens = [ + token_id in self.all_special_ids for token_id in top_token_ids + ] + top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, @@ -1040,10 +1046,12 @@ class CausalLM(Model): generation = Generation( request.id, prefill_tokens, - next_token_id, - next_token_logprob, - next_token_text, - next_token_id in self.all_special_ids, + Tokens( + [next_token_id], + [next_token_logprob], + [next_token_text], + [next_token_id in self.all_special_ids], + ), generated_text, top_tokens, ) diff --git a/server/text_generation_server/models/flash_causal_lm.py 
b/server/text_generation_server/models/flash_causal_lm.py
index f1a4854f..79344ea1 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -11,13 +11,13 @@ from opentelemetry import trace
 from transformers import PreTrainedTokenizerBase
 from typing import Optional, Tuple, List, Type, Union, Dict

-from text_generation_server.models import Model
+from text_generation_server.models import Model
+from text_generation_server.utils.speculate import get_speculate
 from text_generation_server.models.types import (
     Batch,
-    PrefillTokens,
+    Tokens,
     Generation,
     GeneratedText,
-    TopTokens,
 )
 from text_generation_server.models.cache_manager import (
     get_cache_manager,
@@ -41,6 +41,7 @@ class FlashCausalLMBatch(Batch):
     # Decoder values
     input_ids: torch.Tensor
     position_ids: torch.Tensor
+    speculative_ids: torch.Tensor

     # Flash Attention values
@@ -120,6 +121,7 @@ class FlashCausalLMBatch(Batch):
         )["input_ids"]

         position_ids = []
+        speculative_ids = []
         cu_seqlen_prefill = [0]
         needed_blocks_slots = []
         start_slots = []
@@ -163,6 +165,8 @@ class FlashCausalLMBatch(Batch):
             input_length = len(tokenized_input)
             input_lengths.append(input_length)
+
+
             prefix_offsets.append(input_length - 5)
             read_offsets.append(input_length)

@@ -186,7 +190,8 @@ class FlashCausalLMBatch(Batch):
             # Paged attention
             # Remove one as the first token does not have a past
-            total_tokens = input_length + max_new_tokens - 1
+            speculative_length = get_speculate()
+            total_tokens = input_length + max_new_tokens - 1 + speculative_length
             needed_blocks = math.ceil(total_tokens / BLOCK_SIZE)
             blocks += needed_blocks
             needed_blocks_slots.append((needed_blocks, total_tokens))
@@ -224,7 +229,7 @@ class FlashCausalLMBatch(Batch):
             cumulative_max_length += total_tokens
             max_seqlen = max(max_seqlen, input_length)
             max_blocks = max(max_blocks, needed_blocks)
-            max_length = max(max_length, input_length + max_new_tokens)
+            max_length = max(max_length, input_length + max_new_tokens + speculative_length)

         next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
             next_token_chooser_parameters, dtype, device
@@ -255,7 +260,6 @@ class FlashCausalLMBatch(Batch):
             cu_seqlen_prefill = torch.tensor(
                 cu_seqlen_prefill, device=device, dtype=torch.int32
             )
-
             position_ids = position_ids.to(device)
             slot_indices = slot_indices.to(device)
             input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
@@ -309,6 +313,7 @@ class FlashCausalLMBatch(Batch):
             top_n_tokens_tensor=top_n_tokens_tensor,
             blocks=blocks,
             max_blocks=max_blocks,
+            speculative_ids=None,
         )

     @tracer.start_as_current_span("filter")
@@ -419,6 +424,7 @@ class FlashCausalLMBatch(Batch):
         slots = self.slots[slot_filtering_indices]
         next_token_chooser = self.next_token_chooser.filter(indices)
         top_n_tokens_tensor = self.top_n_tokens_tensor[indices]
+        speculative_ids = self.speculative_ids[indices] if self.speculative_ids is not None else None

         start_slots = torch.tensor(start_slots, dtype=torch.int64)
@@ -454,6 +460,7 @@ class FlashCausalLMBatch(Batch):
             top_n_tokens_tensor=top_n_tokens_tensor,
             blocks=blocks,
             max_blocks=max_blocks,
+            speculative_ids=speculative_ids,
         )

     @classmethod
@@ -473,6 +480,7 @@ class FlashCausalLMBatch(Batch):
             total_batch_size += len(b)
             total_slots += len(b.slots)
             blocks += b.blocks
+            speculative_length = b.speculative_ids.shape[1] if b.speculative_ids is not None else 0
             max_blocks = max(max_blocks, b.max_blocks)
             max_seqlen = max(max_seqlen, b.max_seqlen)
             max_length = max(
@@ -480,6 +488,7 @@ class
FlashCausalLMBatch(Batch):
                 max(
                     input_length
                     + stopping_criteria.max_new_tokens
+                    + speculative_length
                     - stopping_criteria.current_tokens
                     for input_length, stopping_criteria in zip(
                         b.input_lengths, b.stopping_criterias
@@ -577,6 +586,8 @@ class FlashCausalLMBatch(Batch):
             device=batches[0].next_token_chooser.device,
         )

+        speculative_ids = torch.cat([b.speculative_ids for b in batches], dim=0) if batches[0].speculative_ids is not None else None
+
         # Needed to avoid dropping blocks when the batches will go out of scope
         for b in batches:
             b.block_tables = None
@@ -611,6 +622,7 @@ class FlashCausalLMBatch(Batch):
             top_n_tokens_tensor=top_n_tokens_tensor,
             blocks=blocks,
             max_blocks=max_blocks,
+            speculative_ids=speculative_ids
         )

     def __del__(self):
@@ -714,16 +726,55 @@ class FlashCausalLM(Model):

     def forward(self, batch: FlashCausalLMBatch) -> Tuple[torch.Tensor, torch.Tensor]:
         # Model Forward
+        if batch.speculative_ids is not None:
+            input_ids=batch.input_ids
+            position_ids=batch.position_ids
+            cu_seqlen_prefill=batch.cu_seqlen_prefill
+            kv_cache=get_cache_manager().kv_cache
+            block_tables=batch.block_tables_tensor
+            slots=batch.slots[batch.slot_indices]
+            input_lengths=batch.input_lengths_tensor
+            max_s=batch.max_seqlen
+            lm_head_indices=batch.prefill_head_indices
+
+            speculative_ids = batch.speculative_ids
+
+            B, speculative_length = speculative_ids.shape
+            new_length = speculative_length + 1
+            new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1)
+            arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
+            arange_int = arange.to(dtype=torch.int32)
+            new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1)
+            slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
+            input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
+
+            # Copy the block tables for all members
+            block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B * new_length, -1).contiguous()
+            max_s = max_s + speculative_length
+
+            input_ids = new_input_ids
+            position_ids = new_position_ids
+        else:
+            input_ids=batch.input_ids
+            position_ids=batch.position_ids
+            cu_seqlen_prefill=batch.cu_seqlen_prefill
+            kv_cache=get_cache_manager().kv_cache
+            block_tables=batch.block_tables_tensor
+            slots=batch.slots[batch.slot_indices]
+            input_lengths=batch.input_lengths_tensor
+            max_s=batch.max_seqlen
+            lm_head_indices=batch.prefill_head_indices
+
         return self.model.forward(
-            input_ids=batch.input_ids,
-            position_ids=batch.position_ids,
-            cu_seqlen_prefill=batch.cu_seqlen_prefill,
-            kv_cache=get_cache_manager().kv_cache,
-            block_tables=batch.block_tables_tensor,
-            slots=batch.slots[batch.slot_indices],
-            input_lengths=batch.input_lengths_tensor,
-            max_s=batch.max_seqlen,
-            lm_head_indices=batch.prefill_head_indices,
+            input_ids=input_ids,
+            position_ids=position_ids,
+            cu_seqlen_prefill=cu_seqlen_prefill,
+            kv_cache=kv_cache,
+            block_tables=block_tables,
+            slots=slots,
+            input_lengths=input_lengths,
+            max_s=max_s,
+            lm_head_indices=lm_head_indices,
         )

     @tracer.start_as_current_span("generate_token")
@@ -752,21 +803,32 @@ class FlashCausalLM(Model):
             del batch
             raise e

+        if isinstance(out, tuple):
+            out, speculative_logits = out
+        else:
+            speculative_logits = None
+
+
         if prefill:
             next_token_logits = (
                 out[batch.prefill_next_token_indices] if prefill_logprobs else out
             )
+            if speculative_logits is not None:
+                speculative_logits = (
+                    speculative_logits[batch.prefill_next_token_indices] if
prefill_logprobs else speculative_logits + ) else: next_token_logits = out - next_input_ids, next_token_logprobs, logprobs = batch.next_token_chooser( - batch.all_input_ids_tensor[:, : batch.max_seqlen], next_token_logits + next_input_ids, next_token_logprobs, logprobs, accepted_ids, speculative_ids = batch.next_token_chooser( + batch.all_input_ids_tensor[:, : batch.max_seqlen], next_token_logits, get_speculate(), batch.speculative_ids, speculative_logits ) batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs ) + speculative_length = 0 if speculative_ids is None else speculative_ids.shape[1] if prefill: if len(batch) > 1 and prefill_logprobs: # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs @@ -792,6 +854,7 @@ class FlashCausalLM(Model): iterator = zip( batch.input_lengths, batch.all_input_ids, + accepted_ids ) # We do two for loops as the first one can run completely asynchronously from the GPU while for the second @@ -799,9 +862,11 @@ class FlashCausalLM(Model): # It is faster if we delay this sync for the maximum amount of time # For each member of the batch + index = 0 for i, ( input_length, all_input_ids, + n_accepted_ids ) in enumerate(iterator): # Indexing metadata start_index = cumulative_length @@ -830,15 +895,18 @@ class FlashCausalLM(Model): start_index + 1 : start_index + out_length ] - batch.all_input_ids_tensor[i, input_length] = next_input_ids[i] + for j in range(n_accepted_ids): + batch.all_input_ids_tensor[i, input_length + j] = next_input_ids[index] + index += 1 cumulative_length += input_length - # Set values in batch - batch.input_ids = next_input_ids - batch.position_ids = next_position_ids + 1 - batch.input_lengths_tensor += 1 - batch.slot_indices += 1 + + batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1] + batch.speculative_ids = speculative_ids + batch.position_ids = next_position_ids + accepted_ids + batch.input_lengths_tensor += accepted_ids + batch.slot_indices += accepted_ids if prefill and prefill_logprobs: # Get prefill logprobs @@ -851,7 +919,7 @@ class FlashCausalLM(Model): # GPU <-> CPU sync next_token_logprobs = next_token_logprobs.tolist() - next_token_ids = batch.input_ids.tolist() + next_token_ids = next_input_ids.tolist() # Zipped iterator iterator = zip( @@ -864,13 +932,13 @@ class FlashCausalLM(Model): batch.next_token_chooser.do_sample, batch.next_token_chooser.seeds, batch.top_n_tokens, - next_token_ids, - next_token_logprobs, + accepted_ids, batch_top_token_ids, batch_top_token_logprobs, ) # For each member of the batch + index = 0 for i, ( request, input_length, @@ -881,29 +949,43 @@ class FlashCausalLM(Model): do_sample, seed, top_n_tokens, - next_token_id, - next_token_logprob, + n_accepted_ids, top_token_ids, top_token_logprobs, ) in enumerate(iterator): # Append next token to all tokens - all_input_ids.append(next_token_id) + next_token_texts = [] + left = 0 + before = stopping_criteria.current_tokens - # Generated token - next_token_text, prefix_offset, read_offset = self.decode_token( - all_input_ids, - prefix_offset, - read_offset, - ) + current_stopped = False + for j in range(index, index + n_accepted_ids): + # Generated token + next_token_id = next_token_ids[j] + all_input_ids.append(next_token_id) + next_token_text, prefix_offset, read_offset = self.decode_token( + all_input_ids, + prefix_offset, + read_offset, + ) + next_token_texts.append(next_token_text) - # Evaluate stopping criteria - stop, reason = 
stopping_criteria( - next_token_id, - next_token_text, - ) + stop, reason = stopping_criteria( + next_token_id, + next_token_text, + ) - if not stop: - stopped = False + if stop: + left = index + n_accepted_ids - j - 1 + current_stopped = True + break + else: + current_stopped = False + stopped = stopped and current_stopped + + _next_token_ids = next_token_ids[index: index+n_accepted_ids - left] + _next_token_logprobs = next_token_logprobs[index: index+n_accepted_ids - left] + index += n_accepted_ids # Shard generations # All generations will be appended in the rust sharded client @@ -943,8 +1025,9 @@ class FlashCausalLM(Model): clean_up_tokenization_spaces=False, skip_special_tokens=False, ) - prefill_tokens = PrefillTokens( - prefill_token_ids, request_prefill_logprobs, prefill_texts + + prefill_tokens = Tokens( + prefill_token_ids, request_prefill_logprobs, prefill_texts, is_special = [] ) else: prefill_tokens = None @@ -958,7 +1041,7 @@ class FlashCausalLM(Model): special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] - top_tokens = TopTokens( + top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, @@ -970,10 +1053,12 @@ class FlashCausalLM(Model): generation = Generation( request.id, prefill_tokens, - next_token_id, - next_token_logprob, - next_token_text, - next_token_id in self.all_special_ids, + Tokens( + _next_token_ids, + _next_token_logprobs, + next_token_texts, + [nid in self.all_special_ids for nid in _next_token_ids], + ), generated_text, top_tokens, ) @@ -981,7 +1066,9 @@ class FlashCausalLM(Model): generations.append(generation) # Update values - batch.input_lengths[i] = input_length + 1 + batch.input_lengths[i] = input_length + n_accepted_ids.item() + if batch.input_lengths[i] > batch.max_seqlen: + batch.max_seqlen = batch.input_lengths[i] batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.all_input_ids[i] = all_input_ids @@ -994,6 +1081,5 @@ class FlashCausalLM(Model): batch.prefill_cu_outlens = None batch.prefill_head_indices = None batch.prefill_next_token_indices = None - batch.max_seqlen = batch.max_seqlen + 1 return generations, batch diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py index d2ed0b15..3a84b1b6 100644 --- a/server/text_generation_server/models/flash_llama.py +++ b/server/text_generation_server/models/flash_llama.py @@ -28,6 +28,7 @@ class FlashLlama(FlashCausalLM): quantize: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, + use_medusa: Optional[str] = None, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): @@ -66,6 +67,18 @@ class FlashLlama(FlashCausalLM): weights._set_gptq_params(model_id) model = FlashLlamaForCausalLM(config, weights) + if use_medusa: + from text_generation_server.utils.medusa import MedusaModel + from huggingface_hub import hf_hub_download + import json + medusa_config = hf_hub_download(use_medusa, revision=revision, filename="config.json") + with open(medusa_config, "r") as f: + config = json.load(f) + medusa_head = hf_hub_download(use_medusa, revision=revision, filename="medusa_lm_head.pt") + medusa_sf = medusa_head[:-len(".pt")] + ".safetensors" + weights = Weights([medusa_sf], device, dtype, process_group=self.process_group) + lm_head = model.lm_head + model.lm_head = MedusaModel(config, weights, lm_head) torch.distributed.barrier(group=self.process_group) super(FlashLlama, 
self).__init__(
diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py
index 919e4625..e103d9fc 100644
--- a/server/text_generation_server/models/flash_mistral.py
+++ b/server/text_generation_server/models/flash_mistral.py
@@ -21,6 +21,7 @@ from text_generation_server.models.custom_modeling.flash_mistral_modeling import
     FlashMistralForCausalLM,
     MistralConfig,
 )
+from text_generation_server.utils.speculate import get_speculate
 from text_generation_server.utils import (
     initialize_torch_distributed,
     weight_files,
@@ -132,7 +133,8 @@ class FlashMistralBatch(FlashCausalLMBatch):

             # Paged attention
             # Remove one as the first token does not have a past
-            total_tokens = input_length + max_new_tokens - 1
+            speculative_length = get_speculate()
+            total_tokens = input_length + max_new_tokens - 1 + speculative_length

             # Needed blocks can not go over SLIDING_WINDOW_BLOCKS
             needed_blocks = min(
@@ -183,7 +185,7 @@ class FlashMistralBatch(FlashCausalLMBatch):
             cumulative_max_length += total_tokens
             max_seqlen = max(max_seqlen, input_length)
             max_blocks = max(max_blocks, needed_blocks)
-            max_length = max(max_length, input_length + max_new_tokens)
+            max_length = max(max_length, input_length + max_new_tokens + speculative_length)

         next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
             next_token_chooser_parameters, dtype, device
@@ -272,6 +274,7 @@ class FlashMistralBatch(FlashCausalLMBatch):
             blocks=blocks,
             max_blocks=max_blocks,
             prefill_cache_indices=prefill_cache_indices,
+            speculative_ids=None
         )


@@ -340,17 +343,55 @@ class FlashMistral(FlashCausalLM):

     def forward(self, batch: FlashMistralBatch) -> Tuple[torch.Tensor, torch.Tensor]:
         # Model Forward
+        if batch.speculative_ids is not None:
+            input_ids=batch.input_ids
+            position_ids=batch.position_ids
+            cu_seqlen_prefill=batch.cu_seqlen_prefill
+            kv_cache=get_cache_manager().kv_cache
+            block_tables=batch.block_tables_tensor
+            slots=batch.slots[batch.slot_indices]
+            input_lengths=batch.input_lengths_tensor
+            max_s=batch.max_seqlen
+            lm_head_indices=batch.prefill_head_indices
+
+            speculative_ids = batch.speculative_ids
+
+            B, speculative_length = speculative_ids.shape
+            new_length = speculative_length + 1
+            new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1)
+            arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
+            arange_int = arange.to(dtype=torch.int32)
+            new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1)
+            slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
+            input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
+
+            # Copy the block tables for all members
+            block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B * new_length, -1).contiguous()
+            max_s = max_s + speculative_length
+
+            input_ids = new_input_ids
+            position_ids = new_position_ids
+        else:
+            input_ids=batch.input_ids
+            position_ids=batch.position_ids
+            cu_seqlen_prefill=batch.cu_seqlen_prefill
+            kv_cache=get_cache_manager().kv_cache
+            block_tables=batch.block_tables_tensor
+            slots=batch.slots[batch.slot_indices]
+            input_lengths=batch.input_lengths_tensor
+            max_s=batch.max_seqlen
+            lm_head_indices=batch.prefill_head_indices
         logits = self.model.forward(
-            input_ids=batch.input_ids,
-            position_ids=batch.position_ids,
-            cu_seqlen_prefill=batch.cu_seqlen_prefill,
-            kv_cache=get_cache_manager().kv_cache,
-            block_tables=batch.block_tables_tensor,
-            
slots=batch.slots[batch.slot_indices], - input_lengths=batch.input_lengths_tensor, - max_s=batch.max_seqlen, + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=cu_seqlen_prefill, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, prefill_cache_indices=batch.prefill_cache_indices, - lm_head_indices=batch.prefill_head_indices, + lm_head_indices=lm_head_indices, ) if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None diff --git a/server/text_generation_server/models/idefics_causal_lm.py b/server/text_generation_server/models/idefics_causal_lm.py index dcad1fa9..2f4bb139 100644 --- a/server/text_generation_server/models/idefics_causal_lm.py +++ b/server/text_generation_server/models/idefics_causal_lm.py @@ -20,7 +20,7 @@ from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.models.types import ( Batch, - PrefillTokens, + Tokens, Generation, GeneratedText, ) @@ -791,8 +791,8 @@ class IdeficsCausalLM(Model): clean_up_tokenization_spaces=False, skip_special_tokens=False, ) - prefill_tokens = PrefillTokens( - prefill_token_ids, prefill_logprobs, prefill_texts + prefill_tokens = Tokens( + prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[] ) else: prefill_tokens = None @@ -802,10 +802,12 @@ class IdeficsCausalLM(Model): generation = Generation( request.id, prefill_tokens, - next_token_id_squeezed, - next_token_logprob, - next_token_text, - next_token_id_squeezed.item() in self.all_special_ids, + Tokens( + [next_token_id_squeezed], + [next_token_logprob], + [next_token_text], + [next_token_id_squeezed.item() in self.all_special_ids], + ), generated_text, top_tokens, ) diff --git a/server/text_generation_server/models/model.py b/server/text_generation_server/models/model.py index 73e1f1af..52be52ff 100644 --- a/server/text_generation_server/models/model.py +++ b/server/text_generation_server/models/model.py @@ -5,7 +5,8 @@ from abc import ABC, abstractmethod from typing import List, Optional, Tuple, Type, TypeVar from transformers import PreTrainedTokenizerBase -from text_generation_server.models.types import Batch, GeneratedText +from text_generation_server.models.types import Batch, Generation +from text_generation_server.utils.speculate import get_speculate from text_generation_server.pb.generate_pb2 import InfoResponse B = TypeVar("B", bound=Batch) @@ -22,6 +23,7 @@ class Model(ABC): rank: int = 0, world_size: int = 1, kwargs: dict = {}, + speculate: Optional[int] = None, ): self.model = model self.tokenizer = tokenizer @@ -32,7 +34,14 @@ class Model(ABC): self.rank = rank self.world_size = world_size self.kwargs = kwargs - self.has_position_ids = inspect.signature(model.forward).parameters.get("position_ids", None) is not None + if speculate is None: + speculate = get_speculate() + self.speculate = speculate + + self.has_position_ids = ( + inspect.signature(model.forward).parameters.get("position_ids", None) + is not None + ) self.check_initialized() @@ -42,6 +51,7 @@ class Model(ABC): requires_padding=self.requires_padding, dtype=str(self.dtype), device_type=self.device.type, + speculate=self.speculate ) @property @@ -50,7 +60,7 @@ class Model(ABC): raise NotImplementedError @abstractmethod - def generate_token(self, batch: B) -> Tuple[List[GeneratedText], Optional[B]]: + def generate_token(self, batch: B) -> Tuple[List[Generation], Optional[B]]: raise NotImplementedError def warmup(self, batch: B, 
max_total_tokens: int): diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py index d4d3cd19..279b5505 100644 --- a/server/text_generation_server/models/seq2seq_lm.py +++ b/server/text_generation_server/models/seq2seq_lm.py @@ -11,8 +11,7 @@ from text_generation_server.models.types import ( GeneratedText, Batch, Generation, - PrefillTokens, - TopTokens, + Tokens, ) from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling @@ -733,10 +732,11 @@ class Seq2SeqLM(Model): # Prefill if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: - prefill_tokens = PrefillTokens( + prefill_tokens = Tokens( [self.tokenizer.bos_token_id], [float("nan")], [self.tokenizer.bos_token], + [False] ) else: prefill_tokens = None @@ -750,7 +750,7 @@ class Seq2SeqLM(Model): special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] - top_tokens = TopTokens( + top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, @@ -762,10 +762,12 @@ class Seq2SeqLM(Model): generation = Generation( request.id, prefill_tokens, - next_token_id_squeezed, - next_token_logprob, - next_token_text, - next_token_id_squeezed.item() in self.all_special_ids, + Tokens( + [next_token_id_squeezed], + [next_token_logprob], + [next_token_text], + [next_token_id_squeezed.item() in self.all_special_ids], + ), generated_text, top_tokens, ) diff --git a/server/text_generation_server/models/types.py b/server/text_generation_server/models/types.py index 0e27680d..87c03d63 100644 --- a/server/text_generation_server/models/types.py +++ b/server/text_generation_server/models/types.py @@ -58,33 +58,15 @@ class GeneratedText: @dataclass -class PrefillTokens: - token_ids: List[int] - logprobs: List[float] - texts: List[str] - - def to_pb(self) -> generate_pb2.PrefillTokens: - return generate_pb2.PrefillTokens( - ids=self.token_ids, logprobs=self.logprobs, texts=self.texts - ) - - def __len__(self): - return len(self.token_ids) - - -@dataclass -class TopTokens: +class Tokens: token_ids: List[int] logprobs: List[float] texts: List[str] is_special: List[bool] - def to_pb(self) -> generate_pb2.TopTokens: - return generate_pb2.TopTokens( - ids=self.token_ids, - logprobs=self.logprobs, - texts=self.texts, - is_special=self.is_special, + def to_pb(self) -> generate_pb2.Tokens: + return generate_pb2.Tokens( + ids=self.token_ids, logprobs=self.logprobs, texts=self.texts, is_special=self.is_special ) def __len__(self): @@ -94,14 +76,11 @@ class TopTokens: @dataclass class Generation: request_id: int - prefill_tokens: Optional[PrefillTokens] - token_id: int - token_logprob: float - token_text: str - token_is_special: bool + prefill_tokens: Optional[Tokens] + tokens: Tokens generated_text: Optional[GeneratedText] # Optional for now, since it's not yet supported for every model. 
- top_tokens: Optional[TopTokens] + top_tokens: Optional[List[Tokens]] def to_pb(self) -> generate_pb2.Generation: return generate_pb2.Generation( @@ -109,10 +88,7 @@ class Generation: prefill_tokens=self.prefill_tokens.to_pb() if self.prefill_tokens is not None else None, - token_id=self.token_id, - token_logprob=self.token_logprob, - token_text=self.token_text, - token_is_special=self.token_is_special, + tokens=self.tokens.to_pb(), generated_text=self.generated_text.to_pb() if self.generated_text is not None else None, diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py index 67358d3c..33a26bd2 100644 --- a/server/text_generation_server/server.py +++ b/server/text_generation_server/server.py @@ -107,9 +107,11 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): def serve( model_id: str, revision: Optional[str], - dtype: Optional[str], - uds_path: Path, sharded: bool, + speculate: Optional[int], + dtype: Optional[str], + trust_remote_code: bool, + uds_path: Path, ): # Remove default handler logger.remove() @@ -126,8 +128,10 @@ def serve( async def serve_inner( model_id: str, revision: Optional[str], - dtype: Optional[str] = None, sharded: bool = False, + speculate: Optional[int] = None, + dtype: Optional[str] = None, + trust_remote_code: bool = False, ): unix_socket_template = "unix://{}-{}" logger.info("Server:server_inner: sharded ={}".format(sharded)) @@ -151,7 +155,9 @@ def serve( if revision == "None": revision = None try: - model = get_model(model_id, revision=revision, dtype=data_type) + model = get_model( + model_id, revision, speculate, dtype=data_type, trust_remote_code=trust_remote_code + ) except Exception: logger.exception("Error when initializing model") raise @@ -181,13 +187,7 @@ def serve( except KeyboardInterrupt: logger.info("Signal received. Shutting down") await server.stop(0) - finally: - if hasattr(model,'finish_quantization_measurements'): - model.finish_quantization_measurements() - logger.info( - "Starting Server : model_id= {}, revision = {} dtype = {} sharded = {} ".format( - model_id, revision, dtype, sharded - ) + asyncio.run( + serve_inner(model_id, revision, sharded, speculate, dtype, trust_remote_code) ) - asyncio.run(serve_inner(model_id, revision, dtype, sharded)) diff --git a/server/text_generation_server/tgi_service.py b/server/text_generation_server/tgi_service.py index bf1bab40..f88c8c8b 100644 --- a/server/text_generation_server/tgi_service.py +++ b/server/text_generation_server/tgi_service.py @@ -9,12 +9,18 @@ import argparse def main(args): logger.info("TGIService: starting tgi service .... 
") logger.info( - "TGIService: --model_id {}, --revision {}, --sharded {}, --dtype {}, --uds_path {} ".format( - args.model_id, args.revision, args.sharded, args.dtype, args.uds_path + "TGIService: --model_id {}, --revision {}, --sharded {}, --speculate {}, --dtype {}, --trust_remote_code {}, --uds_path {} ".format( + args.model_id, args.revision, args.sharded, args.speculate, args.dtype, args.trust_remote_code, args.uds_path ) ) server.serve( - model_id=args.model_id, revision=args.revision, dtype=args.dtype, uds_path=args.uds_path, sharded=args.sharded + model_id=args.model_id, + revision=args.revision, + sharded=args.sharded, + speculate=args.speculate, + dtype=args.dtype, + trust_remote_code=args.trust_remote_code, + uds_path=args.uds_path, ) @@ -23,7 +29,9 @@ if __name__ == "__main__": parser.add_argument("--model_id", type=str) parser.add_argument("--revision", type=str) parser.add_argument("--sharded", type=bool) + parser.add_argument("--speculate", type=int, default=None) parser.add_argument("--dtype", type=str) + parser.add_argument("--trust_remote_code", type=bool) parser.add_argument("--uds_path", type=Path) args = parser.parse_args() main(args) diff --git a/server/text_generation_server/utils/medusa.py b/server/text_generation_server/utils/medusa.py new file mode 100644 index 00000000..029de122 --- /dev/null +++ b/server/text_generation_server/utils/medusa.py @@ -0,0 +1,51 @@ +import torch +from dataclasses import dataclass +from text_generation_server.utils.layers import TensorParallelHead, FastLinear + +@dataclass +class Output: + logits: torch.FloatTensor = None + speculative_logits: torch.FloatTensor = None + + +class ResBlock(torch.nn.Module): + def __init__(self, config, prefix, weights): + super().__init__() + self.linear = FastLinear.load(config, prefix=f"{prefix}.linear", weights=weights, bias=True) + self.act = torch.nn.SiLU() + + def forward(self, x): + return x + self.act(self.linear(x)) + + +class MedusaModel(torch.nn.Module): + def __init__( + self, + config, + weights, + lm_head + ): + super().__init__() + self.heads = torch.nn.ModuleList( + [MedusaHead(config, prefix=f"{i}", weights=weights) for i in range(config["medusa_num_heads"])] + ) + self.lm_head = lm_head + + def forward(self, x): + logits = self.lm_head(x) + speculative_logits = torch.stack([head(x) for head in self.heads], dim=1) + return logits, speculative_logits + + +class MedusaHead(torch.nn.Module): + def __init__(self, config, prefix, weights): + super().__init__() + self.blocks = torch.nn.ModuleList([ResBlock(config, prefix=f"{prefix}.{i}", weights=weights) for i in range(config["medusa_num_layers"])]) + n = len(self.blocks) + self.out = FastLinear.load(config, prefix=f"{prefix}.{n}", weights=weights, bias=False) + + def forward(self, x): + for block in self.blocks: + x = block(x) + x = self.out(x) + return x diff --git a/server/text_generation_server/utils/speculate.py b/server/text_generation_server/utils/speculate.py new file mode 100644 index 00000000..38a91972 --- /dev/null +++ b/server/text_generation_server/utils/speculate.py @@ -0,0 +1,12 @@ + +SPECULATE = None + +def get_speculate() -> int: + global SPECULATE + return SPECULATE + +def set_speculate(speculate: int): + global SPECULATE + SPECULATE = speculate + + diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py index c50d10e3..acd55f26 100644 --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -142,6 +142,22 @@ class 
StoppingCriteria:
         )


+def create_n_gram_speculation(input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool):
+    # Very simple approach: find the first match of each seed token in the input.
+    # This is much less refined than a true n-gram lookup, but it seems to work
+    # reasonably well in grounded mode, and it is far faster, with a much better
+    # worst case, since everything happens on device.
+    B = accepted_ids.shape[0]
+    device = input_ids.device
+    seeds = next_ids[accepted_ids.cumsum(dim=-1) -1 ]
+    indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1
+    all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange(speculate, device=device)
+    all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1)
+
+    speculative_ids = input_ids.gather(dim=-1, index=all_indices)
+    return speculative_ids
+
+
 class HeterogeneousNextTokenChooser:
     def __init__(
         self,
@@ -206,16 +222,72 @@ class HeterogeneousNextTokenChooser:
         self.dtype = dtype
         self.device = device

-    def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor):
-        if self.watermark_processor is not None:
-            scores = self.watermark_processor(input_ids, scores)
-        if self.repetition_processor is not None:
-            scores = self.repetition_processor(input_ids, scores)
+    def __call__(
+        self,
+        input_ids: torch.Tensor,
+        scores: torch.Tensor,
+        speculate: int,
+        speculated_ids: Optional[torch.Tensor] = None,
+        speculative_scores: Optional[torch.Tensor] = None,
+        verbose=False
+    ):
+        if speculated_ids is not None:
+            B = scores.shape[0] // (speculated_ids.shape[1] + 1)
+            S = speculated_ids.shape[1] + 1
+            scores = scores.view(B, S, -1)
+        else:
+            B = scores.shape[0]
+            S = 1
+            scores = scores.view(B, S, -1)

-        for warper in self.warpers:
-            scores = warper(input_ids, scores)
+        next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long)
+        for j in range(S):
+            _scores = scores[:, j]
+            if self.watermark_processor is not None:
+                _scores = self.watermark_processor(input_ids, _scores)
+            if self.repetition_processor is not None:
+                _scores = self.repetition_processor(input_ids, _scores)
+
+            for warper in self.warpers:
+                _scores = warper(input_ids, _scores)
+
+            _next_ids = self.choice(_scores)
+            scores[:, j] = _scores
+            next_ids[:, j] = _next_ids
+        next_ids = next_ids.view(B*S)
+        scores = scores.view( B* S, -1)
+
+        if speculated_ids is not None:
+            accepted_ids = []
+            B = next_ids.shape[0] // (speculated_ids.shape[1] + 1)
+            S = speculated_ids.shape[1] + 1
+            indices = []
+            for i in range(B):
+                _next_ids = next_ids[i*S: (i + 1)*S]
+                _speculated_ids = speculated_ids[i]
+                validate_speculative = _next_ids[:-1] == _speculated_ids
+                index = i * S
+                accepted = 1
+                # First is always valid
+                indices.append(index)
+                for valid in validate_speculative.tolist():
+                    if valid:
+                        index += 1
+                        accepted += 1
+                        indices.append(index)
+                    else:
+                        break
+                accepted_ids.append(accepted)
+
+            accepted_ids = torch.tensor(accepted_ids, device=input_ids.device, dtype=input_ids.dtype)
+            next_ids = next_ids[indices]
+            scores = scores[indices]
+            indices = torch.arange(B, device=input_ids.device) * S
+            if speculative_scores is not None:
+                speculative_scores = speculative_scores[indices + accepted_ids - 1]
+        else:
+            accepted_ids = torch.ones_like(next_ids)

-        next_ids = self.choice(scores)
         # ignore logprobs if we use greedy search
         if type(self.choice) == Greedy:
             logprobs = torch.empty_like(scores, device="cpu")
@@ -224,7 +296,17 @@ class HeterogeneousNextTokenChooser:
             logprobs = torch.log_softmax(scores, -1)
         next_logprobs = 
torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1)

-        return next_ids, next_logprobs, logprobs
+        if speculate > 0:
+            if speculative_scores is not None:
+                # Medusa provided some scores
+                speculative_ids = Greedy()(speculative_scores)
+            else:
+                # n-gram
+                speculative_ids = create_n_gram_speculation(input_ids, next_ids, accepted_ids, speculate, verbose)
+        else:
+            speculative_ids = None
+
+        return next_ids, next_logprobs, logprobs, accepted_ids, speculative_ids

     def filter(self, indices):
         if self.watermark_processor is not None:

From 9aef9029823e6a8fc1938866202e72fd056f9277 Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Mon, 11 Dec 2023 14:43:40 +0100
Subject: [PATCH 004/153] feat: mixtral (#1328)

---
 router/src/server.rs                          |   5 +
 server/Makefile                               |   3 +
 .../text_generation_server/models/__init__.py |   1 -
 .../custom_modeling/flash_llama_modeling.py   |  85 +--
 .../custom_modeling/flash_mistral_modeling.py |  82 +-
 .../custom_modeling/flash_mixtral_modeling.py | 708 ++++++++++++++++++
 .../flash_santacoder_modeling.py              |   1 -
 .../models/flash_mistral.py                   |  59 +-
 .../models/flash_mixtral.py                   |  26 +
 server/text_generation_server/utils/layers.py | 185 +++--
 10 files changed, 928 insertions(+), 227 deletions(-)
 create mode 100644 server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
 create mode 100644 server/text_generation_server/models/flash_mixtral.py

diff --git a/router/src/server.rs b/router/src/server.rs
index 1830a879..78e2af3b 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -633,6 +633,9 @@ pub async fn run(
     // Batch size buckets
     let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
     let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();
+    // Speculated tokens buckets
+    let skipped_matcher = Matcher::Full(String::from("tgi_request_skipped_tokens"));
+    let skipped_buckets: Vec<f64> = (0..shard_info.speculate + 1).map(|x| x as f64).collect();

     // Prometheus handler
     let builder = PrometheusBuilder::new()
@@ -645,6 +648,8 @@ pub async fn run(
         .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
         .unwrap()
         .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
+        .unwrap()
+        .set_buckets_for_metric(skipped_matcher, &skipped_buckets)
         .unwrap();
     let prom_handle = builder
         .install_recorder()
diff --git a/server/Makefile b/server/Makefile
index 49001f6d..23dc6959 100644
--- a/server/Makefile
+++ b/server/Makefile
@@ -16,6 +16,9 @@ gen-server:
 	find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . 
\1/g' {} \; touch text_generation_server/pb/__init__.py +install-megablocks: + pip install git+https://github.com/OlivierDehaene/megablocks@181709df192de9a941fdf3a641cdc65a0462996e + install: gen-server pip install pip --upgrade pip install -r requirements_cuda.txt diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py index f7377c6b..581a007a 100644 --- a/server/text_generation_server/models/__init__.py +++ b/server/text_generation_server/models/__init__.py @@ -38,7 +38,6 @@ def get_model( use_medusa = None if "medusa_num_heads" in config_dict: use_medusa = model_id - medusa_config = config_dict model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_medusa = config_dict["medusa_num_heads"] diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py index 4aeb447d..d06b87eb 100644 --- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py @@ -34,14 +34,8 @@ from text_generation_server.utils.layers import ( PositionRotaryEmbedding, TensorParallelHead, get_linear, + FastRMSNorm ) -from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM - -if IS_CUDA_SYSTEM: - import dropout_layer_norm -elif IS_ROCM_SYSTEM: - from vllm import layernorm_ops - class LlamaConfig(PretrainedConfig): def __init__( @@ -95,75 +89,6 @@ class LlamaConfig(PretrainedConfig): ) -class LlamaRMSNorm(nn.Module): - def __init__(self, prefix, weights, eps=1e-6): - """ - LlamaRMSNorm is equivalent to T5LayerNorm - """ - super().__init__() - - weight = weights.get_tensor(f"{prefix}.weight") - self.weight = nn.Parameter(weight) - self.variance_epsilon = eps - - def forward(self, hidden_states, residual=None): - if hidden_states.shape[-1] > 8192: - if residual is not None: - hidden_states += residual - residual = hidden_states - - hidden_states = hidden_states.to(torch.float32) - variance = hidden_states.pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt( - variance + self.variance_epsilon - ) - - # convert into half-precision if necessary - if self.weight.dtype in [torch.float16, torch.bfloat16]: - hidden_states = hidden_states.to(self.weight.dtype) - - return self.weight * hidden_states, residual - elif IS_CUDA_SYSTEM: - # faster post attention rms norm - normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd( - hidden_states, - residual, - self.weight, - None, - None, - None, - None, - None, - 0.0, - self.variance_epsilon, - 1.0, - 0, - None, - False, - True, # Activate RMSNorm - ) - if res is None: - res = hidden_states - - return normed_hidden_states, res - elif IS_ROCM_SYSTEM: - # We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not. - if residual is not None: - hidden_states += residual - residual = hidden_states - - out = torch.empty_like(hidden_states) - layernorm_ops.rms_norm( - out, - hidden_states, - self.weight.data, - self.variance_epsilon, - ) - return out, residual - else: - raise ValueError("Your system seem to be not supported. 
Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") - - def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) @@ -363,10 +288,8 @@ class FlashLlamaLayer(nn.Module): ) self.mlp = LlamaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) - self.input_layernorm = LlamaRMSNorm( - prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps - ) - self.post_attention_layernorm = LlamaRMSNorm( + self.input_layernorm = FastRMSNorm.load(prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps) + self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, @@ -430,7 +353,7 @@ class FlashLlamaModel(torch.nn.Module): for layer_id in range(config.num_hidden_layers) ] ) - self.norm = LlamaRMSNorm( + self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps ) diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index 959949f0..4e56b188 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -35,13 +35,9 @@ from text_generation_server.utils.layers import ( PositionRotaryEmbedding, TensorParallelHead, get_linear, + FastRMSNorm ) -from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM -if IS_CUDA_SYSTEM: - import dropout_layer_norm -elif IS_ROCM_SYSTEM: - from vllm import layernorm_ops if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM: raise ImportError("Mistral model requires flash attn v2") @@ -100,76 +96,6 @@ class MistralConfig(PretrainedConfig): **kwargs, ) - -class MistralRMSNorm(nn.Module): - def __init__(self, prefix, weights, eps=1e-6): - """ - LlamaRMSNorm is equivalent to T5LayerNorm - """ - super().__init__() - - weight = weights.get_tensor(f"{prefix}.weight") - self.weight = nn.Parameter(weight) - self.variance_epsilon = eps - - def forward(self, hidden_states, residual=None): - if hidden_states.shape[-1] > 8192: - if residual is not None: - hidden_states += residual - residual = hidden_states - - hidden_states = hidden_states.to(torch.float32) - variance = hidden_states.pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt( - variance + self.variance_epsilon - ) - - # convert into half-precision if necessary - if self.weight.dtype in [torch.float16, torch.bfloat16]: - hidden_states = hidden_states.to(self.weight.dtype) - - return self.weight * hidden_states, residual - elif IS_CUDA_SYSTEM: - # faster post attention rms norm - normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd( - hidden_states, - residual, - self.weight, - None, - None, - None, - None, - None, - 0.0, - self.variance_epsilon, - 1.0, - 0, - None, - False, - True, # Activate RMSNorm - ) - if res is None: - res = hidden_states - - return normed_hidden_states, res - elif IS_ROCM_SYSTEM: - # We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not. 
- if residual is not None: - hidden_states += residual - residual = hidden_states - - out = torch.empty_like(hidden_states) - layernorm_ops.rms_norm( - out, - hidden_states, - self.weight.data, - self.variance_epsilon, - ) - return out, residual - else: - raise ValueError("Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") - - def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) @@ -371,10 +297,10 @@ class MistralLayer(nn.Module): ) self.mlp = MistralMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) - self.input_layernorm = MistralRMSNorm( + self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) - self.post_attention_layernorm = MistralRMSNorm( + self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, @@ -440,7 +366,7 @@ class MistralModel(torch.nn.Module): for layer_id in range(config.num_hidden_layers) ] ) - self.norm = MistralRMSNorm( + self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps ) diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py new file mode 100644 index 00000000..66753d5a --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -0,0 +1,708 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
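+#
+# Rough usage sketch (illustrative only, not part of the upstream file): the
+# server normally builds this model through FlashMixtral in
+# models/flash_mixtral.py, which loads the sharded weights first. Roughly:
+#
+#     config = MixtralConfig.from_pretrained(model_id)
+#     config.quantize = None
+#     model = FlashMixtralForCausalLM(config, weights)
+#     logits = model(input_ids, position_ids, cu_seqlen_prefill, kv_cache,
+#                    block_tables, slots, input_lengths, max_s,
+#                    prefill_cache_indices)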
+ +import torch +import torch.distributed + +import numpy as np + +from torch import nn +from transformers.activations import ACT2FN +from transformers.configuration_utils import PretrainedConfig +from typing import Optional, List, Tuple + +from text_generation_server.utils import paged_attention, flash_attn +from text_generation_server.utils.flash_attn import HAS_FLASH_ATTN_V2_ROCM, HAS_FLASH_ATTN_V2_CUDA +from text_generation_server.utils.layers import ( + FastLinear, + FastRMSNorm, + TensorParallelRowLinear, + TensorParallelColumnLinear, + TensorParallelEmbedding, + PositionRotaryEmbedding, + TensorParallelHead, + get_linear, +) + +if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM: + raise ImportError("Mixtral model requires flash attn v2") + +try: + import megablocks.ops as ops +except ImportError: + raise ImportError("Mixtral model requires megablocks to be installed") + +try: + import stk +except ImportError: + raise ImportError("Mixtral model requires stk to be installed") + + +class MixtralConfig(PretrainedConfig): + model_type = "mixtral" + + def __init__( + self, + vocab_size=32000, + hidden_size=4096, + intermediate_size=14336, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=8, + hidden_act="silu", + max_position_embeddings=4096 * 32, + initializer_range=0.02, + rms_norm_eps=1e-05, + use_cache=True, + pad_token_id=None, + bos_token_id=1, + eos_token_id=2, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + sliding_window=4096, + num_experts_per_tok=2, + num_local_experts=8, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.sliding_window = sliding_window + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_theta = rope_theta + self.num_experts_per_tok = num_experts_per_tok + self.num_local_experts = num_local_experts + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + +def promote_scalar(x: torch.Tensor) -> torch.Tensor: + return x.view(1) if len(x.size()) == 0 else x + + +def load_attention(config, prefix, weights): + if config.num_attention_heads != config.num_key_value_heads: + return _load_gqa(config, prefix, weights) + else: + return TensorParallelColumnLinear.load_multi( + config, + prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], + dim=0, + weights=weights, + bias=False, + ) + + +def _load_gqa(config, prefix: str, weights): + assert config.hidden_size % config.num_attention_heads == 0 + assert config.num_attention_heads % weights.process_group.size() == 0 + + weight = weights.get_multi_weights_col( + prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], + quantize=config.quantize, + dim=0, + ) + + if config.quantize not in ["gptq", "awq"]: + weight = weight.to(dtype=weights.dtype).to(device=weights.device) + + head_size = config.hidden_size // config.num_attention_heads + num_heads = config.num_attention_heads // 
weights.process_group.size() + num_key_value_heads = config.num_key_value_heads // weights.process_group.size() + assert list(weight.shape) == [ + (num_heads + 2 * num_key_value_heads) * head_size, + config.hidden_size, + ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" + + return TensorParallelColumnLinear( + get_linear(weight, bias=None, quantize=config.quantize) + ) + + +def _load_experts(config, prefix, mat, weights): + if config.quantize is not None: + raise NotImplementedError("Mixtral does not support weight quantization yet.") + + assert mat in ["w1", "w2", "w3"] + + world_size = weights.process_group.size() + rank = weights.process_group.rank() + + assert ( + config.intermediate_size % world_size == 0 + ), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards" + + block_size = config.intermediate_size // world_size + start = rank * block_size + stop = (rank + 1) * block_size + + tensor = torch.empty((config.num_local_experts * block_size, config.hidden_size), + dtype=weights.dtype, + device=weights.device) + + for i in range(config.num_local_experts): + slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight") + + if mat == "w2": + expert_slice = slice_[:, start:stop].t().contiguous() + else: + expert_slice = slice_[start:stop] + tensor[i * block_size:(i + 1) * block_size] = expert_slice.to(dtype=weights.dtype).to(device=weights.device) + return tensor + + +class MixtralAttention(torch.nn.Module): + def __init__( + self, + prefix: str, + config, + weights, + ): + super().__init__() + self.max_past = ( + config.sliding_window if config.sliding_window is not None else 0 + ) + self.num_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.num_heads + + self.rotary_emb = PositionRotaryEmbedding.static( + config=config, + dim=self.head_size, + base=config.rope_theta, + device=weights.device, + ) + + self.softmax_scale = self.head_size ** -0.5 + + if self.num_heads % weights.process_group.size() != 0: + raise ValueError( + f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " + f"and `num_shards`: {weights.process_group.size()}" + ) + self.num_heads = self.num_heads // weights.process_group.size() + self.num_key_value_heads = ( + config.num_key_value_heads // weights.process_group.size() + ) + + self.query_key_value = load_attention(config, prefix, weights) + + self.o_proj = TensorParallelRowLinear.load( + config, + prefix=f"{prefix}.o_proj", + weights=weights, + bias=False, + ) + self.num_groups = self.num_heads // self.num_key_value_heads + self.kv_head_mapping = torch.arange( + 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device + ).repeat_interleave(self.num_groups) + + def forward( + self, + hidden_states, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, + ): + qkv = self.query_key_value(hidden_states) + query, kv = qkv.split( + [ + self.head_size * self.num_heads, + 2 * self.head_size * self.num_key_value_heads, + ], + dim=1, + ) + query = query.view(-1, self.num_heads, self.head_size) + kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) + + self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) + + if prefill_cache_indices is not None: + kv_to_cache = kv[prefill_cache_indices] + else: + kv_to_cache = kv + + paged_attention.reshape_and_cache( + kv_to_cache[:, 0], 
kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots
+        )
+
+        # output tensor
+        attn_output = torch.empty_like(query)
+
+        # Prefill
+        if cu_seqlen_prefill is not None:
+            # flash attention
+            flash_attn.attention(
+                query,
+                torch.select(kv, dim=1, index=0),
+                torch.select(kv, dim=1, index=1),
+                attn_output,
+                cu_seqlen_prefill,
+                max_s,
+                self.softmax_scale,
+                window_size_left=self.max_past,
+            )
+        # Decode
+        else:
+            paged_attention.attention(
+                attn_output,
+                query,
+                kv_cache[0],
+                kv_cache[1],
+                self.kv_head_mapping,
+                self.softmax_scale,
+                block_tables,
+                input_lengths,
+                max_s,
+            )
+
+        return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
+
+
+@torch.jit.script
+def select_experts(gate_logits: torch.Tensor, top_k: int):
+    # all_probs: (sequence_length, n_experts) and upcast for softmax
+    all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
+    # weights, selected_experts: (sequence_length, top-k)
+    weights, selected_experts = torch.topk(all_probs, top_k, dim=-1)
+    weights /= weights.sum(dim=-1, keepdim=True)
+    weights = weights.view(-1)
+    selected_experts = selected_experts.view(-1)
+
+    return selected_experts, weights
+
+
+@torch.jit.script
+def round_up(x: torch.Tensor, value: int):
+    return torch.div(x + (value - 1), value, rounding_mode="trunc") * value
+
+
+class BlockSparseMoE(nn.Module):
+    """
+    Built on the paper and library Megablocks as described in
+    https://arxiv.org/abs/2211.15841. This implementation is
+    strictly equivalent to standard MoE with full capacity (no
+    dropped tokens). It's faster since it formulates MoE operations
+    in terms of block-sparse operations to accommodate imbalanced
+    assignments of tokens to experts, whereas standard MoE either
+    (1) drops tokens at the cost of reduced performance or (2) sets
+    the capacity factor to the number of experts and thus wastes
+    computation and memory on padding.
+    """
+
+    def __init__(self, prefix, config: MixtralConfig, weights):
+        super().__init__()
+        self.hidden_dim = config.hidden_size
+        self.ffn_dim = config.intermediate_size // weights.process_group.size()
+        self.num_experts = config.num_local_experts
+        self.top_k = config.num_experts_per_tok
+
+        act = config.hidden_act
+        if "gelu" in act:
+            self.act = lambda x: torch.nn.functional.gelu(
+                x,
+                approximate="tanh"
+                if act in ["gelu_fast", "gelu_pytorch_tanh"]
+                else "none",
+            )
+        elif "silu" in act:
+            self.act = torch.nn.functional.silu
+        else:
+            self.act = ACT2FN[act]
+
+        # gating
+        self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
+
+        # merged expert weights, all of size  (n_experts * ffn_dim, hidden_dim)
+        self.w1 = _load_experts(config, f"{prefix}.experts", "w1", weights).t()
+        self.w2 = _load_experts(config, f"{prefix}.experts", "w2", weights)
+        self.w3 = _load_experts(config, f"{prefix}.experts", "w3", weights).t()
+
+        self.offsets = None
+        self.offsets_block_rows = 0
+
+        self.process_group = weights.process_group
+
+        # Calculate the number of bits needed to represent the expert indices
+        # so that we can pass it to radix sort.
+        self.sort_end_bit = max(int(np.ceil(np.log2(self.num_experts))), 1)
+        self.blocking = 128
+        self.quantize_scatter_num_bits = -1
+
+    def topology(self, x: torch.Tensor, padded_bins: torch.Tensor):
+        padded_tokens, _ = x.size()
+        assert padded_tokens % self.blocking == 0
+        assert self.ffn_dim % self.blocking == 0
+
+        # Offsets for the sparse matrix. All rows have the
+        # same number of nonzero blocks dictated by the
+        # dimensionality of a single expert. 
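+        # Illustrative example (hypothetical sizes, not from a real config):
+        # with blocking=128, a sharded ffn_dim of 1792 and 256 padded tokens,
+        # block_rows = 256 // 128 = 2 and blocks_per_row = 1792 // 128 = 14,
+        # so `offsets` below is [0, 14, 28]: row i of the sparse matrix owns
+        # the nonzero blocks offsets[i]..offsets[i+1] of `column_indices`.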
+        block_rows = padded_tokens // self.blocking
+        blocks_per_row = self.ffn_dim // self.blocking
+        if self.offsets is None or block_rows > self.offsets_block_rows:
+            self.offsets = torch.arange(
+                0,
+                block_rows * blocks_per_row + 1,
+                blocks_per_row,
+                dtype=torch.int32,
+                device=x.device,
+            )
+            self.offsets_block_rows = block_rows
+            offsets = self.offsets
+        else:
+            offsets = self.offsets[:block_rows]
+
+        # Indices for the sparse matrix. The indices for
+        # the intermediate matrix are dynamic depending
+        # on the mapping of tokens to experts.
+        column_indices = ops.topology(padded_bins, self.blocking, block_rows,
+                                      blocks_per_row)
+
+        # For now, use meta init to save the device memory.
+        data = torch.empty(
+            column_indices.numel(),
+            self.blocking,
+            self.blocking,
+            dtype=x.dtype,
+            device="meta",
+        )
+        shape = (padded_tokens, self.ffn_dim * self.num_experts)
+        row_indices = stk.ops.row_indices(shape, data, offsets, column_indices)
+        return stk.Matrix(
+            shape,
+            data,
+            row_indices,
+            column_indices,
+            offsets,
+            False,
+            False,
+            False,
+        )
+
+    def indices_and_padded_bins(self, selected_experts: torch.Tensor):
+        # Sort the expert ids to produce the scatter/gather
+        # indices for the permutation.
+        # selected_experts = selected_experts.int()
+
+        # bin_ids == the expert id each routed token is assigned to, in sorted order
+        # indices == the permutation that sorts tokens by expert
+        bin_ids, indices = ops.sort(selected_experts, self.sort_end_bit)
+        # bin_ids => [0, 0, 0, 2, 2, ...] => [num_tokens * top_k]
+        # indices => [14, 32, 33, ...] => [num_tokens * top_k]
+
+        # Histogram the expert ids to identify the number of
+        # tokens routed to each expert.
+        tokens_per_expert = ops.histogram(selected_experts, self.num_experts)
+        # tokens_per_expert => [3, 0, 2, ...] => [num_experts]
+
+        # Round the token counts up to the block size used in
+        # the matrix multiplications. Calculate the starting
+        # position of each bin.
+
+        # List of size num_experts
+        padded_tokens_per_expert = round_up(tokens_per_expert,
+                                            self.blocking)
+        # padded_tokens_per_expert => [128, 0, 128, ...]
+
+        # Cumulative padded token counts per expert, i.e. the end offset of each padded bin
+        padded_bins = ops.inclusive_cumsum(padded_tokens_per_expert, 0)
+        padded_bins = promote_scalar(padded_bins)
+        # padded_bins => [128, 128, 256, ...]
+
+        # Calculate the bin bounds for the sorted tokens.
+        bins = ops.inclusive_cumsum(tokens_per_expert, 0)
+        bins = promote_scalar(bins)
+        # bins => [3, 3, 5, ...]
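+        # Putting the example values above together: tokens_per_expert
+        # [3, 0, 2] with blocking 128 gives padded_tokens_per_expert
+        # [128, 0, 128], padded_bins [128, 128, 256] and bins [3, 3, 5],
+        # while `indices` is the permutation that makes each expert's
+        # routed tokens contiguous in the padded buffer.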
+ + return indices, bin_ids, bins, padded_bins, tokens_per_expert + + @torch.inference_mode() + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + x: (sequence_length, model_dim) + gate_logits: (sequence_length, n_experts) + """ + # optional reshape + input_shape = x.shape + x = x.view(-1, input_shape[-1]) + + # gate_logits: (sequence_length, n_experts) + gate_logits = self.gate(x) + selected_experts, weights = select_experts(gate_logits, self.top_k) + + ( + indices, + bin_ids, + bins, + padded_bins, + _, + ) = self.indices_and_padded_bins(selected_experts) + + # Permute tokens and pad to prepare expert computation + # (top_k * sequence_length + padding, model_dim) + x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, + self.top_k) + + # Create the sparse matrix topology + with torch.no_grad(): + topo = self.topology(x, padded_bins) + + # Perform the expert computation + # First Dense x Dense -> Sparse for w1 and w3, + # (top_k * sequence_length + padding, ffn_dim * n_experts) + x = stk.Matrix( + topo.size(), + self.act(stk.ops.sdd(x, self.w1, topo).data) * + stk.ops.sdd(x, self.w3, topo).data, + topo.row_indices, + topo.column_indices, + topo.offsets, + topo.column_indices_t, + topo.offsets_t, + topo.block_offsets_t, + ) + + # Then Sparse x Dense -> Dense for w2 + # (top_k * sequence_length + padding, model_dim) + x = stk.ops.dsd(x, self.w2) + + # Permute back and remove padding + # (sequence_length, model_dim) + x = ops.padded_scatter( + x, + indices, + bin_ids, + weights, + bins, + padded_bins, + self.top_k, + self.quantize_scatter_num_bits, + ).view(*input_shape) + + if self.process_group.size() > 1: + torch.distributed.all_reduce(x, group=self.process_group) + + return x.view(*input_shape) + + +class MixtralLayer(nn.Module): + def __init__(self, layer_id, config, weights): + super().__init__() + prefix = f"model.layers.{layer_id}" + + self.self_attn = MixtralAttention( + prefix=f"{prefix}.self_attn", config=config, weights=weights + ) + self.block_sparse_moe = BlockSparseMoE(f"{prefix}.block_sparse_moe", config, weights) + + self.input_layernorm = FastRMSNorm.load( + prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps + ) + self.post_attention_layernorm = FastRMSNorm.load( + prefix=f"{prefix}.post_attention_layernorm", + weights=weights, + eps=config.rms_norm_eps, + ) + + def forward( + self, + hidden_states, + residual, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, + ): + normed_hidden_states, res = self.input_layernorm(hidden_states, residual) + + # Self Attention + attn_output = self.self_attn( + normed_hidden_states, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, + ) + + # faster post attention rms norm + normed_attn_res_output, attn_res = self.post_attention_layernorm( + attn_output, res + ) + + block_sparse_moe_output = self.block_sparse_moe(normed_attn_res_output) + + return block_sparse_moe_output, attn_res + + +class MixtralModel(torch.nn.Module): + def __init__(self, config, weights): + super().__init__() + + self.embed_tokens = TensorParallelEmbedding( + prefix="model.embed_tokens", weights=weights + ) + + self.layers = nn.ModuleList( + [ + MixtralLayer( + layer_id, + config, + weights, + ) + for layer_id in range(config.num_hidden_layers) + ] + ) + self.norm = FastRMSNorm.load( + prefix="model.norm", weights=weights, eps=config.rms_norm_eps + ) + + self.head_size = 
self.layers[0].self_attn.head_size + self.num_heads = self.layers[0].self_attn.num_heads + self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + prefill_cache_indices: Optional[torch.Tensor], + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + + # Get rotary cos and sin for this forward + # Avoid to index in each layer + cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( + position_ids, max_s, hidden_states.dtype + ) + + residual = None + for i, layer in enumerate(self.layers): + hidden_states, residual = layer( + hidden_states, + residual, + cos, + sin, + cu_seqlen_prefill, + kv_cache[i], + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, + ) + + hidden_states, _ = self.norm(hidden_states, residual) + + return hidden_states + + +class FlashMixtralForCausalLM(torch.nn.Module): + def __init__(self, config, weights): + super().__init__() + + self.model = MixtralModel(config, weights) + self.lm_head = TensorParallelHead.load( + config, + prefix="lm_head", + weights=weights, + ) + self.max_past = config.sliding_window + if self.max_past is None: + raise ValueError("max_past cannot be None") + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + prefill_cache_indices: Optional[torch.Tensor], + lm_head_indices: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if prefill_cache_indices is not None: + # Slots also need to be sliced as it has the same size as the whole kv tensor + slots = slots[prefill_cache_indices] + else: + # Clamp in decode mode as paged attention requires clamped values whereas the flash attention + # kernel requires the true values + max_s = min(self.max_past, max_s) + input_lengths = torch.clamp(input_lengths, max=self.max_past) + + hidden_states = self.model( + input_ids, + position_ids, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, + ) + if lm_head_indices is not None: + hidden_states = hidden_states[lm_head_indices] + logits = self.lm_head(hidden_states) + return logits diff --git a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py index c3c7617a..cd93d32a 100644 --- a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py @@ -6,7 +6,6 @@ from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn -from text_generation_server.utils.flash_attn import attention from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index e103d9fc..5ce37164 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ 
b/server/text_generation_server/models/flash_mistral.py @@ -8,14 +8,13 @@ from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase from transformers.models.llama import LlamaTokenizerFast -from typing import Optional, Tuple, Type +from typing import Optional, Tuple, Type, List from text_generation_server.pb import generate_pb2 from text_generation_server.models import FlashCausalLM from text_generation_server.models.flash_causal_lm import FlashCausalLMBatch, BLOCK_SIZE from text_generation_server.models.cache_manager import ( get_cache_manager, - set_cache_manager, ) from text_generation_server.models.custom_modeling.flash_mistral_modeling import ( FlashMistralForCausalLM, @@ -46,11 +45,11 @@ class FlashMistralBatch(FlashCausalLMBatch): @classmethod def from_pb( - cls, - pb: generate_pb2.Batch, - tokenizer: PreTrainedTokenizerBase, - dtype: torch.dtype, - device: torch.device, + cls, + pb: generate_pb2.Batch, + tokenizer: PreTrainedTokenizerBase, + dtype: torch.dtype, + device: torch.device, ) -> "FlashCausalLMBatch": global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS @@ -100,12 +99,12 @@ class FlashMistralBatch(FlashCausalLMBatch): # Parse batch for i, (r, tokenized_input) in enumerate( - zip(pb.requests, batch_tokenized_inputs) + zip(pb.requests, batch_tokenized_inputs) ): # request id -> idx in list mapping requests_idx_mapping[r.id] = i - tokenized_input = tokenized_input[-r.truncate :] + tokenized_input = tokenized_input[-r.truncate:] input_length = len(tokenized_input) input_lengths.append(input_length) @@ -278,14 +277,16 @@ class FlashMistralBatch(FlashCausalLMBatch): ) -class FlashMistral(FlashCausalLM): +class BaseFlashMistral(FlashCausalLM): def __init__( - self, - model_id: str, - revision: Optional[str] = None, - quantize: Optional[str] = None, - dtype: Optional[torch.dtype] = None, - trust_remote_code: bool = False, + self, + config_cls, + model_cls, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, ): global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS @@ -305,7 +306,7 @@ class FlashMistral(FlashCausalLM): trust_remote_code=trust_remote_code, ) - config = MistralConfig.from_pretrained( + config = config_cls.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) config.quantize = quantize @@ -321,10 +322,10 @@ class FlashMistral(FlashCausalLM): if config.quantize in ["gptq", "awq"]: weights._set_gptq_params(model_id) - model = FlashMistralForCausalLM(config, weights) + model = model_cls(config, weights) torch.distributed.barrier(group=self.process_group) - super(FlashMistral, self).__init__( + super(BaseFlashMistral, self).__init__( model=model, tokenizer=tokenizer, num_layers=len(model.model.layers), @@ -396,3 +397,23 @@ class FlashMistral(FlashCausalLM): if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None return logits + + +class FlashMistral(BaseFlashMistral): + def __init__( + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, + ): + super(FlashMistral, self).__init__( + config_cls=MistralConfig, + model_cls=FlashMistralForCausalLM, + model_id=model_id, + revision=revision, + quantize=quantize, + dtype=dtype, + trust_remote_code=trust_remote_code + ) diff --git a/server/text_generation_server/models/flash_mixtral.py 
b/server/text_generation_server/models/flash_mixtral.py new file mode 100644 index 00000000..c45ae50f --- /dev/null +++ b/server/text_generation_server/models/flash_mixtral.py @@ -0,0 +1,26 @@ +import torch + +from typing import Optional + +from text_generation_server.models.flash_mistral import BaseFlashMistral +from text_generation_server.models.custom_modeling.flash_mixtral_modeling import MixtralConfig, FlashMixtralForCausalLM + + +class FlashMixtral(BaseFlashMistral): + def __init__( + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, + ): + super(FlashMixtral, self).__init__( + config_cls=MixtralConfig, + model_cls=FlashMixtralForCausalLM, + model_id=model_id, + revision=revision, + quantize=quantize, + dtype=dtype, + trust_remote_code=trust_remote_code + ) diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index a93ccd0e..d533016d 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -18,7 +18,7 @@ except ImportError: from accelerate import init_empty_weights from text_generation_server.utils.gptq.quant_linear import QuantLinear -from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM +from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM HAS_AWQ = True try: @@ -43,16 +43,18 @@ if os.getenv("DISABLE_EXLLAMA") == "True": elif CAN_EXLLAMA: try: if V2: - from text_generation_server.utils.gptq.exllamav2 import (QuantLinear as ExllamaQuantLinear, - create_exllama_buffers, - set_device, + from text_generation_server.utils.gptq.exllamav2 import (QuantLinear as ExllamaQuantLinear, + create_exllama_buffers, + set_device, ) + HAS_EXLLAMA = "2" else: from text_generation_server.utils.gptq.exllama import (Ex4bitLinear as ExllamaQuantLinear, - create_exllama_buffers, - set_device, - ) + create_exllama_buffers, + set_device, + ) + HAS_EXLLAMA = "1" except ImportError: @@ -112,7 +114,7 @@ def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, st @classmethod def load_conv2d_no_bias( - cls, prefix, weights, in_channels, out_channels, kernel_size, stride + cls, prefix, weights, in_channels, out_channels, kernel_size, stride ): weight = weights.get_tensor(f"{prefix}.weight") with init_empty_weights(): @@ -136,9 +138,9 @@ torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias class FastLinear(nn.Module): def __init__( - self, - weight, - bias, + self, + weight, + bias, ) -> None: super().__init__() self.weight = nn.Parameter(weight) @@ -162,9 +164,9 @@ class FastLinear(nn.Module): class EETQLinear(nn.Module): def __init__( - self, - weight, - bias, + self, + weight, + bias, ) -> None: super().__init__() device = weight.device @@ -183,13 +185,13 @@ class EETQLinear(nn.Module): class Linear8bitLt(nn.Module): def __init__( - self, - weight, - bias, - has_fp16_weights=True, - memory_efficient_backward=False, - threshold=0.0, - index=None, + self, + weight, + bias, + has_fp16_weights=True, + memory_efficient_backward=False, + threshold=0.0, + index=None, ): super().__init__() assert ( @@ -526,9 +528,12 @@ class TensorParallelEmbedding(nn.Module): try: if IS_CUDA_SYSTEM: import dropout_layer_norm + elif IS_ROCM_SYSTEM: + from vllm import layernorm_ops else: dropout_layer_norm = None + class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if hidden_states.shape[-1] > 
8192 or IS_ROCM_SYSTEM: @@ -563,10 +568,81 @@ try: residual = hidden_states return normed_hidden_states, residual + + + class FastRMSNorm(nn.Module): + def __init__(self, weight: torch.Tensor, eps: float): + super().__init__() + + self.weight = nn.Parameter(weight) + self.variance_epsilon = eps + + @classmethod + def load(cls, prefix, weights, eps=1e-6): + weight = weights.get_tensor(f"{prefix}.weight") + return cls(weight, eps) + + def forward(self, hidden_states, residual=None): + if hidden_states.shape[-1] > 8192: + if residual is not None: + hidden_states += residual + residual = hidden_states + + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt( + variance + self.variance_epsilon + ) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states, residual + elif IS_CUDA_SYSTEM: + # faster post attention rms norm + normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd( + hidden_states, + residual, + self.weight, + None, + None, + None, + None, + None, + 0.0, + self.variance_epsilon, + 1.0, + 0, + None, + False, + True, # Activate RMSNorm + ) + if res is None: + res = hidden_states + + return normed_hidden_states, res + elif IS_ROCM_SYSTEM: + # We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not. + if residual is not None: + hidden_states += residual + residual = hidden_states + + out = torch.empty_like(hidden_states) + layernorm_ops.rms_norm( + out, + hidden_states, + self.weight.data, + self.variance_epsilon, + ) + return out, residual + else: + raise ValueError( + "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") + except ImportError: pass - try: if IS_CUDA_SYSTEM: from flash_attn.layers.rotary import RotaryEmbedding @@ -574,12 +650,14 @@ try: elif IS_ROCM_SYSTEM: from vllm import pos_encoding_ops + def _create_inv_freq(dim, base, device): inv_freq = 1.0 / ( - base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim) + base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim) ) return inv_freq + def _get_rope_config(config): if os.getenv("ROPE_SCALING", None) is not None: rope_scaling = { @@ -589,6 +667,7 @@ try: return rope_scaling return getattr(config, "rope_scaling", None) + class PositionRotaryEmbedding(nn.Module): def __init__(self, inv_freq, scaling_factor): super().__init__() @@ -606,12 +685,12 @@ try: if IS_CUDA_SYSTEM: rotary_dim = cos.shape[-1] q1 = query[..., :rotary_dim] - q2 = query[..., rotary_dim : 2 * rotary_dim] + q2 = query[..., rotary_dim: 2 * rotary_dim] rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False) k1 = key[..., :rotary_dim] - k2 = key[..., rotary_dim : 2 * rotary_dim] + k2 = key[..., rotary_dim: 2 * rotary_dim] rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False) elif IS_ROCM_SYSTEM: @@ -630,7 +709,8 @@ try: True ) else: - raise ValueError("Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") + raise ValueError( + "Your system seem to be not supported. 
Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") @classmethod def static(cls, config, dim, base, device): @@ -713,9 +793,9 @@ try: # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) @@ -729,7 +809,7 @@ try: self._sin_cached = torch.sin(freqs).to(dtype) def get_cos_sin( - self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype + self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype ): """ Return cos and sin for the asked position ids @@ -747,6 +827,7 @@ try: # Note: this unsqueeze is not necessary on RoCm + VLLM ROPE implementation, but we leave it as is to avoid yet an other controlflow. return cos.unsqueeze(1), sin.unsqueeze(1) + class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding): def __init__(self, dim, max_position_embeddings, base, device, scaling_factor): inv_freq = _create_inv_freq(dim, base, device) @@ -755,18 +836,18 @@ try: self.max_position_embeddings = max_position_embeddings self.base = base - def _update_cos_sin_cache(self, dtype, device, seqlen): + def _update_cos_sin_cache(self, dtype, device, seqlen): # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): if seqlen > self.max_position_embeddings: newbase = self.base * ( - (self.scaling_factor * seqlen / self.max_position_embeddings) - - (self.scaling_factor - 1) + (self.scaling_factor * seqlen / self.max_position_embeddings) + - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) self.inv_freq = _create_inv_freq( self.dim, newbase, self.inv_freq.device @@ -783,8 +864,11 @@ try: # Inverse dim formula to find dim based on number of rotations import math + + def find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=2048): - return (dim * math.log(max_position_embeddings/(num_rotations * 2 * math.pi)))/(2 * math.log(base)) + return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base)) + # Find dim range bounds based on rotations def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048): @@ -792,7 +876,8 @@ try: low_rot, dim, base, max_position_embeddings)) high = math.ceil(find_correction_dim( high_rot, dim, base, max_position_embeddings)) - return max(low, 0), min(high, dim-1) # Clamp values just in case + return max(low, 0), min(high, dim - 1) # Clamp values just in case + def linear_ramp_mask(min, max, dim): if min == max: @@ -802,13 +887,16 @@ try: ramp_func = torch.clamp(linear_func, 0, 1) return ramp_func + def get_mscale(scale=1): if scale <= 1: return 1.0 return 0.1 * math.log(scale) + 1.0 + class YarnPositionRotaryEmbedding(PositionRotaryEmbedding): - def __init__(self, dim, max_position_embeddings, base, device, scaling_factor,*, extrapolation_factor, attn_factor, beta_fast, beta_slow): + def __init__(self, dim, 
max_position_embeddings, base, device, scaling_factor, *, extrapolation_factor, + attn_factor, beta_fast, beta_slow): inv_freq = _create_inv_freq(dim, base, device) super().__init__(inv_freq, scaling_factor) self.dim = dim @@ -818,15 +906,16 @@ try: self.attn_factor = attn_factor self.beta_fast = beta_fast self.beta_slow = beta_slow - self.mscale = float(get_mscale(self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation + self.mscale = float(get_mscale( + self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation def _update_cos_sin_cache(self, dtype, device, seqlen): # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): if seqlen > self.max_position_embeddings: inv_freq_extrapolation = _create_inv_freq( @@ -834,13 +923,15 @@ try: ) freqs = 1.0 / inv_freq_extrapolation inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs) - low, high = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.max_position_embeddings) - inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation + low, high = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, + self.max_position_embeddings) + inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to( + device)) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask self.inv_freq = inv_freq - self.mscale = float(get_mscale(self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation - + self.mscale = float(get_mscale( + self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) From 79f268f95ad2922624fb793e47dabdb49155a7ca Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Mon, 11 Dec 2023 14:49:52 +0100 Subject: [PATCH 005/153] chore: formatting --- integration-tests/conftest.py | 13 +- integration-tests/models/test_flash_medusa.py | 12 +- .../models/test_flash_mistral.py | 4 +- integration-tests/models/test_idefics.py | 4 +- server/tests/models/test_bloom.py | 16 +- server/tests/models/test_causal_lm.py | 16 +- server/tests/models/test_seq2seq_lm.py | 16 +- server/text_generation_server/cli.py | 30 ++- .../text_generation_server/models/__init__.py | 4 +- .../models/causal_lm.py | 5 +- .../custom_modeling/flash_llama_modeling.py | 11 +- .../custom_modeling/flash_mistral_modeling.py | 9 +- .../custom_modeling/flash_mixtral_modeling.py | 182 ++++++++-------- .../idefics_image_processing.py | 6 +- .../custom_modeling/idefics_modeling.py | 14 +- .../custom_modeling/idefics_processing.py | 2 +- .../models/flash_causal_lm.py | 124 ++++++----- .../models/flash_llama.py | 15 +- .../models/flash_mistral.py | 107 +++++---- .../models/flash_mixtral.py | 19 +- .../models/idefics_causal_lm.py | 13 +- server/text_generation_server/models/model.py | 2 +- .../models/seq2seq_lm.py | 10 +- 
server/text_generation_server/models/types.py | 5 +- server/text_generation_server/server.py | 10 +- .../utils/flash_attn.py | 10 +- .../utils/gptq/exllamav2.py | 132 ++++++----- server/text_generation_server/utils/layers.py | 206 ++++++++++-------- server/text_generation_server/utils/medusa.py | 28 ++- .../utils/paged_attention.py | 35 +-- server/text_generation_server/utils/peft.py | 4 +- .../text_generation_server/utils/speculate.py | 5 +- server/text_generation_server/utils/tokens.py | 31 ++- .../text_generation_server/utils/weights.py | 7 +- update_doc.py | 6 +- 35 files changed, 676 insertions(+), 437 deletions(-) diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py index 35c8faae..4cb4ca59 100644 --- a/integration-tests/conftest.py +++ b/integration-tests/conftest.py @@ -25,6 +25,7 @@ DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data") class ResponseComparator(JSONSnapshotExtension): rtol = 0.2 + def serialize( self, data, @@ -69,7 +70,9 @@ class ResponseComparator(JSONSnapshotExtension): prefill_token.id == other.id and prefill_token.text == other.text and ( - math.isclose(prefill_token.logprob, other.logprob, rel_tol=self.rtol) + math.isclose( + prefill_token.logprob, other.logprob, rel_tol=self.rtol + ) if prefill_token.logprob is not None else prefill_token.logprob == other.logprob ) @@ -153,6 +156,7 @@ class GenerousResponseComparator(ResponseComparator): # Needed for GPTQ with exllama which has serious numerical fluctuations. rtol = 0.75 + class LauncherHandle: def __init__(self, port: int): self.client = AsyncClient(f"http://localhost:{port}") @@ -198,6 +202,7 @@ class ProcessLauncherHandle(LauncherHandle): def response_snapshot(snapshot): return snapshot.use_extension(ResponseComparator) + @pytest.fixture def generous_response_snapshot(snapshot): return snapshot.use_extension(GenerousResponseComparator) @@ -219,7 +224,7 @@ def launcher(event_loop): quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, - dtype: Optional[str] = None + dtype: Optional[str] = None, ): port = random.randint(8000, 10_000) master_port = random.randint(10_000, 20_000) @@ -282,7 +287,7 @@ def launcher(event_loop): quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, - dtype: Optional[str] = None + dtype: Optional[str] = None, ): port = random.randint(8000, 10_000) @@ -335,7 +340,7 @@ def launcher(event_loop): ], volumes=volumes, ports={"80/tcp": port}, - shm_size="1G" + shm_size="1G", ) yield ContainerLauncherHandle(client, container.name, port) diff --git a/integration-tests/models/test_flash_medusa.py b/integration-tests/models/test_flash_medusa.py index 003409b0..a0ce0570 100644 --- a/integration-tests/models/test_flash_medusa.py +++ b/integration-tests/models/test_flash_medusa.py @@ -50,10 +50,16 @@ async def test_flash_medusa_all_params(flash_medusa, response_snapshot): @pytest.mark.asyncio @pytest.mark.private async def test_flash_medusa_load(flash_medusa, generate_load, response_snapshot): - responses = await generate_load(flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4) + responses = await generate_load( + flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4 + ) assert len(responses) == 4 - assert all([r.generated_text == responses[0].generated_text for r in responses]), f"{[r.generated_text for r in responses]}" - assert responses[0].generated_text == '\nDeep learning is a subset of machine learning' + assert all( + [r.generated_text == 
responses[0].generated_text for r in responses] + ), f"{[r.generated_text for r in responses]}" + assert ( + responses[0].generated_text == "\nDeep learning is a subset of machine learning" + ) assert responses == response_snapshot diff --git a/integration-tests/models/test_flash_mistral.py b/integration-tests/models/test_flash_mistral.py index 7d21afd9..ace3328b 100644 --- a/integration-tests/models/test_flash_mistral.py +++ b/integration-tests/models/test_flash_mistral.py @@ -56,7 +56,9 @@ async def test_flash_mistral_load(flash_mistral, generate_load, response_snapsho ) assert len(responses) == 4 - assert all([r.generated_text == responses[0].generated_text for r in responses]), f"{[r.generated_text for r in responses]}" + assert all( + [r.generated_text == responses[0].generated_text for r in responses] + ), f"{[r.generated_text for r in responses]}" assert responses[0].generated_text == ": Let n = 10 - 1" assert responses == response_snapshot diff --git a/integration-tests/models/test_idefics.py b/integration-tests/models/test_idefics.py index 5a81a4f0..7e1d3e11 100644 --- a/integration-tests/models/test_idefics.py +++ b/integration-tests/models/test_idefics.py @@ -3,7 +3,9 @@ import pytest @pytest.fixture(scope="module") def idefics_handle(launcher): - with launcher("HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16") as handle: + with launcher( + "HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16" + ) as handle: yield handle diff --git a/server/tests/models/test_bloom.py b/server/tests/models/test_bloom.py index 303e9e71..5cedb9f1 100644 --- a/server/tests/models/test_bloom.py +++ b/server/tests/models/test_bloom.py @@ -135,8 +135,20 @@ def test_causal_lm_generate_token(default_bloom, default_bloom_batch): ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) - assert all([token_id.item() == 10264 for generation in generations for token_id in generation.tokens.token_ids]) - assert all([token_text == "Test" for generation in generations for token_text in generation.tokens.texts]) + assert all( + [ + token_id.item() == 10264 + for generation in generations + for token_id in generation.tokens.token_ids + ] + ) + assert all( + [ + token_text == "Test" + for generation in generations + for token_text in generation.tokens.texts + ] + ) assert generations[0].request_id == 0 diff --git a/server/tests/models/test_causal_lm.py b/server/tests/models/test_causal_lm.py index e9c2cd3a..3a9acfbb 100644 --- a/server/tests/models/test_causal_lm.py +++ b/server/tests/models/test_causal_lm.py @@ -141,8 +141,20 @@ def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch): ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) - assert all([token_id.item() == 13 for generation in generations for token_id in generation.tokens.token_ids]) - assert all([token_text == "." for generation in generations for token_text in generation.tokens.texts]) + assert all( + [ + token_id.item() == 13 + for generation in generations + for token_id in generation.tokens.token_ids + ] + ) + assert all( + [ + token_text == "." 
+ for generation in generations + for token_text in generation.tokens.texts + ] + ) assert generations[0].request_id == 0 diff --git a/server/tests/models/test_seq2seq_lm.py b/server/tests/models/test_seq2seq_lm.py index 60be77c8..2a2bdbb3 100644 --- a/server/tests/models/test_seq2seq_lm.py +++ b/server/tests/models/test_seq2seq_lm.py @@ -155,8 +155,20 @@ def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch) ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) - assert all([token_id.item() == 259 for generation in generations for token_id in generation.tokens.token_ids]) - assert all([token_text == " " for generation in generations for token_text in generation.tokens.texts]) + assert all( + [ + token_id.item() == 259 + for generation in generations + for token_id in generation.tokens.token_ids + ] + ) + assert all( + [ + token_text == " " + for generation in generations + for token_text in generation.tokens.texts + ] + ) assert generations[0].request_id == 0 diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py index 0817aee6..2caf63c0 100644 --- a/server/text_generation_server/cli.py +++ b/server/text_generation_server/cli.py @@ -132,7 +132,13 @@ def serve( return proc.returncode else: server.serve( - model_id, revision, sharded, speculate, dtype, trust_remote_code, uds_path + model_id, + revision, + sharded, + speculate, + dtype, + trust_remote_code, + uds_path ) @@ -190,12 +196,17 @@ def download_weights( try: import json - medusa_head = hf_hub_download(model_id, revision=revision, filename="medusa_lm_head.pt") + + medusa_head = hf_hub_download( + model_id, revision=revision, filename="medusa_lm_head.pt" + ) if auto_convert: - medusa_sf = Path(medusa_head[:-len(".pt")] + ".safetensors") + medusa_sf = Path(medusa_head[: -len(".pt")] + ".safetensors") if not medusa_sf.exists(): utils.convert_files([Path(medusa_head)], [medusa_sf], []) - medusa_config = hf_hub_download(model_id, revision=revision, filename="config.json") + medusa_config = hf_hub_download( + model_id, revision=revision, filename="config.json" + ) with open(medusa_config, "r") as f: config = json.load(f) @@ -203,10 +214,17 @@ def download_weights( revision = "main" try: utils.weight_files(model_id, revision, extension) - logger.info(f"Files for parent {model_id} are already present on the host. " "Skipping download.") + logger.info( + f"Files for parent {model_id} are already present on the host. " + "Skipping download." 
+ ) return # Local files not found - except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError): + except ( + utils.LocalEntryNotFoundError, + FileNotFoundError, + utils.EntryNotFoundError, + ): pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass diff --git a/server/text_generation_server/models/__init__.py b/server/text_generation_server/models/__init__.py index 581a007a..4ac1529a 100644 --- a/server/text_generation_server/models/__init__.py +++ b/server/text_generation_server/models/__init__.py @@ -43,7 +43,9 @@ def get_model( speculate_medusa = config_dict["medusa_num_heads"] if speculate is not None: if speculate > speculate_medusa: - raise RuntimeError("Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match") + raise RuntimeError( + "Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match" + ) else: set_speculate(speculate) else: diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index e8e63586..826a6304 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -1020,7 +1020,10 @@ class CausalLM(Model): skip_special_tokens=False, ) prefill_tokens = Tokens( - prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[] + prefill_token_ids, + prefill_logprobs, + prefill_texts, + is_special=[], ) else: prefill_tokens = None diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py index d06b87eb..3b424f80 100644 --- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py @@ -34,9 +34,10 @@ from text_generation_server.utils.layers import ( PositionRotaryEmbedding, TensorParallelHead, get_linear, - FastRMSNorm + FastRMSNorm, ) + class LlamaConfig(PretrainedConfig): def __init__( self, @@ -202,7 +203,7 @@ class FlashLlamaAttention(torch.nn.Module): ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) - + self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) paged_attention.reshape_and_cache( @@ -237,7 +238,7 @@ class FlashLlamaAttention(torch.nn.Module): input_lengths, max_s, ) - + return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) @@ -288,7 +289,9 @@ class FlashLlamaLayer(nn.Module): ) self.mlp = LlamaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) - self.input_layernorm = FastRMSNorm.load(prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps) + self.input_layernorm = FastRMSNorm.load( + prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps + ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index 4e56b188..525bf6bc 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -27,7 +27,11 @@ from transformers.configuration_utils import PretrainedConfig from typing import 
Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn -from text_generation_server.utils.flash_attn import attention, HAS_FLASH_ATTN_V2_ROCM, HAS_FLASH_ATTN_V2_CUDA +from text_generation_server.utils.flash_attn import ( + attention, + HAS_FLASH_ATTN_V2_ROCM, + HAS_FLASH_ATTN_V2_CUDA, +) from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, @@ -35,7 +39,7 @@ from text_generation_server.utils.layers import ( PositionRotaryEmbedding, TensorParallelHead, get_linear, - FastRMSNorm + FastRMSNorm, ) @@ -96,6 +100,7 @@ class MistralConfig(PretrainedConfig): **kwargs, ) + def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index 66753d5a..6f5edca2 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -29,7 +29,10 @@ from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn -from text_generation_server.utils.flash_attn import HAS_FLASH_ATTN_V2_ROCM, HAS_FLASH_ATTN_V2_CUDA +from text_generation_server.utils.flash_attn import ( + HAS_FLASH_ATTN_V2_ROCM, + HAS_FLASH_ATTN_V2_CUDA, +) from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, @@ -59,28 +62,28 @@ class MixtralConfig(PretrainedConfig): model_type = "mixtral" def __init__( - self, - vocab_size=32000, - hidden_size=4096, - intermediate_size=14336, - num_hidden_layers=32, - num_attention_heads=32, - num_key_value_heads=8, - hidden_act="silu", - max_position_embeddings=4096 * 32, - initializer_range=0.02, - rms_norm_eps=1e-05, - use_cache=True, - pad_token_id=None, - bos_token_id=1, - eos_token_id=2, - pretraining_tp=1, - tie_word_embeddings=False, - rope_theta=10000.0, - sliding_window=4096, - num_experts_per_tok=2, - num_local_experts=8, - **kwargs, + self, + vocab_size=32000, + hidden_size=4096, + intermediate_size=14336, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=8, + hidden_act="silu", + max_position_embeddings=4096 * 32, + initializer_range=0.02, + rms_norm_eps=1e-05, + use_cache=True, + pad_token_id=None, + bos_token_id=1, + eos_token_id=2, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + sliding_window=4096, + num_experts_per_tok=2, + num_local_experts=8, + **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings @@ -166,16 +169,18 @@ def _load_experts(config, prefix, mat, weights): rank = weights.process_group.rank() assert ( - config.intermediate_size % world_size == 0 + config.intermediate_size % world_size == 0 ), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards" block_size = config.intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size - tensor = torch.empty((config.num_local_experts * block_size, config.hidden_size), - dtype=weights.dtype, - device=weights.device) + tensor = torch.empty( + (config.num_local_experts * block_size, config.hidden_size), + dtype=weights.dtype, + device=weights.device, + ) for i in range(config.num_local_experts): slice_ = 
weights._get_slice(f"{prefix}.{i}.{mat}.weight") @@ -184,16 +189,18 @@ def _load_experts(config, prefix, mat, weights): expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] - tensor[i * block_size:(i + 1) * block_size] = expert_slice.to(dtype=weights.dtype).to(device=weights.device) + tensor[i * block_size : (i + 1) * block_size] = expert_slice.to( + dtype=weights.dtype + ).to(device=weights.device) return tensor class MixtralAttention(torch.nn.Module): def __init__( - self, - prefix: str, - config, - weights, + self, + prefix: str, + config, + weights, ): super().__init__() self.max_past = ( @@ -210,7 +217,7 @@ class MixtralAttention(torch.nn.Module): device=weights.device, ) - self.softmax_scale = self.head_size ** -0.5 + self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( @@ -219,7 +226,7 @@ class MixtralAttention(torch.nn.Module): ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( - config.num_key_value_heads // weights.process_group.size() + config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) @@ -236,17 +243,17 @@ class MixtralAttention(torch.nn.Module): ).repeat_interleave(self.num_groups) def forward( - self, - hidden_states, - cos, - sin, - cu_seqlen_prefill, - kv_cache, - block_tables, - slots, - input_lengths, - max_s, - prefill_cache_indices, + self, + hidden_states, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( @@ -399,8 +406,9 @@ class BlockSparseMoE(nn.Module): # Indices for the sparse matrix. The indices for # the intermediate matrix are dynamic depending # on the mapping of tokens to experts. - column_indices = ops.topology(padded_bins, self.blocking, block_rows, - blocks_per_row) + column_indices = ops.topology( + padded_bins, self.blocking, block_rows, blocks_per_row + ) # For now, use meta init to save the device memory. data = torch.empty( @@ -444,8 +452,7 @@ class BlockSparseMoE(nn.Module): # position of each bin. # List of size num_experts - padded_tokens_per_expert = round_up(tokens_per_expert, - self.blocking) + padded_tokens_per_expert = round_up(tokens_per_expert, self.blocking) # padded_tokens_per_expert => [128, O, 128, ...] 
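
A note on the padding arithmetic in the hunk above: before the sparse topology is built, each expert's token count is rounded up to the kernel block size and a cumulative sum marks where each expert's slab ends. A minimal, self-contained sketch with made-up counts (illustration of the arithmetic only, not the megablocks kernels):

import torch

def round_up(x: torch.Tensor, value: int) -> torch.Tensor:
    # Round each count up to the next multiple of `value` (the block size).
    return (x + value - 1) // value * value

blocking = 128
tokens_per_expert = torch.tensor([70, 0, 130, 1])  # hypothetical routing result
padded = round_up(tokens_per_expert, blocking)     # tensor([128,   0, 256, 128])
padded_bins = torch.cumsum(padded, dim=0)          # tensor([128, 128, 384, 512])
# padded_bins[i] is the end offset of expert i's block-aligned slab
# in the gathered token buffer.
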
# Cumulative selected experts per token @@ -484,8 +491,7 @@ class BlockSparseMoE(nn.Module): # Permute tokens and pad to prepare expert computation # (top_k * sequence_length + padding, model_dim) - x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, - self.top_k) + x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, self.top_k) # Create the sparse matrix topology with torch.no_grad(): @@ -496,8 +502,8 @@ class BlockSparseMoE(nn.Module): # (top_k * sequence_length + padding, ffn_dim * n_experts) x = stk.Matrix( topo.size(), - self.act(stk.ops.sdd(x, self.w1, topo).data) * - stk.ops.sdd(x, self.w3, topo).data, + self.act(stk.ops.sdd(x, self.w1, topo).data) + * stk.ops.sdd(x, self.w3, topo).data, topo.row_indices, topo.column_indices, topo.offsets, @@ -537,7 +543,9 @@ class MixtralLayer(nn.Module): self.self_attn = MixtralAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) - self.block_sparse_moe = BlockSparseMoE(f"{prefix}.block_sparse_moe", config, weights) + self.block_sparse_moe = BlockSparseMoE( + f"{prefix}.block_sparse_moe", config, weights + ) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps @@ -549,18 +557,18 @@ class MixtralLayer(nn.Module): ) def forward( - self, - hidden_states, - residual, - cos, - sin, - cu_seqlen_prefill, - kv_cache, - block_tables, - slots, - input_lengths, - max_s, - prefill_cache_indices, + self, + hidden_states, + residual, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + prefill_cache_indices, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) @@ -615,16 +623,16 @@ class MixtralModel(torch.nn.Module): self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( - self, - input_ids: torch.Tensor, - position_ids: torch.Tensor, - cu_seqlen_prefill: Optional[torch.Tensor], - kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], - block_tables: torch.Tensor, - slots: torch.Tensor, - input_lengths: torch.Tensor, - max_s: int, - prefill_cache_indices: Optional[torch.Tensor], + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) @@ -670,17 +678,17 @@ class FlashMixtralForCausalLM(torch.nn.Module): raise ValueError("max_past cannot be None") def forward( - self, - input_ids: torch.Tensor, - position_ids: torch.Tensor, - cu_seqlen_prefill: Optional[torch.Tensor], - kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], - block_tables: torch.Tensor, - slots: torch.Tensor, - input_lengths: torch.Tensor, - max_s: int, - prefill_cache_indices: Optional[torch.Tensor], - lm_head_indices: Optional[torch.Tensor] = None, + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + prefill_cache_indices: Optional[torch.Tensor], + lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor diff --git 
a/server/text_generation_server/models/custom_modeling/idefics_image_processing.py b/server/text_generation_server/models/custom_modeling/idefics_image_processing.py index 4760ae6f..e323d365 100644 --- a/server/text_generation_server/models/custom_modeling/idefics_image_processing.py +++ b/server/text_generation_server/models/custom_modeling/idefics_image_processing.py @@ -198,7 +198,9 @@ class IdeficsImageProcessor(BaseImageProcessor): image = image_url_or_urls if image.startswith("http://") or image.startswith("https://"): - response = requests.get(image_url_or_urls, stream=True, headers=headers, timeout=(1, 5)) + response = requests.get( + image_url_or_urls, stream=True, headers=headers, timeout=(1, 5) + ) response.raise_for_status() content = response.content elif image.startswith("data:"): @@ -213,7 +215,7 @@ class IdeficsImageProcessor(BaseImageProcessor): image = Image.open(BytesIO(content)) # image.verify() except Exception: - raise ValueError(f"Could not load image from url {image_url_or_urls}") + raise ValueError(f"Could not load image from url {image_url_or_urls}") return image else: raise ValueError( diff --git a/server/text_generation_server/models/custom_modeling/idefics_modeling.py b/server/text_generation_server/models/custom_modeling/idefics_modeling.py index 946f7683..555bf5af 100644 --- a/server/text_generation_server/models/custom_modeling/idefics_modeling.py +++ b/server/text_generation_server/models/custom_modeling/idefics_modeling.py @@ -62,6 +62,7 @@ if IS_CUDA_SYSTEM: elif IS_ROCM_SYSTEM: from vllm import layernorm_ops + @dataclass class BaseModelOutputWithPastImage(BaseModelOutputWithPast): image_hidden_states: Optional[torch.FloatTensor] = None @@ -431,7 +432,9 @@ class IdeficsRMSNorm(nn.Module): return out else: - raise ValueError("Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") + raise ValueError( + "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction." + ) # this was adapted from LlamaMLP @@ -613,8 +616,13 @@ class IdeficsAttention(nn.Module): query_shape = query_states.shape key_shape = key_states.shape - self.rotary_emb(query_states.view(-1, *query_shape[2:]), key_states.reshape(-1, *key_shape[2:]), cos, sin) - + self.rotary_emb( + query_states.view(-1, *query_shape[2:]), + key_states.reshape(-1, *key_shape[2:]), + cos, + sin, + ) + query_states = query_states.view(query_shape) key_states = key_states.view(key_shape) diff --git a/server/text_generation_server/models/custom_modeling/idefics_processing.py b/server/text_generation_server/models/custom_modeling/idefics_processing.py index 98e43a27..beca864b 100644 --- a/server/text_generation_server/models/custom_modeling/idefics_processing.py +++ b/server/text_generation_server/models/custom_modeling/idefics_processing.py @@ -112,6 +112,7 @@ def is_url(string): result = urlparse(string) return all([result.scheme, result.netloc]) + def is_image(string): """Checks if the passed string contains a valid url and nothing else. e.g. 
if space is included it's immediately invalidated the url""" @@ -344,7 +345,6 @@ class IdeficsProcessor(ProcessorMixin): image_objects = self.image_processor(image_objects, transform=transform) - text_encoding = self.tokenizer( text=full_text, add_special_tokens=False, diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 79344ea1..14d30635 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -11,7 +11,7 @@ from opentelemetry import trace from transformers import PreTrainedTokenizerBase from typing import Optional, Tuple, List, Type, Union, Dict -from text_generation_server.models import Model +from text_generation_server.models import Model from text_generation_server.utils.speculate import get_speculate from text_generation_server.models.types import ( Batch, @@ -165,8 +165,6 @@ class FlashCausalLMBatch(Batch): input_length = len(tokenized_input) input_lengths.append(input_length) - - prefix_offsets.append(input_length - 5) read_offsets.append(input_length) @@ -229,7 +227,9 @@ class FlashCausalLMBatch(Batch): cumulative_max_length += total_tokens max_seqlen = max(max_seqlen, input_length) max_blocks = max(max_blocks, needed_blocks) - max_length = max(max_length, input_length + max_new_tokens + speculative_length) + max_length = max( + max_length, input_length + max_new_tokens + speculative_length + ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype, device @@ -424,7 +424,9 @@ class FlashCausalLMBatch(Batch): slots = self.slots[slot_filtering_indices] next_token_chooser = self.next_token_chooser.filter(indices) top_n_tokens_tensor = self.top_n_tokens_tensor[indices] - speculative_ids = self.speculative_ids[indices] if self.speculative_ids is not None else None + speculative_ids = ( + self.speculative_ids[indices] if self.speculative_ids is not None else None + ) start_slots = torch.tensor(start_slots, dtype=torch.int64) @@ -480,7 +482,9 @@ class FlashCausalLMBatch(Batch): total_batch_size += len(b) total_slots += len(b.slots) blocks += b.blocks - speculative_length = b.speculative_ids.shape[1] if b.speculative_ids is not None else 0 + speculative_length = ( + b.speculative_ids.shape[1] if b.speculative_ids is not None else 0 + ) max_blocks = max(max_blocks, b.max_blocks) max_seqlen = max(max_seqlen, b.max_seqlen) max_length = max( @@ -586,7 +590,11 @@ class FlashCausalLMBatch(Batch): device=batches[0].next_token_chooser.device, ) - speculative_ids = torch.cat([b.speculative_ids for b in batches], dim=0) if batches[0].speculative_ids is not None else None + speculative_ids = ( + torch.cat([b.speculative_ids for b in batches], dim=0) + if batches[0].speculative_ids is not None + else None + ) # Needed to avoid dropping blocks when the batches will go out of scope for b in batches: @@ -622,7 +630,7 @@ class FlashCausalLMBatch(Batch): top_n_tokens_tensor=top_n_tokens_tensor, blocks=blocks, max_blocks=max_blocks, - speculative_ids=speculative_ids + speculative_ids=speculative_ids, ) def __del__(self): @@ -727,43 +735,54 @@ class FlashCausalLM(Model): def forward(self, batch: FlashCausalLMBatch) -> Tuple[torch.Tensor, torch.Tensor]: # Model Forward if batch.speculative_ids is not None: - input_ids=batch.input_ids - position_ids=batch.position_ids - cu_seqlen_prefill=batch.cu_seqlen_prefill - kv_cache=get_cache_manager().kv_cache - block_tables=batch.block_tables_tensor - 
slots=batch.slots[batch.slot_indices] - input_lengths=batch.input_lengths_tensor - max_s=batch.max_seqlen - lm_head_indices=batch.prefill_head_indices + input_ids = batch.input_ids + position_ids = batch.position_ids + cu_seqlen_prefill = batch.cu_seqlen_prefill + kv_cache = get_cache_manager().kv_cache + block_tables = batch.block_tables_tensor + slots = batch.slots[batch.slot_indices] + input_lengths = batch.input_lengths_tensor + max_s = batch.max_seqlen + lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids - B, speculative_length = speculative_ids.shape + B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 - new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1) + new_input_ids = torch.cat( + [input_ids.unsqueeze(-1), speculative_ids], dim=1 + ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) - new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1) + new_position_ids = ( + position_ids.unsqueeze(-1).expand(B, new_length) + arange + ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) - input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) + input_lengths = ( + input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int + ).view(-1) # Add Copy the block tables for all members - block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B* new_length, -1).contiguous() + block_tables = ( + block_tables.unsqueeze(1) + .expand(B, new_length, -1) + .reshape(B * new_length, -1) + .contiguous() + ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: - input_ids=batch.input_ids - position_ids=batch.position_ids - cu_seqlen_prefill=batch.cu_seqlen_prefill - kv_cache=get_cache_manager().kv_cache - block_tables=batch.block_tables_tensor - slots=batch.slots[batch.slot_indices] - input_lengths=batch.input_lengths_tensor - max_s=batch.max_seqlen - lm_head_indices=batch.prefill_head_indices + input_ids = batch.input_ids + position_ids = batch.position_ids + cu_seqlen_prefill = batch.cu_seqlen_prefill + kv_cache = get_cache_manager().kv_cache + block_tables = batch.block_tables_tensor + slots = batch.slots[batch.slot_indices] + input_lengths = batch.input_lengths_tensor + max_s = batch.max_seqlen + lm_head_indices = batch.prefill_head_indices return self.model.forward( input_ids=input_ids, @@ -808,20 +827,31 @@ class FlashCausalLM(Model): else: speculative_logits = None - if prefill: next_token_logits = ( out[batch.prefill_next_token_indices] if prefill_logprobs else out ) if speculative_logits is not None: speculative_logits = ( - speculative_logits[batch.prefill_next_token_indices] if prefill_logprobs else speculative_logits + speculative_logits[batch.prefill_next_token_indices] + if prefill_logprobs + else speculative_logits ) else: next_token_logits = out - next_input_ids, next_token_logprobs, logprobs, accepted_ids, speculative_ids = batch.next_token_chooser( - batch.all_input_ids_tensor[:, : batch.max_seqlen], next_token_logits, get_speculate(), batch.speculative_ids, speculative_logits + ( + next_input_ids, + next_token_logprobs, + logprobs, + accepted_ids, + speculative_ids, + ) = batch.next_token_chooser( + batch.all_input_ids_tensor[:, : batch.max_seqlen], + next_token_logits, + get_speculate(), + batch.speculative_ids, + speculative_logits, ) 
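
The speculative-decoding expansion earlier in this hunk is easiest to follow with toy values: each request contributes its current token followed by its speculative candidates, and position ids advance by an arange over the widened window. A model-free sketch with made-up ids (shapes and indexing only):

import torch

input_ids = torch.tensor([11, 22])                        # one current token per request
speculative_ids = torch.tensor([[101, 102], [201, 202]])  # 2 speculative tokens each
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1

new_input_ids = torch.cat(
    [input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
# -> tensor([ 11, 101, 102,  22, 201, 202])

position_ids = torch.tensor([5, 9])
arange = torch.arange(new_length).unsqueeze(0)
new_position_ids = (
    position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
# -> tensor([ 5,  6,  7,  9, 10, 11])
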
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( @@ -851,11 +881,7 @@ class FlashCausalLM(Model): stopped = True # Zipped iterator - iterator = zip( - batch.input_lengths, - batch.all_input_ids, - accepted_ids - ) + iterator = zip(batch.input_lengths, batch.all_input_ids, accepted_ids) # We do two for loops as the first one can run completely asynchronously from the GPU while for the second # one, we need to first do a GPU <-> CPU sync @@ -863,11 +889,7 @@ class FlashCausalLM(Model): # For each member of the batch index = 0 - for i, ( - input_length, - all_input_ids, - n_accepted_ids - ) in enumerate(iterator): + for i, (input_length, all_input_ids, n_accepted_ids) in enumerate(iterator): # Indexing metadata start_index = cumulative_length end_index = cumulative_length + input_length @@ -901,7 +923,6 @@ class FlashCausalLM(Model): cumulative_length += input_length - batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1] batch.speculative_ids = speculative_ids batch.position_ids = next_position_ids + accepted_ids @@ -983,8 +1004,10 @@ class FlashCausalLM(Model): current_stopped = False stopped = stopped and current_stopped - _next_token_ids = next_token_ids[index: index+n_accepted_ids - left] - _next_token_logprobs = next_token_logprobs[index: index+n_accepted_ids - left] + _next_token_ids = next_token_ids[index : index + n_accepted_ids - left] + _next_token_logprobs = next_token_logprobs[ + index : index + n_accepted_ids - left + ] index += n_accepted_ids # Shard generations @@ -1027,7 +1050,10 @@ class FlashCausalLM(Model): ) prefill_tokens = Tokens( - prefill_token_ids, request_prefill_logprobs, prefill_texts, is_special = [] + prefill_token_ids, + request_prefill_logprobs, + prefill_texts, + is_special=[], ) else: prefill_tokens = None diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py index 3a84b1b6..2415a245 100644 --- a/server/text_generation_server/models/flash_llama.py +++ b/server/text_generation_server/models/flash_llama.py @@ -71,12 +71,19 @@ class FlashLlama(FlashCausalLM): from text_generation_server.utils.medusa import MedusaModel from huggingface_hub import hf_hub_download import json - medusa_config = hf_hub_download(use_medusa, revision=revision, filename="config.json") + + medusa_config = hf_hub_download( + use_medusa, revision=revision, filename="config.json" + ) with open(medusa_config, "r") as f: config = json.load(f) - medusa_head = hf_hub_download(use_medusa, revision=revision, filename="medusa_lm_head.pt") - medusa_sf = medusa_head[:-len(".pt")] + ".safetensors" - weights = Weights([medusa_sf], device, dtype, process_group=self.process_group) + medusa_head = hf_hub_download( + use_medusa, revision=revision, filename="medusa_lm_head.pt" + ) + medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" + weights = Weights( + [medusa_sf], device, dtype, process_group=self.process_group + ) lm_head = model.lm_head model.lm_head = MedusaModel(config, weights, lm_head) diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index 5ce37164..0fad5aa8 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ b/server/text_generation_server/models/flash_mistral.py @@ -45,11 +45,11 @@ class FlashMistralBatch(FlashCausalLMBatch): @classmethod def from_pb( - cls, - pb: generate_pb2.Batch, - tokenizer: PreTrainedTokenizerBase, - dtype: torch.dtype, - device: torch.device, + cls, + pb: generate_pb2.Batch, + 
tokenizer: PreTrainedTokenizerBase, + dtype: torch.dtype, + device: torch.device, ) -> "FlashCausalLMBatch": global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS @@ -99,12 +99,12 @@ class FlashMistralBatch(FlashCausalLMBatch): # Parse batch for i, (r, tokenized_input) in enumerate( - zip(pb.requests, batch_tokenized_inputs) + zip(pb.requests, batch_tokenized_inputs) ): # request id -> idx in list mapping requests_idx_mapping[r.id] = i - tokenized_input = tokenized_input[-r.truncate:] + tokenized_input = tokenized_input[-r.truncate :] input_length = len(tokenized_input) input_lengths.append(input_length) @@ -184,7 +184,9 @@ class FlashMistralBatch(FlashCausalLMBatch): cumulative_max_length += total_tokens max_seqlen = max(max_seqlen, input_length) max_blocks = max(max_blocks, needed_blocks) - max_length = max(max_length, input_length + max_new_tokens + speculative_length) + max_length = max( + max_length, input_length + max_new_tokens + speculative_length + ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype, device @@ -273,20 +275,20 @@ class FlashMistralBatch(FlashCausalLMBatch): blocks=blocks, max_blocks=max_blocks, prefill_cache_indices=prefill_cache_indices, - speculative_ids=None + speculative_ids=None, ) class BaseFlashMistral(FlashCausalLM): def __init__( - self, - config_cls, - model_cls, - model_id: str, - revision: Optional[str] = None, - quantize: Optional[str] = None, - dtype: Optional[torch.dtype] = None, - trust_remote_code: bool = False, + self, + config_cls, + model_cls, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, ): global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS @@ -345,43 +347,54 @@ class BaseFlashMistral(FlashCausalLM): def forward(self, batch: FlashMistralBatch) -> Tuple[torch.Tensor, torch.Tensor]: # Model Forward if batch.speculative_ids is not None: - input_ids=batch.input_ids - position_ids=batch.position_ids - cu_seqlen_prefill=batch.cu_seqlen_prefill - kv_cache=get_cache_manager().kv_cache - block_tables=batch.block_tables_tensor - slots=batch.slots[batch.slot_indices] - input_lengths=batch.input_lengths_tensor - max_s=batch.max_seqlen - lm_head_indices=batch.prefill_head_indices + input_ids = batch.input_ids + position_ids = batch.position_ids + cu_seqlen_prefill = batch.cu_seqlen_prefill + kv_cache = get_cache_manager().kv_cache + block_tables = batch.block_tables_tensor + slots = batch.slots[batch.slot_indices] + input_lengths = batch.input_lengths_tensor + max_s = batch.max_seqlen + lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids - B, speculative_length = speculative_ids.shape + B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 - new_input_ids = torch.cat([input_ids.unsqueeze(-1), speculative_ids], dim=1).reshape(-1) + new_input_ids = torch.cat( + [input_ids.unsqueeze(-1), speculative_ids], dim=1 + ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) - new_position_ids = (position_ids.unsqueeze(-1).expand(B, new_length) + arange).view(-1) + new_position_ids = ( + position_ids.unsqueeze(-1).expand(B, new_length) + arange + ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) - input_lengths = (input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) + input_lengths = ( + input_lengths.unsqueeze(-1).expand(B, 
new_length) + arange_int + ).view(-1) # Add Copy the block tables for all members - block_tables = block_tables.unsqueeze(1).expand(B, new_length, -1).reshape(B* new_length, -1).contiguous() + block_tables = ( + block_tables.unsqueeze(1) + .expand(B, new_length, -1) + .reshape(B * new_length, -1) + .contiguous() + ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: - input_ids=batch.input_ids - position_ids=batch.position_ids - cu_seqlen_prefill=batch.cu_seqlen_prefill - kv_cache=get_cache_manager().kv_cache - block_tables=batch.block_tables_tensor - slots=batch.slots[batch.slot_indices] - input_lengths=batch.input_lengths_tensor - max_s=batch.max_seqlen - lm_head_indices=batch.prefill_head_indices + input_ids = batch.input_ids + position_ids = batch.position_ids + cu_seqlen_prefill = batch.cu_seqlen_prefill + kv_cache = get_cache_manager().kv_cache + block_tables = batch.block_tables_tensor + slots = batch.slots[batch.slot_indices] + input_lengths = batch.input_lengths_tensor + max_s = batch.max_seqlen + lm_head_indices = batch.prefill_head_indices logits = self.model.forward( input_ids=input_ids, position_ids=position_ids, @@ -401,12 +414,12 @@ class BaseFlashMistral(FlashCausalLM): class FlashMistral(BaseFlashMistral): def __init__( - self, - model_id: str, - revision: Optional[str] = None, - quantize: Optional[str] = None, - dtype: Optional[torch.dtype] = None, - trust_remote_code: bool = False, + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, ): super(FlashMistral, self).__init__( config_cls=MistralConfig, @@ -415,5 +428,5 @@ class FlashMistral(BaseFlashMistral): revision=revision, quantize=quantize, dtype=dtype, - trust_remote_code=trust_remote_code + trust_remote_code=trust_remote_code, ) diff --git a/server/text_generation_server/models/flash_mixtral.py b/server/text_generation_server/models/flash_mixtral.py index c45ae50f..6f77a658 100644 --- a/server/text_generation_server/models/flash_mixtral.py +++ b/server/text_generation_server/models/flash_mixtral.py @@ -3,17 +3,20 @@ import torch from typing import Optional from text_generation_server.models.flash_mistral import BaseFlashMistral -from text_generation_server.models.custom_modeling.flash_mixtral_modeling import MixtralConfig, FlashMixtralForCausalLM +from text_generation_server.models.custom_modeling.flash_mixtral_modeling import ( + MixtralConfig, + FlashMixtralForCausalLM, +) class FlashMixtral(BaseFlashMistral): def __init__( - self, - model_id: str, - revision: Optional[str] = None, - quantize: Optional[str] = None, - dtype: Optional[torch.dtype] = None, - trust_remote_code: bool = False, + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, ): super(FlashMixtral, self).__init__( config_cls=MixtralConfig, @@ -22,5 +25,5 @@ class FlashMixtral(BaseFlashMistral): revision=revision, quantize=quantize, dtype=dtype, - trust_remote_code=trust_remote_code + trust_remote_code=trust_remote_code, ) diff --git a/server/text_generation_server/models/idefics_causal_lm.py b/server/text_generation_server/models/idefics_causal_lm.py index 2f4bb139..86389ad2 100644 --- a/server/text_generation_server/models/idefics_causal_lm.py +++ b/server/text_generation_server/models/idefics_causal_lm.py @@ -792,7 +792,10 @@ class IdeficsCausalLM(Model): 
skip_special_tokens=False, ) prefill_tokens = Tokens( - prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[] + prefill_token_ids, + prefill_logprobs, + prefill_texts, + is_special=[], ) else: prefill_tokens = None @@ -803,10 +806,10 @@ class IdeficsCausalLM(Model): request.id, prefill_tokens, Tokens( - [next_token_id_squeezed], - [next_token_logprob], - [next_token_text], - [next_token_id_squeezed.item() in self.all_special_ids], + [next_token_id_squeezed], + [next_token_logprob], + [next_token_text], + [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, diff --git a/server/text_generation_server/models/model.py b/server/text_generation_server/models/model.py index 52be52ff..c835ec34 100644 --- a/server/text_generation_server/models/model.py +++ b/server/text_generation_server/models/model.py @@ -51,7 +51,7 @@ class Model(ABC): requires_padding=self.requires_padding, dtype=str(self.dtype), device_type=self.device.type, - speculate=self.speculate + speculate=self.speculate, ) @property diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py index 279b5505..a85ef58e 100644 --- a/server/text_generation_server/models/seq2seq_lm.py +++ b/server/text_generation_server/models/seq2seq_lm.py @@ -736,7 +736,7 @@ class Seq2SeqLM(Model): [self.tokenizer.bos_token_id], [float("nan")], [self.tokenizer.bos_token], - [False] + [False], ) else: prefill_tokens = None @@ -763,10 +763,10 @@ class Seq2SeqLM(Model): request.id, prefill_tokens, Tokens( - [next_token_id_squeezed], - [next_token_logprob], - [next_token_text], - [next_token_id_squeezed.item() in self.all_special_ids], + [next_token_id_squeezed], + [next_token_logprob], + [next_token_text], + [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, diff --git a/server/text_generation_server/models/types.py b/server/text_generation_server/models/types.py index 87c03d63..f85f27e5 100644 --- a/server/text_generation_server/models/types.py +++ b/server/text_generation_server/models/types.py @@ -66,7 +66,10 @@ class Tokens: def to_pb(self) -> generate_pb2.Tokens: return generate_pb2.Tokens( - ids=self.token_ids, logprobs=self.logprobs, texts=self.texts, is_special=self.is_special + ids=self.token_ids, + logprobs=self.logprobs, + texts=self.texts, + is_special=self.is_special, ) def __len__(self): diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py index 33a26bd2..8cd03150 100644 --- a/server/text_generation_server/server.py +++ b/server/text_generation_server/server.py @@ -156,7 +156,11 @@ def serve( revision = None try: model = get_model( - model_id, revision, speculate, dtype=data_type, trust_remote_code=trust_remote_code + model_id, + revision, + speculate, + data_type, + trust_remote_code ) except Exception: logger.exception("Error when initializing model") @@ -189,5 +193,7 @@ def serve( await server.stop(0) asyncio.run( - serve_inner(model_id, revision, sharded, speculate, dtype, trust_remote_code) + serve_inner( + model_id, revision, sharded, speculate, dtype, trust_remote_code + ) ) diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py index aca95e11..3237df82 100644 --- a/server/text_generation_server/utils/flash_attn.py +++ b/server/text_generation_server/utils/flash_attn.py @@ -51,7 +51,9 @@ except ImportError as e: ) from e elif IS_ROCM_SYSTEM: for idx in range(torch.cuda.device_count()): - if "MI210" 
not in torch.cuda.get_device_name(idx) and "MI250" not in torch.cuda.get_device_name(idx): + if "MI210" not in torch.cuda.get_device_name( + idx + ) and "MI250" not in torch.cuda.get_device_name(idx): raise ImportError( f"AMD GPU {torch.cuda.get_device_name(idx)} does not support flash-attention" ) @@ -91,8 +93,10 @@ def attention( ) elif HAS_FLASH_ATTN_V2_ROCM: if window_size_left != -1: - raise ValueError(f"RoCm version of Flash Attention v2 does not support window attention (window_size_left != -1, got window_size_left={window_size_left}).") - + raise ValueError( + f"RoCm version of Flash Attention v2 does not support window attention (window_size_left != -1, got window_size_left={window_size_left})." + ) + # RoCm flash API does not take the window_size_left and window_size_right arguments. return flash_attn_2_cuda.varlen_fwd( q, diff --git a/server/text_generation_server/utils/gptq/exllamav2.py b/server/text_generation_server/utils/gptq/exllamav2.py index 1945338b..f820f0d9 100644 --- a/server/text_generation_server/utils/gptq/exllamav2.py +++ b/server/text_generation_server/utils/gptq/exllamav2.py @@ -11,40 +11,44 @@ logger = getLogger(__name__) try: from exllamav2_kernels import make_q_matrix, gemm_half_q_half except ImportError: - logger.error('exllamav2_kernels not installed.') + logger.error("exllamav2_kernels not installed.") raise # Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension none_tensor = torch.empty((1, 1), device="meta") + def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): """Matrix multiplication, returns x @ q4""" output_shape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) - output = torch.empty((x.shape[0], q4_width), dtype = torch.half, device = x.device) + output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device) gemm_half_q_half(x, q_handle, output, force_cuda) return output.view(output_shape) + def ext_make_q_matrix(w: dict, temp_dq, key: str = None): """ - Create Q matrix + Create Q matrix """ # EXL2 - # won't work as the moment because the tensors are not the same. + # won't work as the moment because the tensors are not the same. if "q_weight" in w: w["q_scale_max"] /= 256 w["q_perm"] = w["q_perm"].short() w["q_invperm"] = w["q_invperm"].short() - return make_q_matrix(w["q_weight"], - w["q_perm"], - w["q_invperm"], - w["q_scale"], - w["q_scale_max"], - w["q_groups"], - none_tensor, - none_tensor, - none_tensor, - temp_dq) + return make_q_matrix( + w["q_weight"], + w["q_perm"], + w["q_invperm"], + w["q_scale"], + w["q_scale_max"], + w["q_groups"], + none_tensor, + none_tensor, + none_tensor, + temp_dq, + ) # GPTQ elif "qweight" in w: if w["scales"].dtype == torch.float: @@ -52,31 +56,40 @@ def ext_make_q_matrix(w: dict, temp_dq, key: str = None): # GPTQ with g_idx (act_order) if w.get("g_idx", None) is not None and not (w["g_idx"] == 0).all().item(): - w["q_perm"] = torch.empty((w["qweight"].shape[0] * 8,), dtype = torch.short, device = w["qweight"].device) + w["q_perm"] = torch.empty( + (w["qweight"].shape[0] * 8,), + dtype=torch.short, + device=w["qweight"].device, + ) w["q_invperm"] = torch.empty_like(w["q_perm"]) # make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx. 
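
For reference, the act-order test in this hunk reduces to a small predicate: a g_idx that is absent or all zeros selects the non-permuted path. A toy sketch of that check (illustration of the condition above only, not part of the kernels):

from typing import Optional

import torch

def uses_act_order(g_idx: Optional[torch.Tensor]) -> bool:
    # Mirrors the condition above: g_idx present and not all zeros.
    return g_idx is not None and not (g_idx == 0).all().item()

print(uses_act_order(None))                               # False
print(uses_act_order(torch.zeros(8, dtype=torch.int32)))  # False
print(uses_act_order(torch.tensor([0, 0, 1, 1])))         # True
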
- return make_q_matrix(w["qweight"], - w["q_perm"], - w["q_invperm"], - none_tensor, - none_tensor, - none_tensor, - w["qzeros"], - w["scales"], - w["g_idx"].cpu(), - temp_dq) + return make_q_matrix( + w["qweight"], + w["q_perm"], + w["q_invperm"], + none_tensor, + none_tensor, + none_tensor, + w["qzeros"], + w["scales"], + w["g_idx"].cpu(), + temp_dq, + ) # GPTQ without g_idx else: - return make_q_matrix(w["qweight"], - none_tensor, - none_tensor, - none_tensor, - none_tensor, - none_tensor, - w["qzeros"], - w["scales"], - none_tensor, - temp_dq) + return make_q_matrix( + w["qweight"], + none_tensor, + none_tensor, + none_tensor, + none_tensor, + none_tensor, + w["qzeros"], + w["scales"], + none_tensor, + temp_dq, + ) + DEVICE = None FIXED_BYTES = 0 @@ -106,14 +119,15 @@ class QuantLinear(nn.Module): super().__init__() if bits != 4: raise ValueError( - f"Exllamav2 kernel supports only bits=4, requested bits={bits}. Something is wrong in the model initialization.") + f"Exllamav2 kernel supports only bits=4, requested bits={bits}. Something is wrong in the model initialization." + ) self.q_handle = None self.q_tensors = None self.bits = bits - self.maxq = 2 ** self.bits - 1 + self.maxq = 2**self.bits - 1 self.infeatures = qweight.shape[0] // self.bits * 32 self.outfeatures = qweight.shape[1] - self.padding = - self.outfeatures % 32 + self.padding = -self.outfeatures % 32 self.outfeatures = self.outfeatures + self.padding self.device = qweight.device @@ -128,9 +142,12 @@ class QuantLinear(nn.Module): outfeatures = self.outfeatures assert qweight.shape == (infeatures // 32 * self.bits, outfeatures) assert infeatures % self.group_size == 0 - assert qzeros.shape == (infeatures // self.group_size, outfeatures // 32 * self.bits) + assert qzeros.shape == ( + infeatures // self.group_size, + outfeatures // 32 * self.bits, + ) assert scales.shape == (infeatures // self.group_size, outfeatures) - assert g_idx.shape == (infeatures, ), f"{g_idx.shape}, {infeatures}" + assert g_idx.shape == (infeatures,), f"{g_idx.shape}, {infeatures}" global FIXED_BYTES, LAYERS FIXED_BYTES = max(FIXED_BYTES, self.scratch_space_fixed()) @@ -140,33 +157,31 @@ class QuantLinear(nn.Module): assert self.qweight.device.type == "cuda" assert self.qweight.device.index is not None self.q_tensors = { - "qweight":self.qweight, - "qzeros":self.qzeros, - "scales":self.scales, - "g_idx":self.g_idx + "qweight": self.qweight, + "qzeros": self.qzeros, + "scales": self.scales, + "g_idx": self.g_idx, } temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size()) - self.q_handle = ext_make_q_matrix( - self.q_tensors, temp_dq - ) - - def forward(self, x, force_cuda = False): + self.q_handle = ext_make_q_matrix(self.q_tensors, temp_dq) + + def forward(self, x, force_cuda=False): output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda) if self.bias is not None: output.add_(self.bias) return output - + def temp_dq_size(self): return self.infeatures * self.outfeatures * 2 + 128 - + def temp_fwd_size(self, max_input_len, max_batch_size): return self.outfeatures * max_input_len * max_batch_size * 4 + 128 - + def scratch_space_fixed(self, max_input_len=4096, max_batch_size=16): return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size) - - + + class ExLlamaV2DeviceTensors: device_idx: int @@ -177,13 +192,16 @@ class ExLlamaV2DeviceTensors: def __init__(self, device, scratch_bytes): self.device = device self.scratch_bytes = scratch_bytes - + def prepare(self): - self.scratch = torch.empty((self.scratch_bytes // 
2,), dtype = torch.half, device = self.device) + self.scratch = torch.empty( + (self.scratch_bytes // 2,), dtype=torch.half, device=self.device + ) def get_scratch_slice(self, size_bytes): - if self.scratch is None: self.prepare() + if self.scratch is None: + self.prepare() size_bytes = ((size_bytes + 127) // 128) * 128 size_half = size_bytes // 2 diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index d533016d..77e2fdb6 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -35,7 +35,9 @@ HAS_EXLLAMA = False CAN_EXLLAMA = major >= 8 V2 = os.getenv("EXLLAMA_VERSION", "2") == "2" if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: - logger.warning("Disabling exllama v2 and using v1 instead because there are issues when sharding") + logger.warning( + "Disabling exllama v2 and using v1 instead because there are issues when sharding" + ) V2 = False if os.getenv("DISABLE_EXLLAMA") == "True": @@ -43,17 +45,19 @@ if os.getenv("DISABLE_EXLLAMA") == "True": elif CAN_EXLLAMA: try: if V2: - from text_generation_server.utils.gptq.exllamav2 import (QuantLinear as ExllamaQuantLinear, - create_exllama_buffers, - set_device, - ) + from text_generation_server.utils.gptq.exllamav2 import ( + QuantLinear as ExllamaQuantLinear, + create_exllama_buffers, + set_device, + ) HAS_EXLLAMA = "2" else: - from text_generation_server.utils.gptq.exllama import (Ex4bitLinear as ExllamaQuantLinear, - create_exllama_buffers, - set_device, - ) + from text_generation_server.utils.gptq.exllama import ( + Ex4bitLinear as ExllamaQuantLinear, + create_exllama_buffers, + set_device, + ) HAS_EXLLAMA = "1" @@ -114,7 +118,7 @@ def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, st @classmethod def load_conv2d_no_bias( - cls, prefix, weights, in_channels, out_channels, kernel_size, stride + cls, prefix, weights, in_channels, out_channels, kernel_size, stride ): weight = weights.get_tensor(f"{prefix}.weight") with init_empty_weights(): @@ -138,9 +142,9 @@ torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias class FastLinear(nn.Module): def __init__( - self, - weight, - bias, + self, + weight, + bias, ) -> None: super().__init__() self.weight = nn.Parameter(weight) @@ -164,9 +168,9 @@ class FastLinear(nn.Module): class EETQLinear(nn.Module): def __init__( - self, - weight, - bias, + self, + weight, + bias, ) -> None: super().__init__() device = weight.device @@ -185,13 +189,13 @@ class EETQLinear(nn.Module): class Linear8bitLt(nn.Module): def __init__( - self, - weight, - bias, - has_fp16_weights=True, - memory_efficient_backward=False, - threshold=0.0, - index=None, + self, + weight, + bias, + has_fp16_weights=True, + memory_efficient_backward=False, + threshold=0.0, + index=None, ): super().__init__() assert ( @@ -325,7 +329,9 @@ def get_linear(weight, bias, quantize): ) if use_exllama: - linear = ExllamaQuantLinear(qweight, qzeros, scales, g_idx, bias, bits, groupsize) + linear = ExllamaQuantLinear( + qweight, qzeros, scales, g_idx, bias, bits, groupsize + ) else: linear = QuantLinear( qweight, @@ -533,7 +539,6 @@ try: else: dropout_layer_norm = None - class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if hidden_states.shape[-1] > 8192 or IS_ROCM_SYSTEM: @@ -569,7 +574,6 @@ try: return normed_hidden_states, residual - class FastRMSNorm(nn.Module): def __init__(self, weight: torch.Tensor, eps: float): super().__init__() @@ -601,7 +605,11 @@ try: return 
self.weight * hidden_states, residual elif IS_CUDA_SYSTEM: # faster post attention rms norm - normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd( + ( + normed_hidden_states, + res, + *rest, + ) = dropout_layer_norm.dropout_add_ln_fwd( hidden_states, residual, self.weight, @@ -638,7 +646,8 @@ try: return out, residual else: raise ValueError( - "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") + "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction." + ) except ImportError: pass @@ -650,14 +659,12 @@ try: elif IS_ROCM_SYSTEM: from vllm import pos_encoding_ops - def _create_inv_freq(dim, base, device): inv_freq = 1.0 / ( - base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim) + base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim) ) return inv_freq - def _get_rope_config(config): if os.getenv("ROPE_SCALING", None) is not None: rope_scaling = { @@ -667,7 +674,6 @@ try: return rope_scaling return getattr(config, "rope_scaling", None) - class PositionRotaryEmbedding(nn.Module): def __init__(self, inv_freq, scaling_factor): super().__init__() @@ -680,17 +686,23 @@ try: self.scaling_factor = scaling_factor self.dynamic_args = None - def forward(self, query: torch.Tensor, key: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor): + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + ): # Such controlflows may add some overhead. if IS_CUDA_SYSTEM: rotary_dim = cos.shape[-1] q1 = query[..., :rotary_dim] - q2 = query[..., rotary_dim: 2 * rotary_dim] + q2 = query[..., rotary_dim : 2 * rotary_dim] rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False) k1 = key[..., :rotary_dim] - k2 = key[..., rotary_dim: 2 * rotary_dim] + k2 = key[..., rotary_dim : 2 * rotary_dim] rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False) elif IS_ROCM_SYSTEM: @@ -700,17 +712,11 @@ try: head_size = query.shape[-1] # Inplace operation, updating query and key. - pos_encoding_ops.rotary_embedding( - query, - key, - head_size, - cos, - sin, - True - ) + pos_encoding_ops.rotary_embedding(query, key, head_size, cos, sin, True) else: raise ValueError( - "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction.") + "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction." 
+ ) @classmethod def static(cls, config, dim, base, device): @@ -732,15 +738,16 @@ try: elif rope_scaling["type"] == "yarn": return YarnPositionRotaryEmbedding( dim=2 * inv_freq.shape[0], - max_position_embeddings=rope_scaling["original_max_position_embeddings"], + max_position_embeddings=rope_scaling[ + "original_max_position_embeddings" + ], base=10000.0, device=inv_freq.device, scaling_factor=scaling_factor, extrapolation_factor=1, attn_factor=1, beta_fast=32, - beta_slow=1 - + beta_slow=1, ) else: raise NotImplementedError( @@ -773,15 +780,16 @@ try: elif rope_scaling["type"] == "yarn": return YarnPositionRotaryEmbedding( dim=2 * inv_freq.shape[0], - max_position_embeddings=rope_scaling["original_max_position_embeddings"], + max_position_embeddings=rope_scaling[ + "original_max_position_embeddings" + ], base=10000.0, device=inv_freq.device, scaling_factor=scaling_factor, extrapolation_factor=1, attn_factor=1, beta_fast=32, - beta_slow=1 - + beta_slow=1, ) else: raise NotImplementedError( @@ -793,9 +801,9 @@ try: # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) @@ -809,7 +817,7 @@ try: self._sin_cached = torch.sin(freqs).to(dtype) def get_cos_sin( - self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype + self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype ): """ Return cos and sin for the asked position ids @@ -827,7 +835,6 @@ try: # Note: this unsqueeze is not necessary on RoCm + VLLM ROPE implementation, but we leave it as is to avoid yet an other controlflow. 
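            # A minimal sketch of the lookup performed above, assuming head_dim=4
            # and base=10000 (names are illustrative, not the cached buffers):
            #   inv_freq = 1.0 / (10000 ** (torch.arange(0, 4, 2).float() / 4))  # (2,)
            #   freqs = torch.outer(torch.arange(max_s).float(), inv_freq)       # (max_s, 2)
            #   cos = torch.cos(freqs)[position_ids]                             # (n_tokens, 2)
            #   sin = torch.sin(freqs)[position_ids]
            # The unsqueeze below only adds a broadcast dimension over attention
            # heads, so the same rotation is shared by every head of a token.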
return cos.unsqueeze(1), sin.unsqueeze(1) - class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding): def __init__(self, dim, max_position_embeddings, base, device, scaling_factor): inv_freq = _create_inv_freq(dim, base, device) @@ -840,14 +847,14 @@ try: # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): if seqlen > self.max_position_embeddings: newbase = self.base * ( - (self.scaling_factor * seqlen / self.max_position_embeddings) - - (self.scaling_factor - 1) + (self.scaling_factor * seqlen / self.max_position_embeddings) + - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) self.inv_freq = _create_inv_freq( self.dim, newbase, self.inv_freq.device @@ -861,24 +868,28 @@ try: self._cos_cached = torch.cos(freqs).to(dtype) self._sin_cached = torch.sin(freqs).to(dtype) - # Inverse dim formula to find dim based on number of rotations import math - - def find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=2048): - return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base)) - + def find_correction_dim( + num_rotations, dim, base=10000, max_position_embeddings=2048 + ): + return ( + dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi)) + ) / (2 * math.log(base)) # Find dim range bounds based on rotations - def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048): - low = math.floor(find_correction_dim( - low_rot, dim, base, max_position_embeddings)) - high = math.ceil(find_correction_dim( - high_rot, dim, base, max_position_embeddings)) + def find_correction_range( + low_rot, high_rot, dim, base=10000, max_position_embeddings=2048 + ): + low = math.floor( + find_correction_dim(low_rot, dim, base, max_position_embeddings) + ) + high = math.ceil( + find_correction_dim(high_rot, dim, base, max_position_embeddings) + ) return max(low, 0), min(high, dim - 1) # Clamp values just in case - def linear_ramp_mask(min, max, dim): if min == max: max += 0.001 # Prevent singularity @@ -887,16 +898,25 @@ try: ramp_func = torch.clamp(linear_func, 0, 1) return ramp_func - def get_mscale(scale=1): if scale <= 1: return 1.0 return 0.1 * math.log(scale) + 1.0 - class YarnPositionRotaryEmbedding(PositionRotaryEmbedding): - def __init__(self, dim, max_position_embeddings, base, device, scaling_factor, *, extrapolation_factor, - attn_factor, beta_fast, beta_slow): + def __init__( + self, + dim, + max_position_embeddings, + base, + device, + scaling_factor, + *, + extrapolation_factor, + attn_factor, + beta_fast, + beta_slow, + ): inv_freq = _create_inv_freq(dim, base, device) super().__init__(inv_freq, scaling_factor) self.dim = dim @@ -906,16 +926,17 @@ try: self.attn_factor = attn_factor self.beta_fast = beta_fast self.beta_slow = beta_slow - self.mscale = float(get_mscale( - self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation + self.mscale = float( + get_mscale(self.scaling_factor) * self.attn_factor + ) # Get n-d magnitude scaling corrected for interpolation def _update_cos_sin_cache(self, dtype, device, seqlen): # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if ( - 
seqlen > self._seq_len_cached - or self._cos_cached.device != device - or self._cos_cached.dtype != dtype + seqlen > self._seq_len_cached + or self._cos_cached.device != device + or self._cos_cached.dtype != dtype ): if seqlen > self.max_position_embeddings: inv_freq_extrapolation = _create_inv_freq( @@ -923,15 +944,26 @@ try: ) freqs = 1.0 / inv_freq_extrapolation inv_freq_interpolation = 1.0 / (self.scaling_factor * freqs) - low, high = find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, - self.max_position_embeddings) - inv_freq_mask = (1 - linear_ramp_mask(low, high, self.dim // 2).float().to( - device)) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation - inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask + low, high = find_correction_range( + self.beta_fast, + self.beta_slow, + self.dim, + self.base, + self.max_position_embeddings, + ) + inv_freq_mask = ( + 1 + - linear_ramp_mask(low, high, self.dim // 2).float().to(device) + ) * self.extrapolation_factor # Get n-d rotational scaling corrected for extrapolation + inv_freq = ( + inv_freq_interpolation * (1 - inv_freq_mask) + + inv_freq_extrapolation * inv_freq_mask + ) self.inv_freq = inv_freq - self.mscale = float(get_mscale( - self.scaling_factor) * self.attn_factor) # Get n-d magnitude scaling corrected for interpolation + self.mscale = float( + get_mscale(self.scaling_factor) * self.attn_factor + ) # Get n-d magnitude scaling corrected for interpolation self._seq_len_cached = seqlen t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) diff --git a/server/text_generation_server/utils/medusa.py b/server/text_generation_server/utils/medusa.py index 029de122..634119cb 100644 --- a/server/text_generation_server/utils/medusa.py +++ b/server/text_generation_server/utils/medusa.py @@ -2,6 +2,7 @@ import torch from dataclasses import dataclass from text_generation_server.utils.layers import TensorParallelHead, FastLinear + @dataclass class Output: logits: torch.FloatTensor = None @@ -11,7 +12,9 @@ class Output: class ResBlock(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() - self.linear = FastLinear.load(config, prefix=f"{prefix}.linear", weights=weights, bias=True) + self.linear = FastLinear.load( + config, prefix=f"{prefix}.linear", weights=weights, bias=True + ) self.act = torch.nn.SiLU() def forward(self, x): @@ -19,15 +22,13 @@ class ResBlock(torch.nn.Module): class MedusaModel(torch.nn.Module): - def __init__( - self, - config, - weights, - lm_head - ): + def __init__(self, config, weights, lm_head): super().__init__() self.heads = torch.nn.ModuleList( - [MedusaHead(config, prefix=f"{i}", weights=weights) for i in range(config["medusa_num_heads"])] + [ + MedusaHead(config, prefix=f"{i}", weights=weights) + for i in range(config["medusa_num_heads"]) + ] ) self.lm_head = lm_head @@ -40,9 +41,16 @@ class MedusaModel(torch.nn.Module): class MedusaHead(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() - self.blocks = torch.nn.ModuleList([ResBlock(config, prefix=f"{prefix}.{i}", weights=weights) for i in range(config["medusa_num_layers"])]) + self.blocks = torch.nn.ModuleList( + [ + ResBlock(config, prefix=f"{prefix}.{i}", weights=weights) + for i in range(config["medusa_num_layers"]) + ] + ) n = len(self.blocks) - self.out = FastLinear.load(config, prefix=f"{prefix}.{n}", weights=weights, bias=False) + self.out = FastLinear.load( + config, 
prefix=f"{prefix}.{n}", weights=weights, bias=False + ) def forward(self, x): for block in self.blocks: diff --git a/server/text_generation_server/utils/paged_attention.py b/server/text_generation_server/utils/paged_attention.py index 57a59599..4b12744c 100644 --- a/server/text_generation_server/utils/paged_attention.py +++ b/server/text_generation_server/utils/paged_attention.py @@ -7,23 +7,26 @@ from vllm import attention_ops _PARTITION_SIZE = 512 -def reshape_and_cache(key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, - slots: torch.Tensor): - cache_ops.reshape_and_cache( - key, value, key_cache, value_cache, slots - ) +def reshape_and_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slots: torch.Tensor, +): + cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slots) def attention( - out: torch.Tensor, - query: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - kv_head_mapping: torch.Tensor, - softmax_scale: float, - block_tables: torch.Tensor, - input_lengths: torch.Tensor, - max_s: int, + out: torch.Tensor, + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + kv_head_mapping: torch.Tensor, + softmax_scale: float, + block_tables: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, ): # Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py # Copyright 2023 The vLLM team. All rights @@ -45,9 +48,7 @@ def attention( # value_cache => [num_blocks, num_heads, head_size, block_size] block_size = value_cache.shape[3] num_seqs, num_heads, head_size = query.shape - max_num_partitions = ( - (max_s + _PARTITION_SIZE - 1) // - _PARTITION_SIZE) + max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE # NOTE(woosuk): We use a simple heuristic to decide whether to use # PagedAttention V1 or V2. If the number of partitions is 1, we use # V1 to avoid the overhead of reduction. 
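    # For instance, with _PARTITION_SIZE = 512 a max_s of 1500 yields
    # (1500 + 511) // 512 = 3 partitions.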
Also, if the number of diff --git a/server/text_generation_server/utils/peft.py b/server/text_generation_server/utils/peft.py index d37e8940..45e23320 100644 --- a/server/text_generation_server/utils/peft.py +++ b/server/text_generation_server/utils/peft.py @@ -38,7 +38,9 @@ def download_and_unload_peft(model_id, revision, trust_remote_code): os.makedirs(model_id, exist_ok=True) cache_dir = model_id logger.info(f"Saving the newly created merged model to {cache_dir}") - tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=trust_remote_code) + tokenizer = AutoTokenizer.from_pretrained( + base_model_id, trust_remote_code=trust_remote_code + ) model.save_pretrained(cache_dir, safe_serialization=True) model.config.save_pretrained(cache_dir) tokenizer.save_pretrained(cache_dir) diff --git a/server/text_generation_server/utils/speculate.py b/server/text_generation_server/utils/speculate.py index 38a91972..a1b37a34 100644 --- a/server/text_generation_server/utils/speculate.py +++ b/server/text_generation_server/utils/speculate.py @@ -1,12 +1,11 @@ - SPECULATE = None + def get_speculate() -> int: global SPECULATE return SPECULATE + def set_speculate(speculate: int): global SPECULATE SPECULATE = speculate - - diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py index acd55f26..22bd6be3 100644 --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -142,22 +142,31 @@ class StoppingCriteria: ) -def create_n_gram_speculation(input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool): +def create_n_gram_speculation( + input_ids: torch.Tensor, + next_ids: torch.Tensor, + accepted_ids: torch.Tensor, + speculate: int, + verbose: bool, +): # Very trivial approach, find first match in the string. # This is much less refined than actual n-gram but seems to work # relatively OK in grounded mode and is by far much faster with # much less worst case complexity as everything happens on device. 
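    # A tiny worked example of the matching below (values made up): with
    # input_ids = [[7, 1, 9, 1, 4]], one accepted token and next_ids = [1],
    # the seed is 1. Its first occurrence in input_ids is at index 1, so the
    # window starts at index 2 and, with speculate=2, the proposal is
    # input_ids[:, 2:4] = [9, 1] -- the tokens that followed the previous
    # occurrence of the seed.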
B = accepted_ids.shape[0] device = input_ids.device - seeds = next_ids[accepted_ids.cumsum(dim=-1) -1 ] + seeds = next_ids[accepted_ids.cumsum(dim=-1) - 1] indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1 - all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange(speculate, device=device) + all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange( + speculate, device=device + ) all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1) speculative_ids = input_ids.gather(dim=-1, index=all_indices) return speculative_ids + class HeterogeneousNextTokenChooser: def __init__( self, @@ -229,7 +238,7 @@ class HeterogeneousNextTokenChooser: speculate: int, speculated_ids: Optional[torch.Tensor] = None, speculative_scores: Optional[torch.Tensor] = None, - verbose=False + verbose=False, ): if speculated_ids is not None: B = scores.shape[0] // (speculated_ids.shape[1] + 1) @@ -254,8 +263,8 @@ class HeterogeneousNextTokenChooser: _next_ids = self.choice(_scores) scores[:, j] = _scores next_ids[:, j] = _next_ids - next_ids = next_ids.view(B*S) - scores = scores.view( B* S, -1) + next_ids = next_ids.view(B * S) + scores = scores.view(B * S, -1) if speculated_ids is not None: accepted_ids = [] @@ -263,7 +272,7 @@ class HeterogeneousNextTokenChooser: S = speculated_ids.shape[1] + 1 indices = [] for i in range(B): - _next_ids = next_ids[i*S: (i + 1)*S] + _next_ids = next_ids[i * S : (i + 1) * S] _speculated_ids = speculated_ids[i] validate_speculative = _next_ids[:-1] == _speculated_ids index = i * S @@ -279,7 +288,9 @@ class HeterogeneousNextTokenChooser: break accepted_ids.append(accepted) - accepted_ids = torch.tensor(accepted_ids, device=input_ids.device, dtype=input_ids.dtype) + accepted_ids = torch.tensor( + accepted_ids, device=input_ids.device, dtype=input_ids.dtype + ) next_ids = next_ids[indices] scores = scores[indices] indices = torch.arange(B, device=input_ids.device) * S @@ -302,7 +313,9 @@ class HeterogeneousNextTokenChooser: speculative_ids = Greedy()(speculative_scores) else: # n-gram - speculative_ids = create_n_gram_speculation(input_ids, next_ids, accepted_ids, speculate, verbose) + speculative_ids = create_n_gram_speculation( + input_ids, next_ids, accepted_ids, speculate, verbose + ) else: speculative_ids = None diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py index f3344988..802c1a90 100644 --- a/server/text_generation_server/utils/weights.py +++ b/server/text_generation_server/utils/weights.py @@ -16,7 +16,7 @@ class Weights: dtype, process_group, aliases: Optional[Dict[str, List[str]]] = None, - prefix: Optional[str] = None + prefix: Optional[str] = None, ): routing = {} for filename in filenames: @@ -213,7 +213,8 @@ class Weights: bits, groupsize = self._get_gptq_params() from text_generation_server.utils.layers import HAS_EXLLAMA - use_exllama = bits==4 and HAS_EXLLAMA and quantize == "gptq" + + use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq" weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) else: w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes] @@ -283,7 +284,7 @@ class Weights: if use_exllama: qzeros = self.get_sharded(f"{prefix}.qzeros", dim=0) scales = self.get_sharded(f"{prefix}.scales", dim=0) - g_idx = self.get_sharded(f"{prefix}.g_idx", dim= 0) + g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) g_idx = g_idx - g_idx[0] else: # The triton kernel reorders the scales/zero points instead of the 
weight/activation. diff --git a/update_doc.py b/update_doc.py index 6206e211..6127418c 100644 --- a/update_doc.py +++ b/update_doc.py @@ -21,14 +21,14 @@ def main(): block = [] for line in lines: if line.startswith(" -") or line.startswith(" -"): - rendered_block = '\n'.join(block) + rendered_block = "\n".join(block) if header: final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" else: final_doc += f"```shell\n{rendered_block}\n```\n" block = [] tokens = line.split("<") - if len(tokens)>1: + if len(tokens) > 1: header = tokens[-1][:-1] else: header = line.split("--")[-1] @@ -36,7 +36,7 @@ def main(): block.append(line) - rendered_block = '\n'.join(block) + rendered_block = "\n".join(block) final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" block = [] From db5053fc8686d527b1beb34d7ab9ab1917dbe043 Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Mon, 11 Dec 2023 14:55:03 +0100 Subject: [PATCH 006/153] v1.3.0 --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- docs/openapi.json | 2 +- docs/source/basic_tutorials/gated_model_access.md | 2 +- docs/source/quicktour.md | 6 +++--- integration-tests/pyproject.toml | 2 +- server/pyproject.toml | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b537e09..ee4df612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2798,7 +2798,7 @@ dependencies = [ [[package]] name = "text-generation-benchmark" -version = "1.2.0" +version = "1.3.0" dependencies = [ "average", "clap", @@ -2819,7 +2819,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.2.0" +version = "1.3.0" dependencies = [ "futures", "grpc-metadata", @@ -2836,7 +2836,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.2.0" +version = "1.3.0" dependencies = [ "clap", "ctrlc", @@ -2852,7 +2852,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.2.0" +version = "1.3.0" dependencies = [ "async-stream", "axum", diff --git a/Cargo.toml b/Cargo.toml index 3677d2b6..3eda8871 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.2.0" +version = "1.3.0" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 6a15ae66..153630c0 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -10,7 +10,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, - "version": "1.2.0" + "version": "1.3.0" }, "paths": { "/": { diff --git a/docs/source/basic_tutorials/gated_model_access.md b/docs/source/basic_tutorials/gated_model_access.md index e1abccac..1437717f 100644 --- a/docs/source/basic_tutorials/gated_model_access.md +++ b/docs/source/basic_tutorials/gated_model_access.md @@ -19,6 +19,6 @@ docker run --gpus all \ --shm-size 1g \ -e HUGGING_FACE_HUB_TOKEN=$token \ -p 8080:80 \ - -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 \ + -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 \ --model-id $model ``` diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index c2b27e2a..03ea03bc 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -8,7 +8,7 @@ Let's say you want to deploy [Falcon-7B Instruct](https://huggingface.co/tiiuae/ model=tiiuae/falcon-7b-instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run 
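# NB: the image tag below tracks the TGI release (bumped from 1.2 to 1.3 here)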
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 --model-id $model +docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 --model-id $model ``` @@ -20,7 +20,7 @@ To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://d TGI also supports ROCm-enabled AMD GPUs (only MI210 and MI250 are tested), details are available in the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). To launch TGI on ROCm GPUs, please use instead: ```bash -docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2-rocm --model-id $model +docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3-rocm --model-id $model ``` Once TGI is running, you can use the `generate` endpoint by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint. @@ -91,7 +91,7 @@ curl 127.0.0.1:8080/generate \ To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more. ```bash -docker run ghcr.io/huggingface/text-generation-inference:1.2 --help +docker run ghcr.io/huggingface/text-generation-inference:1.3 --help ``` diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index 75411131..9457efbc 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.2.0" +version = "1.3.0" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/pyproject.toml b/server/pyproject.toml index adc274cb..94e264ae 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.2.0" +version = "1.3.0" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] From 09c556dbd7a875c0a3279b896b510c95bd29dcf5 Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Mon, 11 Dec 2023 16:46:44 +0100 Subject: [PATCH 007/153] v1.3.1 --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- docs/openapi.json | 2 +- integration-tests/pyproject.toml | 2 +- server/pyproject.toml | 2 +- .../models/custom_modeling/flash_mistral_modeling.py | 5 ++++- .../models/custom_modeling/flash_mixtral_modeling.py | 7 +++++-- 7 files changed, 17 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee4df612..277a7742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2798,7 +2798,7 @@ dependencies = [ [[package]] name = "text-generation-benchmark" -version = "1.3.0" +version = "1.3.1" dependencies = [ "average", "clap", @@ -2819,7 +2819,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.3.0" +version = "1.3.1" dependencies = [ "futures", "grpc-metadata", @@ -2836,7 
+2836,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.3.0" +version = "1.3.1" dependencies = [ "clap", "ctrlc", @@ -2852,7 +2852,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.3.0" +version = "1.3.1" dependencies = [ "async-stream", "axum", diff --git a/Cargo.toml b/Cargo.toml index 3eda8871..741b0533 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.3.0" +version = "1.3.1" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 153630c0..6c372148 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -10,7 +10,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, - "version": "1.3.0" + "version": "1.3.1" }, "paths": { "/": { diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index 9457efbc..ae5876b0 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.3.0" +version = "1.3.1" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/pyproject.toml b/server/pyproject.toml index 94e264ae..0c0e6a71 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.3.0" +version = "1.3.1" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index 525bf6bc..5a4f5be0 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -391,6 +391,7 @@ class MistralModel(torch.nn.Module): slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, + true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) @@ -398,7 +399,7 @@ class MistralModel(torch.nn.Module): # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( - position_ids, max_s, hidden_states.dtype + position_ids, true_max_s, hidden_states.dtype ) residual = None @@ -449,6 +450,7 @@ class FlashMistralForCausalLM(torch.nn.Module): prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: + true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] @@ -467,6 +469,7 @@ class FlashMistralForCausalLM(torch.nn.Module): slots, input_lengths, max_s, + true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index 6f5edca2..76ebc6b8 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -401,7 +401,7 @@ class BlockSparseMoE(nn.Module): self.offsets_block_rows = 
block_rows offsets = self.offsets else: - offsets = self.offsets[:block_rows] + offsets = self.offsets[: block_rows + 1] # Indices for the sparse matrix. The indices for # the intermediate matrix are dynamic depending @@ -632,6 +632,7 @@ class MixtralModel(torch.nn.Module): slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, + true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) @@ -639,7 +640,7 @@ class MixtralModel(torch.nn.Module): # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( - position_ids, max_s, hidden_states.dtype + position_ids, true_max_s, hidden_states.dtype ) residual = None @@ -690,6 +691,7 @@ class FlashMixtralForCausalLM(torch.nn.Module): prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: + true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] @@ -708,6 +710,7 @@ class FlashMixtralForCausalLM(torch.nn.Module): slots, input_lengths, max_s, + true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: From f9b58ac7a1c3345c5018715033b073ee6583063c Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Tue, 12 Dec 2023 17:55:03 +0100 Subject: [PATCH 008/153] feat: add quant to mixtral (#1337) --- .../custom_modeling/flash_mistral_modeling.py | 4 +- .../custom_modeling/flash_mixtral_modeling.py | 175 ++++++++++++++++-- .../models/flash_mistral.py | 34 ++-- server/text_generation_server/utils/layers.py | 6 +- 4 files changed, 184 insertions(+), 35 deletions(-) diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index 5a4f5be0..b97866f7 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -434,8 +434,6 @@ class FlashMistralForCausalLM(torch.nn.Module): weights=weights, ) self.max_past = config.sliding_window - if self.max_past is None: - raise ValueError("max_past cannot be None") def forward( self, @@ -454,7 +452,7 @@ class FlashMistralForCausalLM(torch.nn.Module): if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] - else: + elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values max_s = min(self.max_past, max_s) diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index 76ebc6b8..ff2ed9fd 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -365,9 +365,9 @@ class BlockSparseMoE(nn.Module): self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False) # merged expert weights, all of size (n_experts * ffn_dim, hidden_dim) - self.w1 = _load_experts(config, f"{prefix}.experts", "w1", weights).t() + self.w1 = _load_experts(config, f"{prefix}.experts", "w1", weights) self.w2 = _load_experts(config, f"{prefix}.experts", "w2", weights) 
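        # The transpose that used to be baked in here (`.t()`) is now applied at
        # the call sites instead (`w1.t()` / `w3.t()` in sparse_forward), so the
        # raw (n_experts * ffn_dim, hidden_dim) buffers can also be reshaped per
        # expert by the new dense_forward path.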
- self.w3 = _load_experts(config, f"{prefix}.experts", "w3", weights).t() + self.w3 = _load_experts(config, f"{prefix}.experts", "w3", weights) self.offsets = None self.offsets_block_rows = 0 @@ -467,8 +467,7 @@ class BlockSparseMoE(nn.Module): return indices, bin_ids, bins, padded_bins, tokens_per_expert - @torch.inference_mode() - def forward(self, x: torch.Tensor) -> torch.Tensor: + def sparse_forward(self, x: torch.Tensor) -> torch.Tensor: """ x: (sequence_length, model_dim) gate_logits: (sequence_length, n_experts) @@ -502,8 +501,8 @@ class BlockSparseMoE(nn.Module): # (top_k * sequence_length + padding, ffn_dim * n_experts) x = stk.Matrix( topo.size(), - self.act(stk.ops.sdd(x, self.w1, topo).data) - * stk.ops.sdd(x, self.w3, topo).data, + self.act(stk.ops.sdd(x, self.w1.t(), topo).data) + * stk.ops.sdd(x, self.w3.t(), topo).data, topo.row_indices, topo.column_indices, topo.offsets, @@ -534,6 +533,156 @@ class BlockSparseMoE(nn.Module): return x.view(*input_shape) + def dense_forward(self, x: torch.Tensor) -> torch.Tensor: + """ + x: (sequence_length, model_dim) + gate_logits: (sequence_length, n_experts) + """ + # optional reshape + input_shape = x.shape + x = x.view(-1, input_shape[-1]) + + # gate_logits: (sequence_length, n_experts) + gate_logits = self.gate(x) + # all_probs: (sequence_length, n_experts) and upcast for softmax + all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) + + if self.top_k < self.num_experts: + _, not_selected_experts = torch.topk( + all_probs, + self.num_experts - self.top_k, + largest=False, + sorted=False, + dim=1, + ) + # Mask not selected experts + all_probs.scatter_(1, not_selected_experts, 0) + + # Re-normalize + weights = all_probs / all_probs.sum(dim=1, keepdim=True) + + # Expand to [num_experts, sequence_length, model_dim] + x = x.view(1, -1, input_shape[-1]).expand(self.num_experts, -1, input_shape[-1]) + + # Permute to [num_experts, model_dim, ffn_dim] + w1 = self.w1.view(self.num_experts, self.ffn_dim, self.hidden_dim).permute( + 0, 2, 1 + ) + w3 = self.w3.view(self.num_experts, self.ffn_dim, self.hidden_dim).permute( + 0, 2, 1 + ) + + inter = self.act(torch.bmm(x, w1)) * torch.bmm(x, w3) + + out = torch.bmm( + inter, self.w2.view(self.num_experts, self.ffn_dim, self.hidden_dim) + ) + # Mask not selected experts + out *= weights.t().view(self.num_experts, -1, 1) + + # Sum experts + out = out.sum(0) + + # Reduce sum + if self.process_group.size() > 1: + torch.distributed.all_reduce(out, group=self.process_group) + + return out + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if len(x) > 256: + return self.sparse_forward(x) + # This is faster when there is not a lot of tokens + return self.dense_forward(x) + + +class DenseMoE(nn.Module): + def __init__(self, prefix, config: MixtralConfig, weights): + super().__init__() + self.hidden_dim = config.hidden_size + self.ffn_dim = config.intermediate_size // weights.process_group.size() + self.num_experts = config.num_local_experts + self.top_k = config.num_experts_per_tok + + act = config.hidden_act + if "gelu" in act: + self.act = lambda x: torch.nn.functional.gelu( + x, + approximate="tanh" + if act in ["gelu_fast", "gelu_pytorch_tanh"] + else "none", + ) + elif "silu" in act: + self.act = torch.nn.functional.silu + else: + self.act = ACT2FN[act] + + # gating + self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False) + + self.w1 = [ + TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.experts.{i}.w1", weights=weights, bias=False + ) + 
for i in range(self.num_experts) + ] + self.w3 = [ + TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.experts.{i}.w3", weights=weights, bias=False + ) + for i in range(self.num_experts) + ] + self.w2 = [ + TensorParallelRowLinear.load( + config, prefix=f"{prefix}.experts.{i}.w2", weights=weights, bias=False + ) + for i in range(self.num_experts) + ] + + self.process_group = weights.process_group + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + x: (sequence_length, model_dim) + gate_logits: (sequence_length, n_experts) + """ + # optional reshape + input_shape = x.shape + x = x.view(-1, input_shape[-1]) + + # gate_logits: (sequence_length, n_experts) + gate_logits = self.gate(x) + # all_probs: (sequence_length, n_experts) and upcast for softmax + all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) + + if self.top_k < self.num_experts: + _, not_selected_experts = torch.topk( + all_probs, + self.num_experts - self.top_k, + largest=False, + sorted=False, + dim=1, + ) + # Mask not selected experts + all_probs.scatter_(1, not_selected_experts, 0) + + # Re-normalize + weights = all_probs / all_probs.sum(dim=1, keepdim=True) + + # Final output tensor + out = x.new_zeros(x.shape[0], self.hidden_dim) + for i in range(self.num_experts): + h = self.act(self.w1[i](x)) * self.w3[i](x) + h = self.w2[i](h, reduce=False) + # Add expert output to out with masking + out += h * weights[:, i].view(-1, 1) + + # Reduce sum + if self.process_group.size() > 1: + torch.distributed.all_reduce(out, group=self.process_group) + + return out + class MixtralLayer(nn.Module): def __init__(self, layer_id, config, weights): @@ -543,9 +692,9 @@ class MixtralLayer(nn.Module): self.self_attn = MixtralAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) - self.block_sparse_moe = BlockSparseMoE( - f"{prefix}.block_sparse_moe", config, weights - ) + + moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE + self.moe = moe_cls(f"{prefix}.block_sparse_moe", config, weights) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps @@ -591,9 +740,9 @@ class MixtralLayer(nn.Module): attn_output, res ) - block_sparse_moe_output = self.block_sparse_moe(normed_attn_res_output) + moe_output = self.moe(normed_attn_res_output) - return block_sparse_moe_output, attn_res + return moe_output, attn_res class MixtralModel(torch.nn.Module): @@ -675,8 +824,6 @@ class FlashMixtralForCausalLM(torch.nn.Module): weights=weights, ) self.max_past = config.sliding_window - if self.max_past is None: - raise ValueError("max_past cannot be None") def forward( self, @@ -695,7 +842,7 @@ class FlashMixtralForCausalLM(torch.nn.Module): if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] - else: + elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values max_s = min(self.max_past, max_s) diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index 0fad5aa8..abe07c30 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ b/server/text_generation_server/models/flash_mistral.py @@ -136,9 +136,9 @@ class FlashMistralBatch(FlashCausalLMBatch): total_tokens = input_length + max_new_tokens - 1 + speculative_length # Needed blocks can not go 
over SLIDING_WINDOW_BLOCKS - needed_blocks = min( - math.ceil(total_tokens / BLOCK_SIZE), SLIDING_WINDOW_BLOCKS - ) + needed_blocks = math.ceil(total_tokens / BLOCK_SIZE) + if SLIDING_WINDOW_BLOCKS is not None: + needed_blocks = min(needed_blocks, SLIDING_WINDOW_BLOCKS) blocks += needed_blocks needed_blocks_slots.append((needed_blocks, total_tokens)) @@ -152,12 +152,13 @@ class FlashMistralBatch(FlashCausalLMBatch): slot_indices.append(request_slot_indices) # Create tensor to slice into the kv tensor in prefill - request_prefill_cache_indices = torch.arange( - cumulative_length + max(0, input_length - SLIDING_WINDOW), - cumulative_length + input_length, - dtype=torch.int64, - ) - prefill_cache_indices.append(request_prefill_cache_indices) + if SLIDING_WINDOW is not None: + request_prefill_cache_indices = torch.arange( + cumulative_length + max(0, input_length - SLIDING_WINDOW), + cumulative_length + input_length, + dtype=torch.int64, + ) + prefill_cache_indices.append(request_prefill_cache_indices) all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs @@ -209,12 +210,14 @@ class FlashMistralBatch(FlashCausalLMBatch): input_ids = np.concatenate(all_input_ids, dtype=np.int64) position_ids = torch.cat(position_ids) slot_indices = torch.cat(slot_indices) - prefill_cache_indices = torch.cat(prefill_cache_indices) + if SLIDING_WINDOW is not None: + prefill_cache_indices = torch.cat(prefill_cache_indices) else: input_ids = all_input_ids[0] position_ids = position_ids[0] slot_indices = slot_indices[0] - prefill_cache_indices = prefill_cache_indices[0] + if SLIDING_WINDOW is not None: + prefill_cache_indices = prefill_cache_indices[0] cu_seqlen_prefill = torch.tensor( cu_seqlen_prefill, device=device, dtype=torch.int32 @@ -222,7 +225,9 @@ class FlashMistralBatch(FlashCausalLMBatch): position_ids = position_ids.to(device) slot_indices = slot_indices.to(device) - prefill_cache_indices = prefill_cache_indices.to(device) + prefill_cache_indices = ( + prefill_cache_indices.to(device) if SLIDING_WINDOW is not None else None + ) input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device) input_lengths_tensor = torch.tensor( input_lengths, dtype=torch.int32, device=device @@ -314,8 +319,9 @@ class BaseFlashMistral(FlashCausalLM): config.quantize = quantize # Set context windows - SLIDING_WINDOW = config.sliding_window - SLIDING_WINDOW_BLOCKS = math.ceil(config.sliding_window / BLOCK_SIZE) + if config.sliding_window is not None: + SLIDING_WINDOW = config.sliding_window + SLIDING_WINDOW_BLOCKS = math.ceil(config.sliding_window / BLOCK_SIZE) torch.distributed.barrier(group=self.process_group) diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index 77e2fdb6..011a9382 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -64,8 +64,6 @@ elif CAN_EXLLAMA: except ImportError: pass -from typing import Optional - HAS_EETQ = False try: from EETQ import quant_weights, w8_a16_gemm @@ -489,9 +487,9 @@ class TensorParallelRowLinear(SuperLayer): process_group=weights.process_group, ) - def forward(self, input: torch.Tensor) -> torch.Tensor: + def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor: out = super().forward(input) - if self.process_group.size() > 1: + if self.process_group.size() > 1 and reduce: torch.distributed.all_reduce(out, group=self.process_group) return out From 
05f8c85a8b4947553900a3a64bc30b4816657a36 Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Tue, 12 Dec 2023 18:10:22 +0100 Subject: [PATCH 009/153] v1.3.2 --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- docs/openapi.json | 2 +- integration-tests/pyproject.toml | 2 +- server/pyproject.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 277a7742..4baa1f8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2798,7 +2798,7 @@ dependencies = [ [[package]] name = "text-generation-benchmark" -version = "1.3.1" +version = "1.3.2" dependencies = [ "average", "clap", @@ -2819,7 +2819,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.3.1" +version = "1.3.2" dependencies = [ "futures", "grpc-metadata", @@ -2836,7 +2836,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.3.1" +version = "1.3.2" dependencies = [ "clap", "ctrlc", @@ -2852,7 +2852,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.3.1" +version = "1.3.2" dependencies = [ "async-stream", "axum", diff --git a/Cargo.toml b/Cargo.toml index 741b0533..0ffa4254 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.3.1" +version = "1.3.2" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 6c372148..d70023c6 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -10,7 +10,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, - "version": "1.3.1" + "version": "1.3.2" }, "paths": { "/": { diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index ae5876b0..2747473c 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.3.1" +version = "1.3.2" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/pyproject.toml b/server/pyproject.toml index 0c0e6a71..a64f5ad8 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.3.1" +version = "1.3.2" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] From 2f88d8dfb3fa8d38f1de27f91b732f62c27e9315 Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Wed, 13 Dec 2023 09:19:19 +0100 Subject: [PATCH 010/153] fix: default max_new_tokens to 100 --- docs/openapi.json | 2 +- router/src/lib.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/openapi.json b/docs/openapi.json index d70023c6..92faf1ed 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -523,7 +523,7 @@ "max_new_tokens": { "type": "integer", "format": "int32", - "default": "null", + "default": "20", "example": "20", "nullable": true, "minimum": 0 diff --git a/router/src/lib.rs b/router/src/lib.rs index b547dc15..898fcd04 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -107,7 +107,7 @@ pub(crate) struct GenerateParameters { #[schema(default = "false", example = true)] pub do_sample: bool, #[serde(default = "default_max_new_tokens")] - #[schema(nullable = true, default = "null", example = "20")] + #[schema(nullable = true, default = "100", example = "20")] 
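    // A request that omits `max_new_tokens` now falls back to
    // `default_max_new_tokens()` (`Some(100)` below) rather than `None`;
    // the OpenAPI example value of 20 is unchanged.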
pub max_new_tokens: Option, #[serde(default)] #[schema(nullable = true, default = "null", example = false)] @@ -141,7 +141,7 @@ pub(crate) struct GenerateParameters { } fn default_max_new_tokens() -> Option { - None + Some(100) } fn default_parameters() -> GenerateParameters { From c974437ba75c81deff4dd06702c88ded66af8d7f Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Thu, 14 Dec 2023 11:02:16 +0100 Subject: [PATCH 011/153] fix: fix gpt-q params loading --- .../text_generation_server/models/flash_llama.py | 2 +- .../text_generation_server/models/flash_mistral.py | 2 +- server/text_generation_server/models/flash_neox.py | 2 +- server/text_generation_server/models/flash_rw.py | 2 +- .../models/flash_santacoder.py | 2 +- server/text_generation_server/models/galactica.py | 2 +- server/text_generation_server/models/gpt_neox.py | 2 +- server/text_generation_server/models/mpt.py | 2 +- server/text_generation_server/models/opt.py | 2 +- server/text_generation_server/utils/weights.py | 14 ++++++++++---- 10 files changed, 19 insertions(+), 13 deletions(-) diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py index 2415a245..8a3bccdd 100644 --- a/server/text_generation_server/models/flash_llama.py +++ b/server/text_generation_server/models/flash_llama.py @@ -64,7 +64,7 @@ class FlashLlama(FlashCausalLM): filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize in ["gptq", "awq"]: - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = FlashLlamaForCausalLM(config, weights) if use_medusa: diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index abe07c30..8c6cb025 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ b/server/text_generation_server/models/flash_mistral.py @@ -328,7 +328,7 @@ class BaseFlashMistral(FlashCausalLM): filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize in ["gptq", "awq"]: - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = model_cls(config, weights) diff --git a/server/text_generation_server/models/flash_neox.py b/server/text_generation_server/models/flash_neox.py index 58f345a9..80f8804d 100644 --- a/server/text_generation_server/models/flash_neox.py +++ b/server/text_generation_server/models/flash_neox.py @@ -53,7 +53,7 @@ class FlashNeoXSharded(FlashCausalLM): filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = FlashGPTNeoXForCausalLM(config, weights) diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py index 195b3883..dfab8888 100644 --- a/server/text_generation_server/models/flash_rw.py +++ b/server/text_generation_server/models/flash_rw.py @@ -62,7 +62,7 @@ class FlashRWSharded(FlashCausalLM): config.quantize = quantize if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = FlashRWForCausalLM(config, weights) diff --git a/server/text_generation_server/models/flash_santacoder.py 
b/server/text_generation_server/models/flash_santacoder.py index 29505902..22171ec0 100644 --- a/server/text_generation_server/models/flash_santacoder.py +++ b/server/text_generation_server/models/flash_santacoder.py @@ -63,7 +63,7 @@ class FlashSantacoderSharded(FlashCausalLM): aliases={"transformer.wte.weight": ["lm_head.weight"]}, ) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = FlashSantacoderForCausalLM(config, weights) diff --git a/server/text_generation_server/models/galactica.py b/server/text_generation_server/models/galactica.py index b296c96e..42ff1c80 100644 --- a/server/text_generation_server/models/galactica.py +++ b/server/text_generation_server/models/galactica.py @@ -199,7 +199,7 @@ class GalacticaSharded(CausalLM): filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = OPTForCausalLM(config, weights) diff --git a/server/text_generation_server/models/gpt_neox.py b/server/text_generation_server/models/gpt_neox.py index d4c64dfe..45df4839 100644 --- a/server/text_generation_server/models/gpt_neox.py +++ b/server/text_generation_server/models/gpt_neox.py @@ -57,7 +57,7 @@ class GPTNeoxSharded(CausalLM): filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = GPTNeoxForCausalLM(config, weights) diff --git a/server/text_generation_server/models/mpt.py b/server/text_generation_server/models/mpt.py index 19de497c..e419467f 100644 --- a/server/text_generation_server/models/mpt.py +++ b/server/text_generation_server/models/mpt.py @@ -81,7 +81,7 @@ class MPTSharded(CausalLM): filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) config.quantize = quantize model = MPTForCausalLM(config, weights) diff --git a/server/text_generation_server/models/opt.py b/server/text_generation_server/models/opt.py index b2b87246..58fb212f 100644 --- a/server/text_generation_server/models/opt.py +++ b/server/text_generation_server/models/opt.py @@ -55,7 +55,7 @@ class OPTSharded(CausalLM): filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": - weights._set_gptq_params(model_id) + weights._set_gptq_params(model_id, revision) model = OPTForCausalLM(config, weights) diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py index 802c1a90..67fda511 100644 --- a/server/text_generation_server/utils/weights.py +++ b/server/text_generation_server/utils/weights.py @@ -327,13 +327,15 @@ class Weights: return bits, groupsize - def _set_gptq_params(self, model_id): + def _set_gptq_params(self, model_id, revision): filename = "config.json" try: if os.path.exists(os.path.join(model_id, filename)): filename = os.path.join(model_id, filename) else: - filename = hf_hub_download(model_id, filename=filename) + filename = hf_hub_download( + model_id, filename=filename, revision=revision + ) with open(filename, "r") as f: data = json.load(f) self.gptq_bits = data["quantization_config"]["bits"] @@ -344,7 +346,9 @@ class Weights: if 
From 5c9ef069ed33e21b297e5a4d67b078072f567340 Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Thu, 14 Dec 2023 15:59:38 +0100
Subject: [PATCH 012/153] feat: add more latency metrics in forward (#1346)

---
 benchmark/src/generation.rs                   |  2 +-
 proto/generate.proto                          | 14 +++++
 router/client/src/client.rs                   | 56 +++++++++++++++++--
 router/client/src/sharded_client.rs           | 55 +++++++++++-------
 router/src/infer.rs                           | 17 +++++-
 router/src/validation.rs                      |  4 +-
 server/tests/models/test_bloom.py             | 32 +++++------
 server/tests/models/test_causal_lm.py         | 34 +++++------
 server/tests/models/test_santacoder.py        |  8 +--
 server/tests/models/test_seq2seq_lm.py        | 28 +++++-----
 .../models/causal_lm.py                       | 24 +++++---
 .../models/flash_causal_lm.py                 | 23 +++++---
 .../models/idefics_causal_lm.py               | 21 +++----
 server/text_generation_server/models/model.py |  4 +-
 .../models/seq2seq_lm.py                      | 16 ++++--
 server/text_generation_server/server.py       | 14 ++++-
 server/text_generation_server/utils/tokens.py |  2 +-
 17 files changed, 242 insertions(+), 112 deletions(-)

diff --git a/benchmark/src/generation.rs b/benchmark/src/generation.rs
index 67afa04e..ea7c9778 100644
--- a/benchmark/src/generation.rs
+++ b/benchmark/src/generation.rs
@@ -163,7 +163,7 @@ async fn prefill(
     // Run prefill
     let start_time = Instant::now();
-    let (_, decode_batch) = client.prefill(batch.clone()).await?;
+    let (_, decode_batch, _) = client.prefill(batch.clone()).await?;

     // Get latency
     let latency = start_time.elapsed();
diff --git a/proto/generate.proto b/proto/generate.proto
index 0041b907..fc4617f9 100644
--- a/proto/generate.proto
+++ b/proto/generate.proto
@@ -182,6 +182,12 @@ message PrefillResponse {
   repeated Generation generations = 1;
   /// Next batch (cached)
   optional CachedBatch batch = 2;
+  /// Forward elapsed time in nanoseconds
+  uint64 forward_ns = 3;
+  /// Decode elapsed time in nanoseconds
+  uint64 decode_ns = 4;
+  /// Total elapsed time in nanoseconds
+  uint64 total_ns = 5;
 }

 message DecodeRequest {
@@ -194,6 +200,14 @@ message DecodeResponse {
   repeated Generation generations = 1;
   /// Next batch (cached)
   optional CachedBatch batch = 2;
+  /// Forward elapsed time in nanoseconds
+  uint64 forward_ns = 3;
+  /// Decode elapsed time in nanoseconds
+  uint64 decode_ns = 4;
+  /// Total elapsed time in nanoseconds
+  uint64 total_ns = 5;
+  /// Concatenate elapsed time in nanoseconds
+  optional uint64 concat_ns = 6;
 }

 message WarmupRequest {
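Together, the three new response fields let a caller split server-side cost into model time and overhead. A hedged sketch of that arithmetic (`resp` stands for any decoded response; only the field names come from the proto above):

def overhead_ns(resp):
    # total_ns spans the whole gRPC handler, so what remains after the
    # forward pass and token decoding is batching/bookkeeping overhead.
    return resp.total_ns - resp.forward_ns - resp.decode_ns
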
diff --git a/router/client/src/client.rs b/router/client/src/client.rs
index ca86f330..f0ecb05a 100644
--- a/router/client/src/client.rs
+++ b/router/client/src/client.rs
@@ -8,6 +8,7 @@ use std::env;
 use rand::{distributions::Uniform, Rng};
 use grpc_metadata::InjectTelemetryContext;
 use std::cmp;
+use std::time::Duration;
 use tonic::transport::{Channel, Uri};
 use tracing::instrument;
@@ -294,10 +295,14 @@ impl Client {
     pub async fn prefill(
         &mut self,
         batch: Batch,
-    ) -> Result<(Vec<Generation>, Option<CachedBatch>)> {
+    ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
         let request = tonic::Request::new(PrefillRequest { batch: Some(batch) }).inject_context();
         let response = self.stub.prefill(request).await?.into_inner();
-        Ok((response.generations, response.batch))
+        Ok((
+            response.generations,
+            response.batch,
+            PrefillTimings::new(response.forward_ns, response.decode_ns, response.total_ns),
+        ))
     }

     /// Generate one token for each request in the given cached batches
@@ -308,9 +313,52 @@ impl Client {
     pub async fn decode(
         &mut self,
         batches: Vec<CachedBatch>,
-    ) -> Result<(Vec<Generation>, Option<CachedBatch>)> {
+    ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
         let request = tonic::Request::new(DecodeRequest { batches }).inject_context();
         let response = self.stub.decode(request).await?.into_inner();
-        Ok((response.generations, response.batch))
+        Ok((
+            response.generations,
+            response.batch,
+            DecodeTimings::new(
+                response.concat_ns,
+                response.forward_ns,
+                response.decode_ns,
+                response.total_ns,
+            ),
+        ))
     }
 }
+
+pub struct PrefillTimings {
+    pub forward: Duration,
+    pub decode: Duration,
+    pub total: Duration,
+}
+
+impl PrefillTimings {
+    fn new(forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
+        Self {
+            forward: Duration::from_nanos(forward_ns),
+            decode: Duration::from_nanos(decode_ns),
+            total: Duration::from_nanos(total_ns),
+        }
+    }
+}
+
+pub struct DecodeTimings {
+    pub concat: Option<Duration>,
+    pub forward: Duration,
+    pub decode: Duration,
+    pub total: Duration,
+}
+
+impl DecodeTimings {
+    fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
+        Self {
+            concat: concat_ns.map(|v| Duration::from_nanos(v)),
+            forward: Duration::from_nanos(forward_ns),
+            decode: Duration::from_nanos(decode_ns),
+            total: Duration::from_nanos(total_ns),
+        }
+    }
+}
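The sharded client below folds the per-shard timings into one value by keeping the slowest shard, since a tensor-parallel step only completes when the last shard reports in. The same reduction, sketched in Python (the tuple shape mirrors the Rust return value; names are illustrative):

def merge_shard_results(results):
    generations, next_batch, timings = results.pop()
    for shard_generations, _, shard_timings in results:
        generations.extend(shard_generations)
        # Keep the slowest shard's timings: the batch is only ready
        # once every shard has finished.
        if shard_timings.total > timings.total:
            timings = shard_timings
    return generations, next_batch, timings
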
diff --git a/router/client/src/sharded_client.rs b/router/client/src/sharded_client.rs
index 062ec102..b71c0a7d 100644
--- a/router/client/src/sharded_client.rs
+++ b/router/client/src/sharded_client.rs
@@ -1,5 +1,6 @@
 /// Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.

+use crate::client::{DecodeTimings, PrefillTimings};
 /// Multi shard Client
 use crate::{Batch, CachedBatch, Client, Generation, HealthResponse, ShardInfo};
 use crate::{ClientError, Result};
@@ -119,49 +120,63 @@ impl ShardedClient {
     ///
     /// Returns Generation for each request in batch
     /// and the next cached batch
-    #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
+    #[instrument(skip_all, fields(id = & batch.id, size = & batch.size))]
     pub async fn prefill(
         &mut self,
         batch: Batch,
-    ) -> Result<(Vec<Generation>, Option<CachedBatch>)> {
+    ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
         let futures: Vec<_> = self
             .clients
             .iter_mut()
             .map(|client| Box::pin(client.prefill(batch.clone())))
             .collect();
-        let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>)>> =
+        let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> =
             join_all(futures).await.into_iter().collect();
-        merge_generations(results?)
+        let mut results = results?;
+
+        let (mut generations, next_batch, mut timings) =
+            results.pop().ok_or(ClientError::EmptyResults)?;
+
+        // Merge generations from different model shards
+        for (mut shard_generations, _, shard_timings) in results.into_iter() {
+            generations.append(&mut shard_generations);
+            // Return the timings of the slowest shard
+            if shard_timings.total > timings.total {
+                timings = shard_timings;
+            }
+        }
+        Ok((generations, next_batch, timings))
     }

     /// Generate one token for each request in the given cached batches
     ///
     /// Returns Generation for each request in batches
     /// and the next cached batch
-    #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))]
+    #[instrument(skip_all, fields(size = batches.iter().map(| batch | {batch.size}).sum::< u32 > ()))]
     pub async fn decode(
         &mut self,
         batches: Vec<CachedBatch>,
-    ) -> Result<(Vec<Generation>, Option<CachedBatch>)> {
+    ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
         let futures: Vec<_> = self
             .clients
             .iter_mut()
             .map(|client| Box::pin(client.decode(batches.clone())))
             .collect();
-        let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>)>> =
+        let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> =
             join_all(futures).await.into_iter().collect();
-        merge_generations(results?)
+        let mut results = results?;
+
+        let (mut generations, next_batch, mut timings) =
+            results.pop().ok_or(ClientError::EmptyResults)?;
+
+        // Merge generations from different model shards
+        for (mut shard_generations, _, shard_timings) in results.into_iter() {
+            generations.append(&mut shard_generations);
+            // Return the timings of the slowest shard
+            if shard_timings.total > timings.total {
+                timings = shard_timings;
+            }
+        }
+        Ok((generations, next_batch, timings))
     }
 }
-
-/// Merge generations from the different model shards
-fn merge_generations(
-    mut results: Vec<(Vec<Generation>, Option<CachedBatch>)>,
-) -> Result<(Vec<Generation>, Option<CachedBatch>)> {
-    let (mut generations, next_batch) = results.pop().ok_or(ClientError::EmptyResults)?;
-
-    for (mut shard_generations, _) in results.into_iter() {
-        generations.append(&mut shard_generations);
-    }
-    Ok((generations, next_batch))
-}
diff --git a/router/src/infer.rs b/router/src/infer.rs
index c1b01211..8078cee7 100644
--- a/router/src/infer.rs
+++ b/router/src/infer.rs
@@ -390,15 +390,20 @@ async fn prefill(
     metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");

     match client.prefill(batch).await {
-        Ok((generations, next_batch)) => {
+        Ok((generations, next_batch, timings)) => {
             // Update health
             generation_health.store(true, Ordering::SeqCst);
+
+            let start_filtering_time = Instant::now();
             // Send generated tokens and filter stopped entries
             filter_send_generations(generations, entries);

             // Filter next batch and remove requests that were stopped
             let next_batch = filter_batch(client, next_batch, entries).await;

+            metrics::histogram!("tgi_batch_forward_duration", timings.forward.as_secs_f64(), "method" => "prefill");
+            metrics::histogram!("tgi_batch_decode_duration", timings.decode.as_secs_f64(), "method" => "prefill");
+            metrics::histogram!("tgi_batch_filter_duration", start_filtering_time.elapsed().as_secs_f64(), "method" => "prefill");
             metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill");
             metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill");
             next_batch
@@ -427,15 +432,23 @@ async fn decode(
     metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode");

     match client.decode(batches).await {
-        Ok((generations, next_batch)) => {
+        Ok((generations, next_batch,
timings)) => { // Update health generation_health.store(true, Ordering::SeqCst); + + let start_filtering_time = Instant::now(); // Send generated tokens and filter stopped entries filter_send_generations(generations, entries); // Filter next batch and remove requests that were stopped let next_batch = filter_batch(client, next_batch, entries).await; + if let Some(concat_duration) = timings.concat { + metrics::histogram!("tgi_batch_concat_duration", concat_duration.as_secs_f64(), "method" => "decode"); + } + metrics::histogram!("tgi_batch_forward_duration", timings.forward.as_secs_f64(), "method" => "decode"); + metrics::histogram!("tgi_batch_decode_duration", timings.decode.as_secs_f64(), "method" => "decode"); + metrics::histogram!("tgi_batch_filter_duration", start_filtering_time.elapsed().as_secs_f64(), "method" => "decode"); metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode"); metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode"); next_batch diff --git a/router/src/validation.rs b/router/src/validation.rs index 0ec93109..ee3dfbe0 100644 --- a/router/src/validation.rs +++ b/router/src/validation.rs @@ -569,7 +569,7 @@ mod tests { let max_stop_sequence = 3; let max_top_n_tokens = 4; let max_input_length = 5; - let max_total_tokens = 6; + let max_total_tokens = 106; let workers = 1; let validation = Validation::new( workers, @@ -629,7 +629,7 @@ mod tests { let max_stop_sequences = 3; let max_top_n_tokens = 4; let max_input_length = 5; - let max_total_tokens = 6; + let max_total_tokens = 106; let workers = 1; let validation = Validation::new( workers, diff --git a/server/tests/models/test_bloom.py b/server/tests/models/test_bloom.py index 5cedb9f1..4b7dde81 100644 --- a/server/tests/models/test_bloom.py +++ b/server/tests/models/test_bloom.py @@ -105,7 +105,7 @@ def test_causal_lm_batch_type(default_bloom): @pytest.mark.skip def test_causal_lm_generate_token(default_bloom, default_bloom_batch): sequence_length = len(default_bloom_batch.all_input_ids[0]) - generations, next_batch = default_bloom.generate_token(default_bloom_batch) + generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch) assert len(generations) == len(default_bloom_batch) assert isinstance(next_batch, CausalLMBatch) @@ -156,10 +156,10 @@ def test_causal_lm_generate_token(default_bloom, default_bloom_batch): def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch): next_batch = default_bloom_batch for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(default_bloom_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -182,10 +182,10 @@ def test_causal_lm_generate_token_completion_multi( for i in range( default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1 ): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(default_multi_requests_bloom_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is not None 
assert len(generations) == 2 @@ -205,10 +205,10 @@ def test_causal_lm_generate_token_completion_multi( for _ in range( stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1 ): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -229,11 +229,11 @@ def test_batch_concatenate( default_bloom, default_bloom_batch, default_multi_requests_bloom_batch ): next_batch_0 = default_bloom_batch - _, next_batch_0 = default_bloom.generate_token(next_batch_0) - _, next_batch_0 = default_bloom.generate_token(next_batch_0) + _, next_batch_0, _ = default_bloom.generate_token(next_batch_0) + _, next_batch_0, _ = default_bloom.generate_token(next_batch_0) next_batch_1 = default_multi_requests_bloom_batch - _, next_batch_1 = default_bloom.generate_token(next_batch_1) + _, next_batch_1, _ = default_bloom.generate_token(next_batch_1) # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches @@ -293,10 +293,10 @@ def test_batch_concatenate( for _ in range( default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2 ): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 @@ -318,10 +318,10 @@ def test_batch_concatenate( - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2 ): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 @@ -342,10 +342,10 @@ def test_batch_concatenate( - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 4 ): - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_bloom.generate_token(next_batch) + generations, next_batch, _ = default_bloom.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 diff --git a/server/tests/models/test_causal_lm.py b/server/tests/models/test_causal_lm.py index 3a9acfbb..00c312a8 100644 --- a/server/tests/models/test_causal_lm.py +++ b/server/tests/models/test_causal_lm.py @@ -111,7 +111,9 @@ def test_causal_lm_batch_type(default_causal_lm): def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch): sequence_length = len(default_causal_lm_batch.all_input_ids[0]) - generations, next_batch = default_causal_lm.generate_token(default_causal_lm_batch) + generations, next_batch, _ = default_causal_lm.generate_token( + default_causal_lm_batch + ) assert len(generations) == len(next_batch) assert isinstance(next_batch, CausalLMBatch) @@ 
-163,10 +165,10 @@ def test_causal_lm_generate_token_completion( ): next_batch = default_causal_lm_batch for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -186,10 +188,10 @@ def test_causal_lm_generate_token_completion_multi( for i in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1 ): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 @@ -212,10 +214,10 @@ def test_causal_lm_generate_token_completion_multi( for _ in range( stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1 ): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -234,11 +236,11 @@ def test_batch_concatenate( default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch ): next_batch_0 = default_causal_lm_batch - _, next_batch_0 = default_causal_lm.generate_token(next_batch_0) - _, next_batch_0 = default_causal_lm.generate_token(next_batch_0) + _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) + _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_causal_lm_batch - _, next_batch_1 = default_causal_lm.generate_token(next_batch_1) + _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1) # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches @@ -297,10 +299,10 @@ def test_batch_concatenate( for _ in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 @@ -323,10 +325,10 @@ def test_batch_concatenate( - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert 
len(generations) == 2 @@ -345,10 +347,10 @@ def test_batch_concatenate( - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 4 ): - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_causal_lm.generate_token(next_batch) + generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 diff --git a/server/tests/models/test_santacoder.py b/server/tests/models/test_santacoder.py index fceec560..1e40e766 100644 --- a/server/tests/models/test_santacoder.py +++ b/server/tests/models/test_santacoder.py @@ -55,10 +55,10 @@ def test_santacoder_generate_token_completion(default_santacoder, default_pb_bat next_batch = batch for _ in range(batch.stopping_criterias[0].max_new_tokens - 1): - generations, next_batch = default_santacoder.generate_token(next_batch) + generations, next_batch, _ = default_santacoder.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_santacoder.generate_token(next_batch) + generations, next_batch, _ = default_santacoder.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -83,10 +83,10 @@ def test_fim_santacoder_generate_token_completion( next_batch = batch for _ in range(batch.stopping_criterias[0].max_new_tokens - 1): - generations, next_batch = default_santacoder.generate_token(next_batch) + generations, next_batch, _ = default_santacoder.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_santacoder.generate_token(next_batch) + generations, next_batch, _ = default_santacoder.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 diff --git a/server/tests/models/test_seq2seq_lm.py b/server/tests/models/test_seq2seq_lm.py index 2a2bdbb3..ba9f5578 100644 --- a/server/tests/models/test_seq2seq_lm.py +++ b/server/tests/models/test_seq2seq_lm.py @@ -107,7 +107,7 @@ def test_seq2seq_lm_batch_type(default_seq2seq_lm): @pytest.mark.skip("seq2seq model not enabled on HPU yet") def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch): sequence_length = len(default_seq2seq_lm_batch.input_ids[0]) - generations, next_batch = default_seq2seq_lm.generate_token( + generations, next_batch, _ = default_seq2seq_lm.generate_token( default_seq2seq_lm_batch ) @@ -178,10 +178,10 @@ def test_seq2seq_lm_generate_token_completion( ): next_batch = default_seq2seq_lm_batch for _ in range(6): - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -197,10 +197,10 @@ def test_seq2seq_lm_generate_token_completion_multi( next_batch = default_multi_requests_seq2seq_lm_batch for i in range(4): - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = 
default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 @@ -213,10 +213,10 @@ def test_seq2seq_lm_generate_token_completion_multi( next_batch = next_batch.filter([next_batch.requests[0].id]) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 @@ -235,11 +235,11 @@ def test_batch_concatenate( default_multi_requests_seq2seq_lm_batch, ): next_batch_0 = default_seq2seq_lm_batch - _, next_batch_0 = default_seq2seq_lm.generate_token(next_batch_0) - _, next_batch_0 = default_seq2seq_lm.generate_token(next_batch_0) + _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) + _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_seq2seq_lm_batch - _, next_batch_1 = default_seq2seq_lm.generate_token(next_batch_1) + _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1) # Copy hidden state because it is removed from the concatenated branches next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state @@ -331,10 +331,10 @@ def test_batch_concatenate( ) for _ in range(3): - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 @@ -349,7 +349,7 @@ def test_batch_concatenate( [next_batch.requests[0].id, next_batch.requests[1].id] ) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 @@ -359,7 +359,7 @@ def test_batch_concatenate( next_batch = next_batch.filter([next_batch.requests[1].id]) - generations, next_batch = default_seq2seq_lm.generate_token(next_batch) + generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index 826a6304..387a5618 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -7,6 +7,7 @@ import itertools import math import os import tempfile +import time from typing import Dict, List, Optional, Tuple, Type import torch @@ -33,6 +34,7 @@ from transformers import ( from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.models import Model +from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.models.types import ( Batch, Tokens, @@ -821,7 +823,10 @@ class CausalLM(Model): return outputs.logits, outputs.past_key_values @tracer.start_as_current_span("generate_token") - def generate_token(self, batches: List[CausalLMBatch]) -> Tuple[List[Generation], Optional[CausalLMBatch]]: + def generate_token( + self, batches: List[CausalLMBatch] + ) -> Tuple[List[Generation], 
Optional[CausalLMBatch], Tuple[int, int]]: + start = time.time_ns() # Results generations: List[Generation] = [] prev_batches = [] @@ -939,6 +944,8 @@ class CausalLM(Model): htorch.core.mark_step() + start_decode = time.time_ns() + # Stage 3. Finish and return previous generations stopped = len(requests_to_generate) > 0 for prev_batch in prev_batches: @@ -1073,13 +1080,16 @@ class CausalLM(Model): self.hb_profiler.stop() else: self.hb_profiler.step() - return generations, batch if not stopped else None + + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, batch if not stopped else None, (forward_ns, decode_ns) def warmup(self, batches: List[CausalLMBatch]) -> None: # prefill - _, prefill_batch = self.generate_token([batches.pop(0)]) + _, prefill_batch, _ = self.generate_token([batches.pop(0)]) # decode - _, decode_batch = self.generate_token([prefill_batch]) + _, decode_batch, _ = self.generate_token([prefill_batch]) # shifts self.shifting_warmup(decode_batch) @@ -1088,12 +1098,12 @@ class CausalLM(Model): return # prefill - _, prefill_batch = self.generate_token([batches.pop(0)]) + _, prefill_batch, _ = self.generate_token([batches.pop(0)]) # concatenate and decode - _, decode_batch = self.generate_token([decode_batch, prefill_batch]) + _, decode_batch, _ = self.generate_token([decode_batch, prefill_batch]) # decodes while decode_batch is not None: - _, decode_batch = self.generate_token([decode_batch]) + _, decode_batch, _ = self.generate_token([decode_batch]) def shifting_warmup(self, batch: CausalLMBatch) -> None: chunk_sizes = CHUNK_SIZES.copy() diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 14d30635..930082cd 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -1,6 +1,6 @@ import math +import time import itertools -from text_generation_server.utils.tokens import batch_top_tokens import torch import torch.distributed @@ -9,9 +9,10 @@ import numpy as np from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase -from typing import Optional, Tuple, List, Type, Union, Dict +from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model +from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.utils.speculate import get_speculate from text_generation_server.models.types import ( Batch, @@ -689,7 +690,7 @@ class FlashCausalLM(Model): self.dtype, self.device, ) - _, batch = self.generate_token(batch) + _, batch, _ = self.generate_token(batch) except torch.cuda.OutOfMemoryError as e: raise RuntimeError( f"Not enough memory to handle {len(batch.input_ids)} prefill tokens. 
" @@ -799,7 +800,8 @@ class FlashCausalLM(Model): @tracer.start_as_current_span("generate_token") def generate_token( self, batch: FlashCausalLMBatch - ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch]]: + ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]: + start = time.time_ns() prefill = batch.cu_seqlen_prefill is not None prefill_logprobs = batch.prefill_next_token_indices is not None @@ -941,6 +943,8 @@ class FlashCausalLM(Model): # GPU <-> CPU sync next_token_logprobs = next_token_logprobs.tolist() next_token_ids = next_input_ids.tolist() + accepted_ids = accepted_ids.tolist() + start_decode = time.time_ns() # Zipped iterator iterator = zip( @@ -977,7 +981,6 @@ class FlashCausalLM(Model): # Append next token to all tokens next_token_texts = [] left = 0 - before = stopping_criteria.current_tokens current_stopped = False for j in range(index, index + n_accepted_ids): @@ -1092,7 +1095,7 @@ class FlashCausalLM(Model): generations.append(generation) # Update values - batch.input_lengths[i] = input_length + n_accepted_ids.item() + batch.input_lengths[i] = input_length + n_accepted_ids if batch.input_lengths[i] > batch.max_seqlen: batch.max_seqlen = batch.input_lengths[i] batch.prefix_offsets[i] = prefix_offset @@ -1102,10 +1105,14 @@ class FlashCausalLM(Model): if stopped: del batch # No need to return a batch if we know that all requests stopped - return generations, None + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, None, (forward_ns, decode_ns) batch.prefill_cu_outlens = None batch.prefill_head_indices = None batch.prefill_next_token_indices = None - return generations, batch + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, batch, (forward_ns, decode_ns) diff --git a/server/text_generation_server/models/idefics_causal_lm.py b/server/text_generation_server/models/idefics_causal_lm.py index 86389ad2..2f28688d 100644 --- a/server/text_generation_server/models/idefics_causal_lm.py +++ b/server/text_generation_server/models/idefics_causal_lm.py @@ -1,17 +1,11 @@ import torch -import inspect -import re -from io import BytesIO -import base64 -from PIL import Image -import re +import time from dataclasses import dataclass from opentelemetry import trace from transformers import ( AutoProcessor, AutoTokenizer, - AutoModelForCausalLM, PreTrainedTokenizerBase, ProcessorMixin, ) @@ -670,7 +664,8 @@ class IdeficsCausalLM(Model): @tracer.start_as_current_span("generate_token") def generate_token( self, batch: IdeficsCausalLMBatch - ) -> Tuple[List[Generation], Optional[IdeficsCausalLMBatch]]: + ) -> Tuple[List[Generation], Optional[IdeficsCausalLMBatch], Tuple[int, int]]: + start = time.time_ns() # slice the attention mask to the correct shape attention_mask = batch.attention_mask[:, : -batch.padding_right_offset] if batch.input_ids.size(1) == 1: @@ -699,6 +694,8 @@ class IdeficsCausalLM(Model): # Hardcoded remove image tokens logits[:, 32000:32001] = torch.finfo(logits.dtype).min + start_decode = time.time_ns() + # Results generations: List[Generation] = [] stopped = True @@ -827,7 +824,9 @@ class IdeficsCausalLM(Model): # We finished all generations in the batch; there is no next batch if stopped: - return generations, None + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, None, (forward_ns, decode_ns) # Slice unused values from prefill batch.input_ids = batch.input_ids[:, :1] @@ -847,4 +846,6 @@ class 
IdeficsCausalLM(Model):
         batch.past_key_values = past
         batch.image_hidden_states = image_hidden_states

-        return generations, batch
+        forward_ns = start_decode - start
+        decode_ns = time.time_ns() - start_decode
+        return generations, batch, (forward_ns, decode_ns)
diff --git a/server/text_generation_server/models/model.py b/server/text_generation_server/models/model.py
index c835ec34..372c48c0 100644
--- a/server/text_generation_server/models/model.py
+++ b/server/text_generation_server/models/model.py
@@ -60,7 +60,9 @@ class Model(ABC):
         raise NotImplementedError

     @abstractmethod
-    def generate_token(self, batch: B) -> Tuple[List[Generation], Optional[B]]:
+    def generate_token(
+        self, batch: B
+    ) -> Tuple[List[Generation], Optional[B], Tuple[int, int]]:
         raise NotImplementedError

     def warmup(self, batch: B, max_total_tokens: int):
diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py
index a85ef58e..f2e4cec6 100644
--- a/server/text_generation_server/models/seq2seq_lm.py
+++ b/server/text_generation_server/models/seq2seq_lm.py
@@ -1,11 +1,12 @@
-from text_generation_server.utils.tokens import batch_top_tokens
 import torch
+import time

 from dataclasses import dataclass
 from opentelemetry import trace
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, PreTrainedTokenizerBase
 from typing import Optional, Tuple, List, Type, Dict

+from text_generation_server.utils.tokens import batch_top_tokens
 from text_generation_server.models import Model
 from text_generation_server.models.types import (
     GeneratedText,
@@ -613,7 +614,8 @@ class Seq2SeqLM(Model):
     @tracer.start_as_current_span("generate_token")
     def generate_token(
         self, batch: Seq2SeqLMBatch
-    ) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch]]:
+    ) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch], Tuple[int, int]]:
+        start = time.time_ns()
         if batch.decoder_attention_mask is not None:
             # slice to the correct shape
             decoder_attention_mask = batch.decoder_attention_mask[
@@ -644,6 +646,8 @@ class Seq2SeqLM(Model):
             torch.log_softmax(logits[:, -1], -1),
         )

+        start_decode = time.time_ns()
+
         # Finished requests
         generations: List[Generation] = []
         stopped = True
@@ -788,7 +792,9 @@ class Seq2SeqLM(Model):

         # We finished all generations in the batch; there is no next batch
         if stopped:
-            return generations, None
+            forward_ns = start_decode - start
+            decode_ns = time.time_ns() - start_decode
+            return generations, None, (forward_ns, decode_ns)

         # We don't need input_ids after the prefill forward
         batch.input_ids = None
@@ -799,4 +805,6 @@ class Seq2SeqLM(Model):
             batch.decoder_attention_mask[:, -batch.padding_right_offset] = 1
         batch.padding_right_offset -= 1

-        return generations, batch
+        forward_ns = start_decode - start
+        decode_ns = time.time_ns() - start_decode
+        return generations, batch, (forward_ns, decode_ns)
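Every `generate_token` implementation now follows the same split: everything up to the logits counts as forward time, everything after as decode time. A condensed sketch of the shared contract (`run_forward` and `select_tokens` are stand-in names):

import time

def generate_token(batch):
    start = time.time_ns()
    logits = run_forward(batch)                 # model forward pass
    start_decode = time.time_ns()
    generations = select_tokens(batch, logits)  # sampling + detokenization
    forward_ns = start_decode - start
    decode_ns = time.time_ns() - start_decode
    return generations, batch, (forward_ns, decode_ns)
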
diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py
index 8cd03150..83a65251 100644
--- a/server/text_generation_server/server.py
+++ b/server/text_generation_server/server.py
@@ -4,6 +4,7 @@ import asyncio
 import os
 import sys
 import torch
+import time

 from grpc import aio
 from loguru import logger
@@ -70,18 +71,23 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
         return generate_pb2.WarmupResponse()

     async def Prefill(self, request, context):
+        start = time.time_ns()
         batch = self.model.batch_type.from_pb(
             request.batch, self.model.tokenizer, self.model.dtype, self.model.device
         )

-        generations, next_batch = self.model.generate_token([batch])
+        generations, next_batch, timings = self.model.generate_token([batch])
         self.cache.set(next_batch)

         return generate_pb2.PrefillResponse(
             generations=[generation.to_pb() for generation in generations],
             batch=next_batch.to_pb() if next_batch else None,
+            forward_ns=timings[0],
+            decode_ns=timings[1],
+            total_ns=time.time_ns() - start,
         )

     async def Decode(self, request, context):
+        start = time.time_ns()
         if len(request.batches) == 0:
             raise ValueError("Must provide at least one batch")

@@ -95,12 +101,16 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
         if len(batches) == 0:
             raise ValueError("All batches are empty")

-        generations, next_batch = self.model.generate_token(batches)
+        generations, next_batch, timings = self.model.generate_token(batches)
         self.cache.set(next_batch)

         return generate_pb2.DecodeResponse(
             generations=[generation.to_pb() for generation in generations],
             batch=next_batch.to_pb() if next_batch else None,
+            concat_ns=None,  # TODO: measure concat time
+            forward_ns=timings[0],
+            decode_ns=timings[1],
+            total_ns=time.time_ns() - start,
         )
diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
index 22bd6be3..a97d8a0b 100644
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -89,7 +89,7 @@ class NextTokenChooser:
 class StopSequenceCriteria:
     def __init__(self, stop_sequence: str):
         stop_sequence = re.escape(stop_sequence)
-        self.regex = re.compile(f".*{stop_sequence}$")
+        self.regex = re.compile(f"{stop_sequence}$")

     def __call__(self, output: str) -> bool:
         if self.regex.findall(output):

From 28fcdcca6d9383854423a7f1823c256661a2d7af Mon Sep 17 00:00:00 2001
From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
Date: Thu, 14 Dec 2023 16:04:26 +0100
Subject: [PATCH 013/153] fix: fix triton OutOfResources import

---
 server/text_generation_server/utils/gptq/custom_autotune.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/gptq/custom_autotune.py b/server/text_generation_server/utils/gptq/custom_autotune.py
index 17dff02e..589d89ef 100644
--- a/server/text_generation_server/utils/gptq/custom_autotune.py
+++ b/server/text_generation_server/utils/gptq/custom_autotune.py
@@ -90,7 +90,7 @@ class Autotuner(triton.KernelInterface):
             return triton.testing.do_bench(
                 kernel_call, percentiles=(0.5, 0.2, 0.8), rep=40
             )
-        except triton.compiler.OutOfResources:
+        except triton.OutOfResources:
             return (float("inf"), float("inf"), float("inf"))

     def run(self, *args, **kwargs):
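Recent triton releases re-export `OutOfResources` at the top level instead of under `triton.compiler`, which is all this one-line change tracks. Code that has to run against both layouts could hedge with a fallback import; this shim is illustrative, not part of the patch:

try:
    from triton import OutOfResources  # newer triton layout
except ImportError:
    from triton.compiler import OutOfResources  # older triton layout
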
From b3c2d7291e48feafd161a2a1dc9e7af31caa0cd3 Mon Sep 17 00:00:00 2001
From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
Date: Thu, 14 Dec 2023 16:45:47 +0100
Subject: [PATCH 014/153] fix: fix quant linear autotune

---
 server/text_generation_server/utils/gptq/custom_autotune.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/gptq/custom_autotune.py b/server/text_generation_server/utils/gptq/custom_autotune.py
index 589d89ef..1eb40f1e 100644
--- a/server/text_generation_server/utils/gptq/custom_autotune.py
+++ b/server/text_generation_server/utils/gptq/custom_autotune.py
@@ -88,7 +88,7 @@ class Autotuner(triton.KernelInterface):
             # In testings using only 40 reps seems to be close enough and it appears to be what PyTorch uses
             # PyTorch also sets fast_flush to True, but I didn't see any speedup so I'll leave the default
             return triton.testing.do_bench(
-                kernel_call, percentiles=(0.5, 0.2, 0.8), rep=40
+                kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40
             )
         except triton.OutOfResources:
             return (float("inf"), float("inf"), float("inf"))

From 04dbf7a506adbc695815b70eee237c0daca48e6f Mon Sep 17 00:00:00 2001
From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
Date: Thu, 14 Dec 2023 17:01:43 +0100
Subject: [PATCH 015/153] fix: slice stopping criteria buffer

---
 server/text_generation_server/utils/tokens.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
index a97d8a0b..cbcc153b 100644
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -109,7 +109,7 @@ class StoppingCriteria:
         self.stop_sequence_criterias = stop_sequence_criterias
         self.max_new_tokens = max_new_tokens
         self.current_tokens = 0
-        self.current_output = ""
+        self.current_output = "test"
         self.ignore_eos_token = ignore_eos_token

     def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:
@@ -121,6 +121,10 @@ class StoppingCriteria:
             return True, FinishReason.FINISH_REASON_EOS_TOKEN

         self.current_output += last_output
+        # There is no need to keep an output that is too long
+        if len(self.current_output) > 300:
+            # Slice to -200 to avoid doing it all the time
+            self.current_output = self.current_output[-200:]
         for stop_sequence_criteria in self.stop_sequence_criterias:
             if stop_sequence_criteria(self.current_output):
                 return True, FinishReason.FINISH_REASON_STOP_SEQUENCE
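The slicing above bounds the per-request buffer while leaving enough trailing text for the stop-sequence regexes to match; note that the `current_output = "test"` debug leftover introduced here is reverted by the very next commit. The rolling-buffer idea in isolation, with the 300/200 thresholds taken from the patch and the input chunks made up:

buffer = ""
for piece in ["Hello ", "world", "!"]:  # decoded text chunks, illustrative
    buffer += piece
    if len(buffer) > 300:
        # Trim to the last 200 chars so the slice runs rarely rather than on
        # every token; stop sequences shorter than 200 chars still match.
        buffer = buffer[-200:]
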
From 214ec0eb491162e098a54ebb994133b20b5251b3 Mon Sep 17 00:00:00 2001
From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
Date: Thu, 14 Dec 2023 17:04:58 +0100
Subject: [PATCH 016/153] fix: only keep stop sequence buffer if we have some

---
 server/text_generation_server/utils/tokens.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
index cbcc153b..08828835 100644
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -109,7 +109,7 @@ class StoppingCriteria:
         self.stop_sequence_criterias = stop_sequence_criterias
         self.max_new_tokens = max_new_tokens
         self.current_tokens = 0
-        self.current_output = "test"
+        self.current_output = ""
         self.ignore_eos_token = ignore_eos_token

     def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:
@@ -120,14 +120,15 @@ class StoppingCriteria:
         if not self.ignore_eos_token and last_token == self.eos_token_id:
             return True, FinishReason.FINISH_REASON_EOS_TOKEN

-        self.current_output += last_output
-        # There is no need to keep an output that is too long
-        if len(self.current_output) > 300:
-            # Slice to -200 to avoid doing it all the time
-            self.current_output = self.current_output[-200:]
-        for stop_sequence_criteria in self.stop_sequence_criterias:
-            if stop_sequence_criteria(self.current_output):
-                return True, FinishReason.FINISH_REASON_STOP_SEQUENCE
+        if self.stop_sequence_criterias:
+            self.current_output += last_output
+            # There is no need to keep an output that is too long
+            if len(self.current_output) > 300:
+                # Slice to -200 to avoid doing it all the time
+                self.current_output = self.current_output[-200:]
+            for stop_sequence_criteria in self.stop_sequence_criterias:
+                if stop_sequence_criteria(self.current_output):
+                    return True, FinishReason.FINISH_REASON_STOP_SEQUENCE

         return False, None

From bb6200503cb2b8fc7b48d333709b802f0dd1fb05 Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Fri, 15 Dec 2023 01:18:39 +0100
Subject: [PATCH 017/153] fix: max_past default value must be -1, not 0 (#1348)

---
 .../models/custom_modeling/flash_mistral_modeling.py | 2 +-
 .../models/custom_modeling/flash_mixtral_modeling.py | 2 +-
 server/text_generation_server/utils/flash_attn.py    | 3 +++
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
index b97866f7..afeaf7e5 100644
--- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
@@ -149,7 +149,7 @@ class MistralAttention(torch.nn.Module):
     ):
         super().__init__()
         self.max_past = (
-            config.sliding_window if config.sliding_window is not None else 0
+            config.sliding_window if config.sliding_window is not None else -1
         )
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
index ff2ed9fd..35bb3735 100644
--- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py
@@ -204,7 +204,7 @@ class MixtralAttention(torch.nn.Module):
     ):
         super().__init__()
         self.max_past = (
-            config.sliding_window if config.sliding_window is not None else 0
+            config.sliding_window if config.sliding_window is not None else -1
         )
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
index 3237df82..02f01e65 100644
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -72,6 +72,9 @@ def attention(
     softmax_scale,
     window_size_left=-1,
 ):
+    if window_size_left <= 0 and window_size_left != -1:
+        raise ValueError("`window_size_left` must be > 0 or -1")
+
     if HAS_FLASH_ATTN_V2_CUDA:
         return flash_attn_2_cuda.varlen_fwd(
             q,
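With -1 as the explicit "no sliding window" sentinel, a window of 0 can no longer slip through and silently mask the entire past; the new guard in flash_attn.py rejects it outright. The convention, restated as a runnable check (the window values are examples):

def check_window(window_size_left: int) -> None:
    if window_size_left <= 0 and window_size_left != -1:
        raise ValueError("`window_size_left` must be > 0 or -1")

check_window(-1)    # ok: sliding window disabled, attend to the full past
check_window(4096)  # ok: Mistral-style 4k window
# check_window(0) would raise: 0 is not a valid window size
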
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d7c217777061d5a2d652aea771fb9ba98b6dade657204b08c4b9604d11555b" +checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83" dependencies = [ "cfg-if", "core-foundation-sys", @@ -2798,7 +2798,7 @@ dependencies = [ [[package]] name = "text-generation-benchmark" -version = "1.3.2" +version = "1.3.3" dependencies = [ "average", "clap", @@ -2819,7 +2819,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.3.2" +version = "1.3.3" dependencies = [ "futures", "grpc-metadata", @@ -2836,7 +2836,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.3.2" +version = "1.3.3" dependencies = [ "clap", "ctrlc", @@ -2852,7 +2852,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.3.2" +version = "1.3.3" dependencies = [ "async-stream", "axum", diff --git a/Cargo.toml b/Cargo.toml index 0ffa4254..a09a8ca7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.3.2" +version = "1.3.3" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 92faf1ed..62751928 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -10,7 +10,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, - "version": "1.3.2" + "version": "1.3.3" }, "paths": { "/": { diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index 2747473c..64723ae6 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.3.2" +version = "1.3.3" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/pyproject.toml b/server/pyproject.toml index a64f5ad8..a027ba2b 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.3.2" +version = "1.3.3" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] From a95e6d603dc2206dd1fab05788d1c5ce075d68d8 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Fri, 15 Dec 2023 12:52:24 +0100 Subject: [PATCH 019/153] feat: relax mistral requirements (#1351) Close #1253 Close #1279 --- .github/workflows/build.yaml | 78 +- server/poetry.lock | 2127 +++++++++-------- server/requirements_cuda.txt | 20 +- server/requirements_rocm.txt | 18 +- .../custom_modeling/flash_mistral_modeling.py | 9 - .../custom_modeling/flash_mixtral_modeling.py | 20 +- 6 files changed, 1176 insertions(+), 1096 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 395a0b6a..066ea889 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -146,11 +146,50 @@ jobs: cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache,mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache,mode=min + integration-tests: + concurrency: + group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + needs: + - start-runner + - build-and-push-image # Wait for the docker image to be built + runs-on: ${{ needs.start-runner.outputs.label }} # run 
the job on the newly created runner + env: + DOCKER_VOLUME: /cache + steps: + - uses: actions/checkout@v2 + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4.4.1 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Tailscale + uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 + with: + authkey: ${{ secrets.TAILSCALE_AUTHKEY }} + - name: Prepare disks + run: | + sudo mkfs -t ext4 /dev/nvme1n1 + sudo mkdir ${{ env.DOCKER_VOLUME }} + sudo mount /dev/nvme1n1 ${{ env.DOCKER_VOLUME }} + - name: Install + run: | + make install-integration-tests + - name: Run tests + run: | + export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }} + export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }} + pytest -s -vv integration-tests + build-and-push-image-rocm: concurrency: group: ${{ github.workflow }}-build-and-push-image-rocm-${{ github.head_ref || github.run_id }} cancel-in-progress: true - needs: start-runner # required to start the main job when the runner is ready + needs: + - start-runner + - build-and-push-image # Wait for the main docker image to be built + - integration-tests # Wait for the main integration-tests runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner permissions: contents: write @@ -235,43 +274,6 @@ jobs: cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-rocm,mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-rocm,mode=min - integration-tests: - concurrency: - group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - needs: - - start-runner - - build-and-push-image # Wait for the docker image to be built - - build-and-push-image-rocm - runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner - env: - DOCKER_VOLUME: /cache - steps: - - uses: actions/checkout@v2 - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4.4.1 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: 3.9 - - name: Tailscale - uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - - name: Prepare disks - run: | - sudo mkfs -t ext4 /dev/nvme1n1 - sudo mkdir ${{ env.DOCKER_VOLUME }} - sudo mount /dev/nvme1n1 ${{ env.DOCKER_VOLUME }} - - name: Install - run: | - make install-integration-tests - - name: Run tests - run: | - export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }} - export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }} - pytest -s -vv integration-tests - stop-runner: name: Stop self-hosted EC2 runner needs: diff --git a/server/poetry.lock b/server/poetry.lock index 400c6e54..360eeb36 100644 --- a/server/poetry.lock +++ b/server/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "accelerate" @@ -32,87 +32,87 @@ testing = ["bitsandbytes", "datasets", "deepspeed (<0.13.0)", "evaluate", "param [[package]] name = "aiohttp" -version = "3.9.0" +version = "3.9.5" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" files = [ - {file = "aiohttp-3.9.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6896b8416be9ada4d22cd359d7cb98955576ce863eadad5596b7cdfbf3e17c6c"}, - {file = "aiohttp-3.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1736d87dad8ef46a8ec9cddd349fa9f7bd3a064c47dd6469c0d6763d3d49a4fc"}, - {file = "aiohttp-3.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c9e5f4d7208cda1a2bb600e29069eecf857e6980d0ccc922ccf9d1372c16f4b"}, - {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8488519aa05e636c5997719fe543c8daf19f538f4fa044f3ce94bee608817cff"}, - {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ab16c254e2312efeb799bc3c06897f65a133b38b69682bf75d1f1ee1a9c43a9"}, - {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a94bde005a8f926d0fa38b88092a03dea4b4875a61fbcd9ac6f4351df1b57cd"}, - {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b777c9286b6c6a94f50ddb3a6e730deec327e9e2256cb08b5530db0f7d40fd8"}, - {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:571760ad7736b34d05597a1fd38cbc7d47f7b65deb722cb8e86fd827404d1f6b"}, - {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:deac0a32aec29608eb25d730f4bc5a261a65b6c48ded1ed861d2a1852577c932"}, - {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4ee1b4152bc3190cc40ddd6a14715e3004944263ea208229ab4c297712aa3075"}, - {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:3607375053df58ed6f23903aa10cf3112b1240e8c799d243bbad0f7be0666986"}, - {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:65b0a70a25456d329a5e1426702dde67be0fb7a4ead718005ba2ca582d023a94"}, - {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a2eb5311a37fe105aa35f62f75a078537e1a9e4e1d78c86ec9893a3c97d7a30"}, - {file = "aiohttp-3.9.0-cp310-cp310-win32.whl", hash = "sha256:2cbc14a13fb6b42d344e4f27746a4b03a2cb0c1c3c5b932b0d6ad8881aa390e3"}, - {file = "aiohttp-3.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ac9669990e2016d644ba8ae4758688534aabde8dbbc81f9af129c3f5f01ca9cd"}, - {file = "aiohttp-3.9.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f8e05f5163528962ce1d1806fce763ab893b1c5b7ace0a3538cd81a90622f844"}, - {file = "aiohttp-3.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4afa8f71dba3a5a2e1e1282a51cba7341ae76585345c43d8f0e624882b622218"}, - {file = "aiohttp-3.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f929f4c9b9a00f3e6cc0587abb95ab9c05681f8b14e0fe1daecfa83ea90f8318"}, - {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28185e36a78d247c55e9fbea2332d16aefa14c5276a582ce7a896231c6b1c208"}, - {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a486ddf57ab98b6d19ad36458b9f09e6022de0381674fe00228ca7b741aacb2f"}, - {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:70e851f596c00f40a2f00a46126c95c2e04e146015af05a9da3e4867cfc55911"}, - {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5b7bf8fe4d39886adc34311a233a2e01bc10eb4e842220235ed1de57541a896"}, - {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c67a51ea415192c2e53e4e048c78bab82d21955b4281d297f517707dc836bf3d"}, - {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:694df243f394629bcae2d8ed94c589a181e8ba8604159e6e45e7b22e58291113"}, - {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3dd8119752dd30dd7bca7d4bc2a92a59be6a003e4e5c2cf7e248b89751b8f4b7"}, - {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:eb6dfd52063186ac97b4caa25764cdbcdb4b10d97f5c5f66b0fa95052e744eb7"}, - {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d97c3e286d0ac9af6223bc132dc4bad6540b37c8d6c0a15fe1e70fb34f9ec411"}, - {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:816f4db40555026e4cdda604a1088577c1fb957d02f3f1292e0221353403f192"}, - {file = "aiohttp-3.9.0-cp311-cp311-win32.whl", hash = "sha256:3abf0551874fecf95f93b58f25ef4fc9a250669a2257753f38f8f592db85ddea"}, - {file = "aiohttp-3.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:e18d92c3e9e22553a73e33784fcb0ed484c9874e9a3e96c16a8d6a1e74a0217b"}, - {file = "aiohttp-3.9.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:99ae01fb13a618b9942376df77a1f50c20a281390dad3c56a6ec2942e266220d"}, - {file = "aiohttp-3.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:05857848da443c8c12110d99285d499b4e84d59918a21132e45c3f0804876994"}, - {file = "aiohttp-3.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:317719d7f824eba55857fe0729363af58e27c066c731bc62cd97bc9c3d9c7ea4"}, - {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1e3b3c107ccb0e537f309f719994a55621acd2c8fdf6d5ce5152aed788fb940"}, - {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45820ddbb276113ead8d4907a7802adb77548087ff5465d5c554f9aa3928ae7d"}, - {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a183f1978802588711aed0dea31e697d760ce9055292db9dc1604daa9a8ded"}, - {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a4cd44788ea0b5e6bb8fa704597af3a30be75503a7ed1098bc5b8ffdf6c982"}, - {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673343fbc0c1ac44d0d2640addc56e97a052504beacd7ade0dc5e76d3a4c16e8"}, - {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e8a3b79b6d186a9c99761fd4a5e8dd575a48d96021f220ac5b5fa856e5dd029"}, - {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6777a390e41e78e7c45dab43a4a0196c55c3b8c30eebe017b152939372a83253"}, - {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7ae5f99a32c53731c93ac3075abd3e1e5cfbe72fc3eaac4c27c9dd64ba3b19fe"}, - {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:f1e4f254e9c35d8965d377e065c4a8a55d396fe87c8e7e8429bcfdeeb229bfb3"}, - {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11ca808f9a6b63485059f5f6e164ef7ec826483c1212a44f268b3653c91237d8"}, - {file = "aiohttp-3.9.0-cp312-cp312-win32.whl", hash = 
"sha256:de3cc86f4ea8b4c34a6e43a7306c40c1275e52bfa9748d869c6b7d54aa6dad80"}, - {file = "aiohttp-3.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca4fddf84ac7d8a7d0866664936f93318ff01ee33e32381a115b19fb5a4d1202"}, - {file = "aiohttp-3.9.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f09960b5bb1017d16c0f9e9f7fc42160a5a49fa1e87a175fd4a2b1a1833ea0af"}, - {file = "aiohttp-3.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8303531e2c17b1a494ffaeba48f2da655fe932c4e9a2626c8718403c83e5dd2b"}, - {file = "aiohttp-3.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4790e44f46a4aa07b64504089def5744d3b6780468c4ec3a1a36eb7f2cae9814"}, - {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1d7edf74a36de0e5ca50787e83a77cf352f5504eb0ffa3f07000a911ba353fb"}, - {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94697c7293199c2a2551e3e3e18438b4cba293e79c6bc2319f5fd652fccb7456"}, - {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a1b66dbb8a7d5f50e9e2ea3804b01e766308331d0cac76eb30c563ac89c95985"}, - {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9623cfd9e85b76b83ef88519d98326d4731f8d71869867e47a0b979ffec61c73"}, - {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f32c86dc967ab8c719fd229ce71917caad13cc1e8356ee997bf02c5b368799bf"}, - {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f50b4663c3e0262c3a361faf440761fbef60ccdde5fe8545689a4b3a3c149fb4"}, - {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dcf71c55ec853826cd70eadb2b6ac62ec577416442ca1e0a97ad875a1b3a0305"}, - {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:42fe4fd9f0dfcc7be4248c162d8056f1d51a04c60e53366b0098d1267c4c9da8"}, - {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76a86a9989ebf82ee61e06e2bab408aec4ea367dc6da35145c3352b60a112d11"}, - {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f9e09a1c83521d770d170b3801eea19b89f41ccaa61d53026ed111cb6f088887"}, - {file = "aiohttp-3.9.0-cp38-cp38-win32.whl", hash = "sha256:a00ce44c21612d185c5275c5cba4bab8d7c1590f248638b667ed8a782fa8cd6f"}, - {file = "aiohttp-3.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:d5b9345ab92ebe6003ae11d8092ce822a0242146e6fa270889b9ba965457ca40"}, - {file = "aiohttp-3.9.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98d21092bf2637c5fa724a428a69e8f5955f2182bff61f8036827cf6ce1157bf"}, - {file = "aiohttp-3.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:35a68cd63ca6aaef5707888f17a70c36efe62b099a4e853d33dc2e9872125be8"}, - {file = "aiohttp-3.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7f6235c7475658acfc1769d968e07ab585c79f6ca438ddfecaa9a08006aee2"}, - {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db04d1de548f7a62d1dd7e7cdf7c22893ee168e22701895067a28a8ed51b3735"}, - {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:536b01513d67d10baf6f71c72decdf492fb7433c5f2f133e9a9087379d4b6f31"}, - {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c8b0a6487e8109427ccf638580865b54e2e3db4a6e0e11c02639231b41fc0f"}, - {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7276fe0017664414fdc3618fca411630405f1aaf0cc3be69def650eb50441787"}, - {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23170247ef89ffa842a02bbfdc425028574d9e010611659abeb24d890bc53bb8"}, - {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b1a2ea8252cacc7fd51df5a56d7a2bb1986ed39be9397b51a08015727dfb69bd"}, - {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2d71abc15ff7047412ef26bf812dfc8d0d1020d664617f4913df2df469f26b76"}, - {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d820162c8c2bdbe97d328cd4f417c955ca370027dce593345e437b2e9ffdc4d"}, - {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:2779f5e7c70f7b421915fd47db332c81de365678180a9f3ab404088f87ba5ff9"}, - {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:366bc870d7ac61726f32a489fbe3d1d8876e87506870be66b01aeb84389e967e"}, - {file = "aiohttp-3.9.0-cp39-cp39-win32.whl", hash = "sha256:1df43596b826022b14998f0460926ce261544fedefe0d2f653e1b20f49e96454"}, - {file = "aiohttp-3.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:9c196b30f1b1aa3363a69dd69079ae9bec96c2965c4707eaa6914ba099fb7d4f"}, - {file = "aiohttp-3.9.0.tar.gz", hash = "sha256:09f23292d29135025e19e8ff4f0a68df078fe4ee013bca0105b2e803989de92d"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = 
"sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = 
"aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = 
"aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, ] [package.dependencies] @@ -153,21 +153,22 @@ files = [ [[package]] name = "attrs" -version = "23.1.0" +version = "23.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, - {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, ] [package.extras] cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]", "pre-commit"] +dev = ["attrs[tests]", "pre-commit"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] tests = ["attrs[tests-no-zope]", 
"zope-interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] [[package]] name = "backoff" @@ -182,13 +183,13 @@ files = [ [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -334,25 +335,26 @@ cron = ["capturer (>=2.4)"] [[package]] name = "datasets" -version = "2.14.7" +version = "2.19.0" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" files = [ - {file = "datasets-2.14.7-py3-none-any.whl", hash = "sha256:1a64041a7da4f4130f736fc371c1f528b8ddd208cebe156400f65719bdbba79d"}, - {file = "datasets-2.14.7.tar.gz", hash = "sha256:394cf9b4ec0694b25945977b16ad5d18d5c15fb0e94141713eb8ead7452caf9e"}, + {file = "datasets-2.19.0-py3-none-any.whl", hash = "sha256:f57c5316e123d4721b970c68c1cb856505f289cda58f5557ffe745b49c011a8e"}, + {file = "datasets-2.19.0.tar.gz", hash = "sha256:0b47e08cc7af2c6800a42cadc4657b22a0afc7197786c8986d703c08d90886a6"}, ] [package.dependencies] aiohttp = "*" -dill = ">=0.3.0,<0.3.8" -fsspec = {version = ">=2023.1.0,<=2023.10.0", extras = ["http"]} -huggingface-hub = ">=0.14.0,<1.0.0" +dill = ">=0.3.0,<0.3.9" +filelock = "*" +fsspec = {version = ">=2023.1.0,<=2024.3.1", extras = ["http"]} +huggingface-hub = ">=0.21.2" multiprocess = "*" numpy = ">=1.17" packaging = "*" pandas = "*" -pyarrow = ">=8.0.0" +pyarrow = ">=12.0.0" pyarrow-hotfix = "*" pyyaml = ">=5.1" requests = ">=2.19.0" @@ -360,18 +362,18 @@ tqdm = ">=4.62.1" xxhash = "*" [package.extras] -apache-beam = ["apache-beam (>=2.26.0,<2.44.0)"] +apache-beam = ["apache-beam (>=2.26.0)"] audio = ["librosa", "soundfile (>=0.12.1)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] -docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] -jax = ["jax (>=0.2.8,!=0.3.2,<=0.3.25)", "jaxlib (>=0.1.65,<=0.3.25)"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", 
"polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"] +jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["black (>=23.1,<24.0)", "pyyaml (>=5.3.1)", "ruff (>=0.0.241)"] +quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] -tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] -tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] -tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] +tensorflow = ["tensorflow (>=2.6.0)"] +tensorflow-gpu = ["tensorflow (>=2.6.0)"] +tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] torch = ["torch"] vision = ["Pillow (>=6.2.1)"] @@ -424,27 +426,28 @@ training = ["Jinja2", "accelerate (>=0.11.0)", "datasets", "peft (>=0.6.0)", "pr [[package]] name = "dill" -version = "0.3.7" +version = "0.3.8" description = "serialize all of Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, - {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, ] [package.extras] graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", 
hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -452,104 +455,119 @@ test = ["pytest (>=6)"] [[package]] name = "filelock" -version = "3.13.1" +version = "3.13.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, - {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, + {file = "filelock-3.13.4-py3-none-any.whl", hash = "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f"}, + {file = "filelock-3.13.4.tar.gz", hash = "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] name = "frozenlist" -version = "1.4.0" +version = "1.4.1" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, - {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, - {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, - {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, - {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, - {file = 
"frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, - {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, - {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, - {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, - {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, - {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, ] [[package]] name = "fsspec" -version = "2023.10.0" +version = "2024.3.1" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, - {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, + {file = "fsspec-2024.3.1-py3-none-any.whl", hash = "sha256:918d18d41bf73f0e2b261824baeb1b124bcf771767e3a26425cd7dec3332f512"}, + {file = "fsspec-2024.3.1.tar.gz", hash = "sha256:f39780e282d7d117ffb42bb96992f8a90795e4d0fb0f661a70ca39fe9c43ded9"}, ] [package.dependencies] aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} -requests = {version = "*", optional = true, markers = "extra == \"http\""} [package.extras] abfs = ["adlfs"] @@ -566,7 +584,7 @@ github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow 
(>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] @@ -577,13 +595,13 @@ tqdm = ["tqdm"] [[package]] name = "googleapis-common-protos" -version = "1.61.0" +version = "1.63.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis-common-protos-1.61.0.tar.gz", hash = "sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b"}, - {file = "googleapis_common_protos-1.61.0-py2.py3-none-any.whl", hash = "sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0"}, + {file = "googleapis-common-protos-1.63.0.tar.gz", hash = "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e"}, + {file = "googleapis_common_protos-1.63.0-py2.py3-none-any.whl", hash = "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"}, ] [package.dependencies] @@ -611,69 +629,69 @@ testing = ["protobuf (>=4.21.9)"] [[package]] name = "grpcio" -version = "1.59.3" +version = "1.62.2" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-1.59.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:aca028a6c7806e5b61e5f9f4232432c52856f7fcb98e330b20b6bc95d657bdcc"}, - {file = "grpcio-1.59.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:19ad26a7967f7999c8960d2b9fe382dae74c55b0c508c613a6c2ba21cddf2354"}, - {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:72b71dad2a3d1650e69ad42a5c4edbc59ee017f08c32c95694172bc501def23c"}, - {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f0a11d82d0253656cc42e04b6a149521e02e755fe2e4edd21123de610fd1d4"}, - {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60cddafb70f9a2c81ba251b53b4007e07cca7389e704f86266e22c4bffd8bf1d"}, - {file = "grpcio-1.59.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6c75a1fa0e677c1d2b6d4196ad395a5c381dfb8385f07ed034ef667cdcdbcc25"}, - {file = "grpcio-1.59.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1d8e01438d5964a11167eec1edb5f85ed8e475648f36c834ed5db4ffba24ac8"}, - {file = "grpcio-1.59.3-cp310-cp310-win32.whl", hash = "sha256:c4b0076f0bf29ee62335b055a9599f52000b7941f577daa001c7ef961a1fbeab"}, - {file = "grpcio-1.59.3-cp310-cp310-win_amd64.whl", hash = "sha256:b1f00a3e6e0c3dccccffb5579fc76ebfe4eb40405ba308505b41ef92f747746a"}, - {file = "grpcio-1.59.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:3996aaa21231451161dc29df6a43fcaa8b332042b6150482c119a678d007dd86"}, - {file = "grpcio-1.59.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:cb4e9cbd9b7388fcb06412da9f188c7803742d06d6f626304eb838d1707ec7e3"}, - {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8022ca303d6c694a0d7acfb2b472add920217618d3a99eb4b14edc7c6a7e8fcf"}, - {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b36683fad5664283755a7f4e2e804e243633634e93cd798a46247b8e54e3cb0d"}, - {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8239b853226e4824e769517e1b5232e7c4dda3815b200534500338960fcc6118"}, - {file = "grpcio-1.59.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0511af8653fbda489ff11d542a08505d56023e63cafbda60e6e00d4e0bae86ea"}, - {file = "grpcio-1.59.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:e78dc982bda74cef2ddfce1c91d29b96864c4c680c634e279ed204d51e227473"}, - {file = "grpcio-1.59.3-cp311-cp311-win32.whl", hash = "sha256:6a5c3a96405966c023e139c3bcccb2c7c776a6f256ac6d70f8558c9041bdccc3"}, - {file = "grpcio-1.59.3-cp311-cp311-win_amd64.whl", hash = "sha256:ed26826ee423b11477297b187371cdf4fa1eca874eb1156422ef3c9a60590dd9"}, - {file = "grpcio-1.59.3-cp312-cp312-linux_armv7l.whl", hash = "sha256:45dddc5cb5227d30fa43652d8872dc87f086d81ab4b500be99413bad0ae198d7"}, - {file = "grpcio-1.59.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:1736496d74682e53dd0907fd515f2694d8e6a96c9a359b4080b2504bf2b2d91b"}, - {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:ddbd1a16138e52e66229047624de364f88a948a4d92ba20e4e25ad7d22eef025"}, - {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcfa56f8d031ffda902c258c84c4b88707f3a4be4827b4e3ab8ec7c24676320d"}, - {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2eb8f0c7c0c62f7a547ad7a91ba627a5aa32a5ae8d930783f7ee61680d7eb8d"}, - {file = "grpcio-1.59.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8d993399cc65e3a34f8fd48dd9ad7a376734564b822e0160dd18b3d00c1a33f9"}, - {file = "grpcio-1.59.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0bd141f4f41907eb90bda74d969c3cb21c1c62779419782a5b3f5e4b5835718"}, - {file = "grpcio-1.59.3-cp312-cp312-win32.whl", hash = "sha256:33b8fd65d4e97efa62baec6171ce51f9cf68f3a8ba9f866f4abc9d62b5c97b79"}, - {file = "grpcio-1.59.3-cp312-cp312-win_amd64.whl", hash = "sha256:0e735ed002f50d4f3cb9ecfe8ac82403f5d842d274c92d99db64cfc998515e07"}, - {file = "grpcio-1.59.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:ea40ce4404e7cca0724c91a7404da410f0144148fdd58402a5942971e3469b94"}, - {file = "grpcio-1.59.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83113bcc393477b6f7342b9f48e8a054330c895205517edc66789ceea0796b53"}, - {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:73afbac602b8f1212a50088193601f869b5073efa9855b3e51aaaec97848fc8a"}, - {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d61de1950b0b0699917b686b1ca108690702fcc2df127b8c9c9320f93e069"}, - {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd76057b5c9a4d68814610ef9226925f94c1231bbe533fdf96f6181f7d2ff9e"}, - {file = "grpcio-1.59.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:95d6fd804c81efe4879e38bfd84d2b26e339a0a9b797e7615e884ef4686eb47b"}, - {file = "grpcio-1.59.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0d42048b8a3286ea4134faddf1f9a59cf98192b94aaa10d910a25613c5eb5bfb"}, - {file = "grpcio-1.59.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4619fea15c64bcdd9d447cdbdde40e3d5f1da3a2e8ae84103d94a9c1df210d7e"}, - {file = "grpcio-1.59.3-cp38-cp38-linux_armv7l.whl", hash = "sha256:95b5506e70284ac03b2005dd9ffcb6708c9ae660669376f0192a710687a22556"}, - {file = "grpcio-1.59.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:9e17660947660ccfce56c7869032910c179a5328a77b73b37305cd1ee9301c2e"}, - {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:00912ce19914d038851be5cd380d94a03f9d195643c28e3ad03d355cc02ce7e8"}, - {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e58b3cadaa3c90f1efca26ba33e0d408b35b497307027d3d707e4bcd8de862a6"}, - {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d787ecadea865bdf78f6679f6f5bf4b984f18f659257ba612979df97a298b3c3"}, - {file = "grpcio-1.59.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0814942ba1bba269db4e760a34388640c601dece525c6a01f3b4ff030cc0db69"}, - {file = "grpcio-1.59.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fb111aa99d3180c361a35b5ae1e2c63750220c584a1344229abc139d5c891881"}, - {file = "grpcio-1.59.3-cp38-cp38-win32.whl", hash = "sha256:eb8ba504c726befe40a356ecbe63c6c3c64c9a439b3164f5a718ec53c9874da0"}, - {file = "grpcio-1.59.3-cp38-cp38-win_amd64.whl", hash = "sha256:cdbc6b32fadab9bebc6f49d3e7ec4c70983c71e965497adab7f87de218e84391"}, - {file = "grpcio-1.59.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:c82ca1e4be24a98a253d6dbaa216542e4163f33f38163fc77964b0f0d255b552"}, - {file = "grpcio-1.59.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:36636babfda14f9e9687f28d5b66d349cf88c1301154dc71c6513de2b6c88c59"}, - {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5f9b2e591da751ac7fdd316cc25afafb7a626dededa9b414f90faad7f3ccebdb"}, - {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a93a82876a4926bf451db82ceb725bd87f42292bacc94586045261f501a86994"}, - {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce31fa0bfdd1f2bb15b657c16105c8652186eab304eb512e6ae3b99b2fdd7d13"}, - {file = "grpcio-1.59.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:16da0e40573962dab6cba16bec31f25a4f468e6d05b658e589090fe103b03e3d"}, - {file = "grpcio-1.59.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d1a17372fd425addd5812049fa7374008ffe689585f27f802d0935522cf4b7"}, - {file = "grpcio-1.59.3-cp39-cp39-win32.whl", hash = "sha256:52cc38a7241b5f7b4a91aaf9000fdd38e26bb00d5e8a71665ce40cfcee716281"}, - {file = "grpcio-1.59.3-cp39-cp39-win_amd64.whl", hash = "sha256:b491e5bbcad3020a96842040421e508780cade35baba30f402df9d321d1c423e"}, - {file = "grpcio-1.59.3.tar.gz", hash = "sha256:7800f99568a74a06ebdccd419dd1b6e639b477dcaf6da77ea702f8fb14ce5f80"}, + {file = "grpcio-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:66344ea741124c38588a664237ac2fa16dfd226964cca23ddc96bd4accccbde5"}, + {file = "grpcio-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:5dab7ac2c1e7cb6179c6bfad6b63174851102cbe0682294e6b1d6f0981ad7138"}, + {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:3ad00f3f0718894749d5a8bb0fa125a7980a2f49523731a9b1fabf2b3522aa43"}, + {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e72ddfee62430ea80133d2cbe788e0d06b12f865765cb24a40009668bd8ea05"}, + {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53d3a59a10af4c2558a8e563aed9f256259d2992ae0d3037817b2155f0341de1"}, + {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1511a303f8074f67af4119275b4f954189e8313541da7b88b1b3a71425cdb10"}, + {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b94d41b7412ef149743fbc3178e59d95228a7064c5ab4760ae82b562bdffb199"}, + {file = "grpcio-1.62.2-cp310-cp310-win32.whl", hash = "sha256:a75af2fc7cb1fe25785be7bed1ab18cef959a376cdae7c6870184307614caa3f"}, + {file = "grpcio-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:80407bc007754f108dc2061e37480238b0dc1952c855e86a4fc283501ee6bb5d"}, + {file = "grpcio-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:c1624aa686d4b36790ed1c2e2306cc3498778dffaf7b8dd47066cf819028c3ad"}, + {file = 
"grpcio-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:1c1bb80299bdef33309dff03932264636450c8fdb142ea39f47e06a7153d3063"}, + {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:db068bbc9b1fa16479a82e1ecf172a93874540cb84be69f0b9cb9b7ac3c82670"}, + {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2cc8a308780edbe2c4913d6a49dbdb5befacdf72d489a368566be44cadaef1a"}, + {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0695ae31a89f1a8fc8256050329a91a9995b549a88619263a594ca31b76d756"}, + {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88b4f9ee77191dcdd8810241e89340a12cbe050be3e0d5f2f091c15571cd3930"}, + {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a0204532aa2f1afd467024b02b4069246320405bc18abec7babab03e2644e75"}, + {file = "grpcio-1.62.2-cp311-cp311-win32.whl", hash = "sha256:6e784f60e575a0de554ef9251cbc2ceb8790914fe324f11e28450047f264ee6f"}, + {file = "grpcio-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:112eaa7865dd9e6d7c0556c8b04ae3c3a2dc35d62ad3373ab7f6a562d8199200"}, + {file = "grpcio-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:65034473fc09628a02fb85f26e73885cf1ed39ebd9cf270247b38689ff5942c5"}, + {file = "grpcio-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d2c1771d0ee3cf72d69bb5e82c6a82f27fbd504c8c782575eddb7839729fbaad"}, + {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:3abe6838196da518863b5d549938ce3159d809218936851b395b09cad9b5d64a"}, + {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5ffeb269f10cedb4f33142b89a061acda9f672fd1357331dbfd043422c94e9e"}, + {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404d3b4b6b142b99ba1cff0b2177d26b623101ea2ce51c25ef6e53d9d0d87bcc"}, + {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:262cda97efdabb20853d3b5a4c546a535347c14b64c017f628ca0cc7fa780cc6"}, + {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17708db5b11b966373e21519c4c73e5a750555f02fde82276ea2a267077c68ad"}, + {file = "grpcio-1.62.2-cp312-cp312-win32.whl", hash = "sha256:b7ec9e2f8ffc8436f6b642a10019fc513722858f295f7efc28de135d336ac189"}, + {file = "grpcio-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:aa787b83a3cd5e482e5c79be030e2b4a122ecc6c5c6c4c42a023a2b581fdf17b"}, + {file = "grpcio-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cfd23ad29bfa13fd4188433b0e250f84ec2c8ba66b14a9877e8bce05b524cf54"}, + {file = "grpcio-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:af15e9efa4d776dfcecd1d083f3ccfb04f876d613e90ef8432432efbeeac689d"}, + {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:f4aa94361bb5141a45ca9187464ae81a92a2a135ce2800b2203134f7a1a1d479"}, + {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82af3613a219512a28ee5c95578eb38d44dd03bca02fd918aa05603c41018051"}, + {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55ddaf53474e8caeb29eb03e3202f9d827ad3110475a21245f3c7712022882a9"}, + {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79b518c56dddeec79e5500a53d8a4db90da995dfe1738c3ac57fe46348be049"}, + {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:a5eb4844e5e60bf2c446ef38c5b40d7752c6effdee882f716eb57ae87255d20a"}, + {file = "grpcio-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:aaae70364a2d1fb238afd6cc9fcb10442b66e397fd559d3f0968d28cc3ac929c"}, + {file = "grpcio-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:1bcfe5070e4406f489e39325b76caeadab28c32bf9252d3ae960c79935a4cc36"}, + {file = "grpcio-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:da6a7b6b938c15fa0f0568e482efaae9c3af31963eec2da4ff13a6d8ec2888e4"}, + {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:41955b641c34db7d84db8d306937b72bc4968eef1c401bea73081a8d6c3d8033"}, + {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c772f225483905f675cb36a025969eef9712f4698364ecd3a63093760deea1bc"}, + {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07ce1f775d37ca18c7a141300e5b71539690efa1f51fe17f812ca85b5e73262f"}, + {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26f415f40f4a93579fd648f48dca1c13dfacdfd0290f4a30f9b9aeb745026811"}, + {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:db707e3685ff16fc1eccad68527d072ac8bdd2e390f6daa97bc394ea7de4acea"}, + {file = "grpcio-1.62.2-cp38-cp38-win32.whl", hash = "sha256:589ea8e75de5fd6df387de53af6c9189c5231e212b9aa306b6b0d4f07520fbb9"}, + {file = "grpcio-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:3c3ed41f4d7a3aabf0f01ecc70d6b5d00ce1800d4af652a549de3f7cf35c4abd"}, + {file = "grpcio-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:162ccf61499c893831b8437120600290a99c0bc1ce7b51f2c8d21ec87ff6af8b"}, + {file = "grpcio-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:f27246d7da7d7e3bd8612f63785a7b0c39a244cf14b8dd9dd2f2fab939f2d7f1"}, + {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:2507006c8a478f19e99b6fe36a2464696b89d40d88f34e4b709abe57e1337467"}, + {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a90ac47a8ce934e2c8d71e317d2f9e7e6aaceb2d199de940ce2c2eb611b8c0f4"}, + {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99701979bcaaa7de8d5f60476487c5df8f27483624f1f7e300ff4669ee44d1f2"}, + {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:af7dc3f7a44f10863b1b0ecab4078f0a00f561aae1edbd01fd03ad4dcf61c9e9"}, + {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fa63245271920786f4cb44dcada4983a3516be8f470924528cf658731864c14b"}, + {file = "grpcio-1.62.2-cp39-cp39-win32.whl", hash = "sha256:c6ad9c39704256ed91a1cffc1379d63f7d0278d6a0bad06b0330f5d30291e3a3"}, + {file = "grpcio-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:16da954692fd61aa4941fbeda405a756cd96b97b5d95ca58a92547bba2c1624f"}, + {file = "grpcio-1.62.2.tar.gz", hash = "sha256:c77618071d96b7a8be2c10701a98537823b9c65ba256c0b9067e0594cdbd954d"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.59.3)"] +protobuf = ["grpcio-tools (>=1.62.2)"] [[package]] name = "grpcio-reflection" @@ -768,45 +786,85 @@ setuptools = "*" [[package]] name = "hf-transfer" -version = "0.1.4" +version = "0.1.6" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "hf_transfer-0.1.4-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:6ff5fbde30a5bed35ef8f0d4ba78bde9f6d60a233dbff78a0e4035d6e6f71e4c"}, - {file = "hf_transfer-0.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:d1c5c20f76e7f3451cff476b85c55dcb8566ebc94a596cb9eb39c0bb75db8675"}, - {file = "hf_transfer-0.1.4-cp310-none-win_amd64.whl", hash = "sha256:84c3ce20c68863a7d998711b98726ba9ae8f2e3fc0d685bc2c9ac9833c0f4048"}, - {file = "hf_transfer-0.1.4-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:dab1cf4e2e6fcb963fe0e48e6b5e3a95cf65ee376c7b6618a05dbb2ef0dde183"}, - {file = "hf_transfer-0.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c9c7aef90facf45391c86131ed00e74333637735cfec52da4f5170004d0b3f"}, - {file = "hf_transfer-0.1.4-cp311-none-win_amd64.whl", hash = "sha256:eca1fe6ae145e88455d0a174248080498cea52ad45cee50702070b47dffa421f"}, - {file = "hf_transfer-0.1.4-cp312-cp312-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:d07c0d26b5c01ad50d22ddcff7d30c4e8cbb823565b7f61e0ddb35f7faeae415"}, - {file = "hf_transfer-0.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b9cf169c3c64883b07f7ded5e3f14ae1d437eb77448738b88c923fc5597c47"}, - {file = "hf_transfer-0.1.4-cp312-none-win_amd64.whl", hash = "sha256:6b8518b9ebb85b0238745be81f7b88383c7ea216dd8407d46444bcc7806dc0ef"}, - {file = "hf_transfer-0.1.4-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ea32e9f91de3f2dad3567577c293f2e81a9309e680def4712ec0c4ea49be6833"}, - {file = "hf_transfer-0.1.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81a10dbf2ac534083da06c200456b5d10ba7a1e8c4c5c48f7ea1ca4cf6af474"}, - {file = "hf_transfer-0.1.4-cp37-none-win_amd64.whl", hash = "sha256:97555bbff69a0459712e5d25d659c0dc74cb8f9726562ca66241f1e1b081f6a9"}, - {file = "hf_transfer-0.1.4-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:38bce7a511952e1b804168e956cd3a3b1ff7e38828259c3cdae27614060b90c5"}, - {file = "hf_transfer-0.1.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1977e94e8c8fc8a0e9ce74a651d4694629e526da246a492855fcfb710aa489"}, - {file = "hf_transfer-0.1.4-cp38-none-win_amd64.whl", hash = "sha256:6ca2d2c40e5e94c5de7e502037ad23ac1d803a2a12760b15b3e3f88c616202bd"}, - {file = "hf_transfer-0.1.4-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:c04a93acb58e50b8da1e2258185e54f6bf48ba24bf95e470310178b7047c1017"}, - {file = "hf_transfer-0.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3028a807363e0b2c64985c44732ba4ab187a569f013367d2115a6e09ae95031"}, - {file = "hf_transfer-0.1.4-cp39-none-win_amd64.whl", hash = "sha256:dc9c7c1d0d79fc06baf86d41620623bb6bb2736755329ea6b1ec5faf71e3e36b"}, - {file = "hf_transfer-0.1.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a466ae2b11d72df9e0005eb8ff7f537d5460c98b64fb6e49f3076ee14040dcf"}, - {file = "hf_transfer-0.1.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb34a023276936d4716112e17daea4ff98afc35b6113dd0f0383710dc208c058"}, - {file = "hf_transfer-0.1.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0647b84d7ff0eee1de6479179a5d43d0695001733f17eecc00153f0f8ab1ac"}, - {file = "hf_transfer-0.1.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27d0bc1f8b79a6d65751efbce7eb02d2c1bd7e4de1a46aac18995461590ce4dd"}, - {file = "hf_transfer-0.1.4.tar.gz", hash = "sha256:687e090639cd52a48dedbfaa9e455a2c99c5169ece3d911f95983b1d4d4c84ed"}, 
+ {file = "hf_transfer-0.1.6-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6fd3d61f9229d27def007e53540412507b74ac2fdb1a29985ae0b6a5137749a2"}, + {file = "hf_transfer-0.1.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b043bb78df1225de043eb041de9d97783fcca14a0bdc1b1d560fc172fc21b648"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7db60dd18eae4fa6ea157235fb82196cde5313995b396d1b591aad3b790a7f8f"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:30d31dbab9b5a558cce407b8728e39d87d7af1ef8745ddb90187e9ae0b9e1e90"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6b368bddd757efc7af3126ba81f9ac8f9435e2cc00902cb3d64f2be28d8f719"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa2086d8aefaaa3e144e167324574882004c0cec49bf2d0638ec4b74732d8da0"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45d8985a0940bfe1535cb4ca781f5c11e47c83798ef3373ee1f5d57bbe527a9c"}, + {file = "hf_transfer-0.1.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f42b89735f1cde22f2a795d1f0915741023235666be7de45879e533c7d6010c"}, + {file = "hf_transfer-0.1.6-cp310-none-win32.whl", hash = "sha256:2d2c4c4613f3ad45b6ce6291e347b2d3ba1b86816635681436567e461cb3c961"}, + {file = "hf_transfer-0.1.6-cp310-none-win_amd64.whl", hash = "sha256:78b0eed8d8dce60168a46e584b9742b816af127d7e410a713e12c31249195342"}, + {file = "hf_transfer-0.1.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f1d8c172153f9a6cdaecf137612c42796076f61f6bea1072c90ac2e17c1ab6fa"}, + {file = "hf_transfer-0.1.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2c601996351f90c514a75a0eeb02bf700b1ad1db2d946cbfe4b60b79e29f0b2f"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e585c808405557d3f5488f385706abb696997bbae262ea04520757e30836d9d"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec51af1e8cf4268c268bd88932ade3d7ca895a3c661b42493503f02610ae906b"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d106fdf996332f6df3ed3fab6d6332df82e8c1fb4b20fd81a491ca4d2ab5616a"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9c2ee9e9fde5a0319cc0e8ddfea10897482bc06d5709b10a238f1bc2ebcbc0b"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f394ea32bc7802b061e549d3133efc523b4ae4fd19bf4b74b183ca6066eef94e"}, + {file = "hf_transfer-0.1.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4282f09902114cd67fca98a1a1bad569a44521a8395fedf327e966714f68b977"}, + {file = "hf_transfer-0.1.6-cp311-none-win32.whl", hash = "sha256:276dbf307d5ab6f1bcbf57b5918bfcf9c59d6848ccb28242349e1bb5985f983b"}, + {file = "hf_transfer-0.1.6-cp311-none-win_amd64.whl", hash = "sha256:fa475175c51451186bea804471995fa8e7b2a48a61dcca55534911dc25955527"}, + {file = "hf_transfer-0.1.6-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:23d157a67acfa00007799323a1c441b2bbacc7dee625b016b7946fe0e25e6c89"}, + {file = "hf_transfer-0.1.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6067342a2864b988f861cd2d31bd78eb1e84d153a3f6df38485b6696d9ad3013"}, + {file = 
"hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91cfcb3070e205b58fa8dc8bcb6a62ccc40913fcdb9cd1ff7c364c8e3aa85345"}, + {file = "hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb76064ac5165d5eeaaf8d0903e8bf55477221ecc2a4a4d69f0baca065ab905b"}, + {file = "hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dabd3a177d83028f164984cf4dd859f77ec1e20c97a6f307ff8fcada0785ef1"}, + {file = "hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0bf4254e44f64a26e0a5b73b5d7e8d91bb36870718fb4f8e126ec943ff4c805"}, + {file = "hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d32c1b106f38f336ceb21531f4db9b57d777b9a33017dafdb6a5316388ebe50"}, + {file = "hf_transfer-0.1.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff05aba3c83921e5c7635ba9f07c693cc893350c447644824043aeac27b285f5"}, + {file = "hf_transfer-0.1.6-cp312-none-win32.whl", hash = "sha256:051ef0c55607652cb5974f59638da035773254b9a07d7ee5b574fe062de4c9d1"}, + {file = "hf_transfer-0.1.6-cp312-none-win_amd64.whl", hash = "sha256:716fb5c574fcbdd8092ce73f9b6c66f42e3544337490f77c60ec07df02bd081b"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0c981134a55965e279cb7be778c1ccaf93f902fc9ebe31da4f30caf824cc4d"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ef1f145f04c5b573915bcb1eb5db4039c74f6b46fce73fc473c4287e613b623"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0a7609b004db3347dbb7796df45403eceb171238210d054d93897d6d84c63a4"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60f0864bf5996773dbd5f8ae4d1649041f773fe9d5769f4c0eeb5553100acef3"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d01e55d630ffe70a4f5d0ed576a04c6a48d7c65ca9a7d18f2fca385f20685a9"}, + {file = "hf_transfer-0.1.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d855946c5062b665190de15b2bdbd4c8eddfee35350bfb7564592e23d36fbbd3"}, + {file = "hf_transfer-0.1.6-cp37-none-win32.whl", hash = "sha256:fd40b2409cfaf3e8aba20169ee09552f69140e029adeec261b988903ff0c8f6f"}, + {file = "hf_transfer-0.1.6-cp37-none-win_amd64.whl", hash = "sha256:0e0eba49d46d3b5481919aea0794aec625fbc6ecdf13fe7e0e9f3fc5d5ad5971"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e669fecb29fc454449739f9f53ed9253197e7c19e6a6eaa0f08334207af4287"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:89f701802892e5eb84f89f402686861f87dc227d6082b05f4e9d9b4e8015a3c3"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f2b0c8b95b01409275d789a9b74d5f2e146346f985d384bf50ec727caf1ccc"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa855a2fa262792a230f9efcdb5da6d431b747d1861d2a69fe7834b19aea077e"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa8ca349afb2f0713475426946261eb2035e4efb50ebd2c1d5ad04f395f4217"}, + {file = "hf_transfer-0.1.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:01255f043996bc7d1bae62d8afc5033a90c7e36ce308b988eeb84afe0a69562f"}, + {file = "hf_transfer-0.1.6-cp38-none-win32.whl", hash = "sha256:60b1db183e8a7540cd4f8b2160ff4de55f77cb0c3fc6a10be1e7c30eb1b2bdeb"}, + {file = "hf_transfer-0.1.6-cp38-none-win_amd64.whl", hash = "sha256:fb8be3cba6aaa50ab2e9dffbd25c8eb2046785eeff642cf0cdd0dd9ae6be3539"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d09af35e3e3f09b664e6429e9a0dc200f29c5bdfd88bdd9666de51183b1fe202"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a4505bd707cc14d85c800f961fad8ca76f804a8ad22fbb7b1a217d8d0c15e6a5"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c453fd8b0be9740faa23cecd1f28ee9ead7d900cefa64ff836960c503a744c9"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13cb8884e718a78c3b81a8cdec9c7ac196dd42961fce55c3ccff3dd783e5ad7a"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39cd39df171a2b5404de69c4e6cd14eee47f6fe91c1692f939bfb9e59a0110d8"}, + {file = "hf_transfer-0.1.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ff0629ee9f98df57a783599602eb498f9ec3619dc69348b12e4d9d754abf0e9"}, + {file = "hf_transfer-0.1.6-cp39-none-win32.whl", hash = "sha256:164a6ce445eb0cc7c645f5b6e1042c003d33292520c90052b6325f30c98e4c5f"}, + {file = "hf_transfer-0.1.6-cp39-none-win_amd64.whl", hash = "sha256:11b8b4b73bf455f13218c5f827698a30ae10998ca31b8264b51052868c7a9f11"}, + {file = "hf_transfer-0.1.6-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16957ba057376a99ea361074ce1094f61b58e769defa6be2422ae59c0b6a6530"}, + {file = "hf_transfer-0.1.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db952112e3b8ee1a5cbf500d2443e9ce4fb893281c5310a3e31469898628005"}, + {file = "hf_transfer-0.1.6-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d39d826a7344f5e39f438d62632acd00467aa54a083b66496f61ef67a9885a56"}, + {file = "hf_transfer-0.1.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4e2653fbfa92e7651db73d99b697c8684e7345c479bd6857da80bed6138abb2"}, + {file = "hf_transfer-0.1.6-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:144277e6a86add10b90ec3b583253aec777130312256bfc8d5ade5377e253807"}, + {file = "hf_transfer-0.1.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bb53bcd16365313b2aa0dbdc28206f577d70770f31249cdabc387ac5841edcc"}, + {file = "hf_transfer-0.1.6-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:990d73a5a68d8261980f146c51f4c5f9995314011cb225222021ad7c39f3af2d"}, + {file = "hf_transfer-0.1.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:652406037029ab9b4097b4c5f29321bad5f64c2b46fbff142509d918aec87c29"}, + {file = "hf_transfer-0.1.6.tar.gz", hash = "sha256:deb505a7d417d7055fd7b3549eadb91dfe782941261f3344025c486c16d1d2f9"}, ] [[package]] name = "huggingface-hub" -version = "0.20.3" +version = "0.22.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.20.3-py3-none-any.whl", hash = 
"sha256:d988ae4f00d3e307b0c80c6a05ca6dbb7edba8bba3079f74cda7d9c2e562a7b6"}, - {file = "huggingface_hub-0.20.3.tar.gz", hash = "sha256:94e7f8e074475fbc67d6a71957b678e1b4a74ff1b64a644fd6cbb83da962d05d"}, + {file = "huggingface_hub-0.22.2-py3-none-any.whl", hash = "sha256:3429e25f38ccb834d310804a3b711e7e4953db5a9e420cc147a5e194ca90fd17"}, + {file = "huggingface_hub-0.22.2.tar.gz", hash = "sha256:32e9a9a6843c92f253ff9ca16b9985def4d80a93fb357af5353f770ef74a81be"}, ] [package.dependencies] @@ -819,15 +877,17 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp", "minijinja (>=1.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.3.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -846,33 +906,33 @@ pyreadline3 = {version = "*", 
markers = "sys_platform == \"win32\" and python_ve [[package]] name = "idna" -version = "3.4" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] name = "importlib-metadata" -version = "7.0.1" +version = "7.1.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, - {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, + {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, + {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "iniconfig" @@ -887,13 +947,13 @@ files = [ [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.3" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [package.dependencies] @@ -922,71 +982,71 @@ dev = ["Sphinx (>=4.1.1)", "black (>=19.10b0)", "colorama (>=0.3.4)", "docutils [[package]] name = "markupsafe" -version = "2.1.3" +version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = 
"MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = 
"sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = 
"MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] @@ -1008,114 +1068,126 @@ tests = ["pytest (>=4.6)"] [[package]] name = "multidict" -version = "6.0.4" +version = "6.0.5" description = "multidict implementation" optional = false python-versions = ">=3.7" files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", 
hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - 
{file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, + {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, + {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, + {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, + {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, + {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, + {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, + {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, + {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, + {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, + {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, + {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, + {file 
= "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, + {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, + {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, + {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, + {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, ] [[package]] name = "multiprocess" -version = "0.70.15" +version = "0.70.16" description = "better multiprocessing and multithreading in Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "multiprocess-0.70.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa36c7ed16f508091438687fe9baa393a7a8e206731d321e443745e743a0d4e5"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:20e024018c46d0d1602024c613007ac948f9754659e3853b0aa705e83f6931d8"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:e576062981c91f0fe8a463c3d52506e598dfc51320a8dd8d78b987dfca91c5db"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e73f497e6696a0f5433ada2b3d599ae733b87a6e8b008e387c62ac9127add177"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:73db2e7b32dcc7f9b0f075c2ffa45c90b6729d3f1805f27e88534c8d321a1be5"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:4271647bd8a49c28ecd6eb56a7fdbd3c212c45529ad5303b40b3c65fc6928e5f"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cf981fb998d6ec3208cb14f0cf2e9e80216e834f5d51fd09ebc937c32b960902"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:18f9f2c7063346d1617bd1684fdcae8d33380ae96b99427260f562e1a1228b67"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:0eac53214d664c49a34695e5824872db4006b1a465edd7459a251809c3773370"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1a51dd34096db47fb21fa2b839e615b051d51b97af9a67afbcdaa67186b44883"}, - {file = "multiprocess-0.70.15-py310-none-any.whl", hash = "sha256:7dd58e33235e83cf09d625e55cffd7b0f0eede7ee9223cdd666a87624f60c21a"}, - {file = "multiprocess-0.70.15-py311-none-any.whl", hash = "sha256:134f89053d82c9ed3b73edd3a2531eb791e602d4f4156fc92a79259590bd9670"}, - {file = "multiprocess-0.70.15-py37-none-any.whl", hash = "sha256:f7d4a1629bccb433114c3b4885f69eccc200994323c80f6feee73b0edc9199c5"}, - {file = "multiprocess-0.70.15-py38-none-any.whl", hash = "sha256:bee9afba476c91f9ebee7beeee0601face9eff67d822e893f9a893725fbd6316"}, - {file = "multiprocess-0.70.15-py39-none-any.whl", hash = "sha256:3e0953f5d52b4c76f1c973eaf8214554d146f2be5decb48e928e55c7a2d19338"}, - {file = "multiprocess-0.70.15.tar.gz", hash = "sha256:f20eed3036c0ef477b07a4177cf7c1ba520d9a2677870a4f47fe026f0cd6787e"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-macosx_10_13_x86_64.whl", hash = "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a"}, + {file = "multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02"}, + {file = "multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a"}, + {file = "multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e"}, + {file = "multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435"}, + {file = "multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3"}, + {file = "multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1"}, ] [package.dependencies] -dill = ">=0.3.7" +dill = ">=0.3.8" [[package]] name = "networkx" @@ -1137,47 +1209,47 @@ test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "numpy" -version = "1.26.2" +version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, - {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, - {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, - {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, - {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, - {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, - {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, - {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, - {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, - {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, - {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = 
"numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] [[package]] @@ -1291,23 +1363,23 @@ nvidia-nvjitlink-cu12 = "*" [[package]] name = "nvidia-nccl-cu12" -version = "2.18.1" +version = "2.19.3" description = "NVIDIA Collective Communication Library (NCCL) Runtime" 
 optional = false
 python-versions = ">=3"
 files = [
-    {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"},
+    {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"},
 ]

 [[package]]
 name = "nvidia-nvjitlink-cu12"
-version = "12.3.101"
+version = "12.4.127"
 description = "Nvidia JIT LTO Library"
 optional = false
 python-versions = ">=3"
 files = [
-    {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"},
-    {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"},
+    {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57"},
+    {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1"},
 ]

 [[package]]
@@ -1477,13 +1549,13 @@ files = [

 [[package]]
 name = "optimum"
-version = "1.17.1"
+version = "1.19.0"
 description = "Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to integrate third-party libraries from Hardware Partners and interface with their specific functionality."
 optional = false
 python-versions = ">=3.7.0"
 files = [
-    {file = "optimum-1.17.1-py3-none-any.whl", hash = "sha256:508bc55db3c9434f4e8d5a30c39a46ac63c4cdb45bcc5a641b6c1c77cae88d23"},
-    {file = "optimum-1.17.1.tar.gz", hash = "sha256:e59af717e8691b11903fe2cfb8c6efd6f6798b0417f3e70d231e578a02448ceb"},
+    {file = "optimum-1.19.0-py3-none-any.whl", hash = "sha256:b259b2379f6904d7e1bef3f9ab1c3f22ae6c42357b416311950cd65526bb485e"},
+    {file = "optimum-1.19.0.tar.gz", hash = "sha256:a1eb134d70d3093eca68160fcf84c1aff8887726641f3362adca878fda095e3d"},
 ]

 [package.dependencies]
@@ -1494,30 +1566,30 @@ numpy = "*"
 packaging = "*"
 sympy = "*"
 torch = ">=1.11"
-transformers = {version = ">=4.26.0", extras = ["sentencepiece"]}
+transformers = {version = ">=4.26.0,<4.40.0", extras = ["sentencepiece"]}

 [package.extras]
 amd = ["optimum-amd"]
 benchmark = ["evaluate (>=0.2.0)", "optuna", "scikit-learn", "seqeval", "torchvision", "tqdm"]
-dev = ["Pillow", "accelerate", "black (>=23.1,<24.0)", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest", "pytest-xdist", "requests", "rjieba", "ruff (==0.1.5)", "sacremoses", "scikit-learn", "timm", "torchaudio", "torchvision"]
+dev = ["Pillow", "accelerate", "black (>=23.1,<24.0)", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest (<=8.0.0)", "pytest-xdist", "requests", "rjieba", "ruff (==0.1.5)", "sacremoses", "scikit-learn", "timm", "torchaudio", "torchvision"]
 diffusers = ["diffusers"]
 doc-build = ["accelerate"]
 exporters = ["onnx", "onnxruntime", "timm"]
 exporters-gpu = ["onnx", "onnxruntime-gpu", "timm"]
-exporters-tf = ["h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm"]
+exporters-tf = ["h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm", "transformers[sentencepiece] (>=4.26.0,<4.38.0)"]
 furiosa = ["optimum-furiosa"]
 graphcore = ["optimum-graphcore"]
 habana = ["optimum-habana", "transformers (>=4.37.0,<4.38.0)"]
 intel = ["optimum-intel (>=1.15.0)"]
neural-compressor = ["optimum-intel[neural-compressor] (>=1.15.0)"] -neuron = ["optimum-neuron[neuron]"] -neuronx = ["optimum-neuron[neuronx]"] +neuron = ["optimum-neuron[neuron] (>=0.0.20)", "transformers (==4.36.2)"] +neuronx = ["optimum-neuron[neuronx] (>=0.0.20)", "transformers (==4.36.2)"] nncf = ["optimum-intel[nncf] (>=1.15.0)"] onnxruntime = ["datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime (>=1.11.0)", "protobuf (>=3.20.1)"] onnxruntime-gpu = ["accelerate", "datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime-gpu (>=1.11.0)", "protobuf (>=3.20.1)"] openvino = ["optimum-intel[openvino] (>=1.15.0)"] quality = ["black (>=23.1,<24.0)", "ruff (==0.1.5)"] -tests = ["Pillow", "accelerate", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest", "pytest-xdist", "requests", "rjieba", "sacremoses", "scikit-learn", "timm", "torchaudio", "torchvision"] +tests = ["Pillow", "accelerate", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest (<=8.0.0)", "pytest-xdist", "requests", "rjieba", "sacremoses", "scikit-learn", "timm", "torchaudio", "torchvision"] [[package]] name = "optimum-habana" @@ -1543,82 +1615,87 @@ tests = ["GitPython", "datasets", "optuna", "parameterized", "psutil", "pytest", [[package]] name = "packaging" -version = "23.2" +version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [[package]] name = "pandas" -version = "2.1.3" +version = "2.2.2" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acf08a73b5022b479c1be155d4988b72f3020f308f7a87c527702c5f8966d34f"}, - {file = "pandas-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3cc4469ff0cf9aa3a005870cb49ab8969942b7156e0a46cc3f5abd6b11051dfb"}, - {file = "pandas-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35172bff95f598cc5866c047f43c7f4df2c893acd8e10e6653a4b792ed7f19bb"}, - {file = "pandas-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59dfe0e65a2f3988e940224e2a70932edc964df79f3356e5f2997c7d63e758b4"}, - {file = "pandas-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0296a66200dee556850d99b24c54c7dfa53a3264b1ca6f440e42bad424caea03"}, - {file = "pandas-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:465571472267a2d6e00657900afadbe6097c8e1dc43746917db4dfc862e8863e"}, - {file = "pandas-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04d4c58e1f112a74689da707be31cf689db086949c71828ef5da86727cfe3f82"}, - {file = "pandas-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fa2ad4ff196768ae63a33f8062e6838efed3a319cf938fdf8b95e956c813042"}, - {file = "pandas-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4441ac94a2a2613e3982e502ccec3bdedefe871e8cea54b8775992485c5660ef"}, - {file = 
"pandas-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ded6ff28abbf0ea7689f251754d3789e1edb0c4d0d91028f0b980598418a58"}, - {file = "pandas-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca5680368a5139d4920ae3dc993eb5106d49f814ff24018b64d8850a52c6ed2"}, - {file = "pandas-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:de21e12bf1511190fc1e9ebc067f14ca09fccfb189a813b38d63211d54832f5f"}, - {file = "pandas-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a5d53c725832e5f1645e7674989f4c106e4b7249c1d57549023ed5462d73b140"}, - {file = "pandas-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7cf4cf26042476e39394f1f86868d25b265ff787c9b2f0d367280f11afbdee6d"}, - {file = "pandas-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72c84ec1b1d8e5efcbff5312abe92bfb9d5b558f11e0cf077f5496c4f4a3c99e"}, - {file = "pandas-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f539e113739a3e0cc15176bf1231a553db0239bfa47a2c870283fd93ba4f683"}, - {file = "pandas-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc77309da3b55732059e484a1efc0897f6149183c522390772d3561f9bf96c00"}, - {file = "pandas-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:08637041279b8981a062899da0ef47828df52a1838204d2b3761fbd3e9fcb549"}, - {file = "pandas-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b99c4e51ef2ed98f69099c72c75ec904dd610eb41a32847c4fcbc1a975f2d2b8"}, - {file = "pandas-2.1.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7ea8ae8004de0381a2376662c0505bb0a4f679f4c61fbfd122aa3d1b0e5f09d"}, - {file = "pandas-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcd76d67ca2d48f56e2db45833cf9d58f548f97f61eecd3fdc74268417632b8a"}, - {file = "pandas-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1329dbe93a880a3d7893149979caa82d6ba64a25e471682637f846d9dbc10dd2"}, - {file = "pandas-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:321ecdb117bf0f16c339cc6d5c9a06063854f12d4d9bc422a84bb2ed3207380a"}, - {file = "pandas-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:11a771450f36cebf2a4c9dbd3a19dfa8c46c4b905a3ea09dc8e556626060fe71"}, - {file = "pandas-2.1.3.tar.gz", hash = "sha256:22929f84bca106921917eb73c1521317ddd0a4c71b395bcf767a106e3494209f"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, + {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, + {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, + {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, + {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, + {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, + 
{file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, + {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, + {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, + {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, + {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, + {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, + {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, + {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, + {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"}, + {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"}, + {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"}, + {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"}, + {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"}, + {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"}, + {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"}, + {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, ] [package.dependencies] numpy = [ - {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] 
=2.8.2"">
 python-dateutil = ">=2.8.2"
 pytz = ">=2020.1"
-tzdata = ">=2022.1"
+tzdata = ">=2022.7"

 [package.extras]
-all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
-aws = ["s3fs (>=2022.05.0)"]
-clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
-compression = ["zstandard (>=0.17.0)"]
-computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"]
+all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
+aws = ["s3fs (>=2022.11.0)"]
+clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
+compression = ["zstandard (>=0.19.0)"]
+computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
 consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
-excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"]
-feather = ["pyarrow (>=7.0.0)"]
-fss = ["fsspec (>=2022.05.0)"]
-gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"]
-hdf5 = ["tables (>=3.7.0)"]
-html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"]
-mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"]
-output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"]
-parquet = ["pyarrow (>=7.0.0)"]
-performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"]
-plot = ["matplotlib (>=3.6.1)"]
-postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
-spss = ["pyreadstat (>=1.1.5)"]
-sql-other = ["SQLAlchemy (>=1.4.36)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
+feather = ["pyarrow (>=10.0.1)"]
+fss = ["fsspec (>=2022.11.0)"]
+gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
+hdf5 = ["tables (>=3.8.0)"]
+html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
+mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
+parquet = ["pyarrow (>=10.0.1)"]
+performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
+plot = ["matplotlib (>=3.6.3)"]
+postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.8.0)"] +xml = ["lxml (>=4.9.2)"] [[package]] name = "peft" @@ -1649,80 +1726,99 @@ test = ["black (>=22.0,<23.0)", "datasets", "diffusers", "hf-doc-builder", "para [[package]] name = "pillow" -version = "10.1.0" +version = "10.3.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, - {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, - {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, - {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = 
"sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, - {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, - {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, - {file = 
"Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, - {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, - {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + 
{file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, + {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = 
"pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, ] [package.extras] docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -1762,27 +1858,27 @@ files = [ [[package]] name = "psutil" -version = "5.9.6" +version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." 
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
 files = [
-    {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"},
-    {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"},
-    {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"},
-    {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"},
-    {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"},
-    {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"},
-    {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"},
-    {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"},
-    {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"},
-    {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"},
-    {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"},
-    {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"},
-    {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"},
-    {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"},
-    {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"},
-    {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"},
+    {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"},
+    {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"},
+    {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"},
+    {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"},
+    {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"},
+    {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"},
+    {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"},
+    {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"},
+    {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"},
+    {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"},
+    {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"},
+    {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"},
+    {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"},
+    {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"},
+    {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"},
+    {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"},
 ]

 [package.extras]
@@ -1790,51 +1886,51 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]

 [[package]]
 name = "pyarrow"
-version = "14.0.1"
+version = "15.0.2"
 description = "Python library for Apache Arrow"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pyarrow-14.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:96d64e5ba7dceb519a955e5eeb5c9adcfd63f73a56aea4722e2cc81364fc567a"},
-    {file = "pyarrow-14.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a8ae88c0038d1bc362a682320112ee6774f006134cd5afc291591ee4bc06505"},
-    {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f6f053cb66dc24091f5511e5920e45c83107f954a21032feadc7b9e3a8e7851"},
-    {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:906b0dc25f2be12e95975722f1e60e162437023f490dbd80d0deb7375baf3171"},
-    {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:78d4a77a46a7de9388b653af1c4ce539350726cd9af62e0831e4f2bd0c95a2f4"},
-    {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06ca79080ef89d6529bb8e5074d4b4f6086143b2520494fcb7cf8a99079cde93"},
-    {file = "pyarrow-14.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:32542164d905002c42dff896efdac79b3bdd7291b1b74aa292fac8450d0e4dcd"},
-    {file = "pyarrow-14.0.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:c7331b4ed3401b7ee56f22c980608cf273f0380f77d0f73dd3c185f78f5a6220"},
-    {file = "pyarrow-14.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:922e8b49b88da8633d6cac0e1b5a690311b6758d6f5d7c2be71acb0f1e14cd61"},
-    {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c889851ca33f992ea916b48b8540735055201b177cb0dcf0596a495a667b00"},
-    {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30d8494870d9916bb53b2a4384948491444741cb9a38253c590e21f836b01222"},
-    {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:be28e1a07f20391bb0b15ea03dcac3aade29fc773c5eb4bee2838e9b2cdde0cb"},
-    {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:981670b4ce0110d8dcb3246410a4aabf5714db5d8ea63b15686bce1c914b1f83"},
-    {file = "pyarrow-14.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:4756a2b373a28f6166c42711240643fb8bd6322467e9aacabd26b488fa41ec23"},
-    {file = "pyarrow-14.0.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:cf87e2cec65dd5cf1aa4aba918d523ef56ef95597b545bbaad01e6433851aa10"},
-    {file = "pyarrow-14.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:470ae0194fbfdfbf4a6b65b4f9e0f6e1fa0ea5b90c1ee6b65b38aecee53508c8"},
-    {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6263cffd0c3721c1e348062997babdf0151301f7353010c9c9a8ed47448f82ab"},
-    {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8089d7e77d1455d529dbd7cff08898bbb2666ee48bc4085203af1d826a33cc"},
-    {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fada8396bc739d958d0b81d291cfd201126ed5e7913cb73de6bc606befc30226"},
-    {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a145dab9ed7849fc1101bf03bcdc69913547f10513fdf70fc3ab6c0a50c7eee"},
-    {file = "pyarrow-14.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:05fe7994745b634c5fb16ce5717e39a1ac1fac3e2b0795232841660aa76647cd"},
-    {file = "pyarrow-14.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a8eeef015ae69d104c4c3117a6011e7e3ecd1abec79dc87fd2fac6e442f666ee"},
-    {file = "pyarrow-14.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c76807540989fe8fcd02285dd15e4f2a3da0b09d27781abec3adc265ddbeba1"},
-    {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:450e4605e3c20e558485f9161a79280a61c55efe585d51513c014de9ae8d393f"},
-    {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:323cbe60210173ffd7db78bfd50b80bdd792c4c9daca8843ef3cd70b186649db"},
-    {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0140c7e2b740e08c5a459439d87acd26b747fc408bde0a8806096ee0baaa0c15"},
-    {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:e592e482edd9f1ab32f18cd6a716c45b2c0f2403dc2af782f4e9674952e6dd27"},
-    {file = "pyarrow-14.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:d264ad13605b61959f2ae7c1d25b1a5b8505b112715c961418c8396433f213ad"},
-    {file = "pyarrow-14.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:01e44de9749cddc486169cb632f3c99962318e9dacac7778315a110f4bf8a450"},
-    {file = "pyarrow-14.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0351fecf0e26e152542bc164c22ea2a8e8c682726fce160ce4d459ea802d69c"},
-    {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c1f6110c386464fd2e5e4ea3624466055bbe681ff185fd6c9daa98f30a3f9a"},
-    {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11e045dfa09855b6d3e7705a37c42e2dc2c71d608fab34d3c23df2e02df9aec3"},
-    {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:097828b55321897db0e1dbfc606e3ff8101ae5725673498cbfa7754ee0da80e4"},
-    {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1daab52050a1c48506c029e6fa0944a7b2436334d7e44221c16f6f1b2cc9c510"},
-    {file = "pyarrow-14.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:3f6d5faf4f1b0d5a7f97be987cf9e9f8cd39902611e818fe134588ee99bf0283"},
-    {file = "pyarrow-14.0.1.tar.gz", hash = "sha256:b8b3f4fe8d4ec15e1ef9b599b94683c5216adaed78d5cb4c606180546d1e2ee1"},
+    {file = "pyarrow-15.0.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:88b340f0a1d05b5ccc3d2d986279045655b1fe8e41aba6ca44ea28da0d1455d8"},
+    {file = "pyarrow-15.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eaa8f96cecf32da508e6c7f69bb8401f03745c050c1dd42ec2596f2e98deecac"},
+    {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23c6753ed4f6adb8461e7c383e418391b8d8453c5d67e17f416c3a5d5709afbd"},
+    {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f639c059035011db8c0497e541a8a45d98a58dbe34dc8fadd0ef128f2cee46e5"},
+    {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:290e36a59a0993e9a5224ed2fb3e53375770f07379a0ea03ee2fce2e6d30b423"},
+    {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06c2bb2a98bc792f040bef31ad3e9be6a63d0cb39189227c08a7d955db96816e"},
+    {file = "pyarrow-15.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a197f3670606a960ddc12adbe8075cea5f707ad7bf0dffa09637fdbb89f76c"},
+    {file = "pyarrow-15.0.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5f8bc839ea36b1f99984c78e06e7a06054693dc2af8920f6fb416b5bca9944e4"},
+    {file = "pyarrow-15.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5e81dfb4e519baa6b4c80410421528c214427e77ca0ea9461eb4097c328fa33"},
+    {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4f240852b302a7af4646c8bfe9950c4691a419847001178662a98915fd7ee7"},
+    {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e7d9cfb5a1e648e172428c7a42b744610956f3b70f524aa3a6c02a448ba853e"},
+    {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2d4f905209de70c0eb5b2de6763104d5a9a37430f137678edfb9a675bac9cd98"},
+    {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90adb99e8ce5f36fbecbbc422e7dcbcbed07d985eed6062e459e23f9e71fd197"},
+    {file = "pyarrow-15.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:b116e7fd7889294cbd24eb90cd9bdd3850be3738d61297855a71ac3b8124ee38"},
+    {file = "pyarrow-15.0.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:25335e6f1f07fdaa026a61c758ee7d19ce824a866b27bba744348fa73bb5a440"},
+    {file = "pyarrow-15.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90f19e976d9c3d8e73c80be84ddbe2f830b6304e4c576349d9360e335cd627fc"},
+    {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22366249bf5fd40ddacc4f03cd3160f2d7c247692945afb1899bab8a140ddfb"},
+    {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2a335198f886b07e4b5ea16d08ee06557e07db54a8400cc0d03c7f6a22f785f"},
+    {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e6d459c0c22f0b9c810a3917a1de3ee704b021a5fb8b3bacf968eece6df098f"},
+    {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:033b7cad32198754d93465dcfb71d0ba7cb7cd5c9afd7052cab7214676eec38b"},
+    {file = "pyarrow-15.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:29850d050379d6e8b5a693098f4de7fd6a2bea4365bfd073d7c57c57b95041ee"},
+    {file = "pyarrow-15.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7167107d7fb6dcadb375b4b691b7e316f4368f39f6f45405a05535d7ad5e5058"},
+    {file = "pyarrow-15.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e85241b44cc3d365ef950432a1b3bd44ac54626f37b2e3a0cc89c20e45dfd8bf"},
+    {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:248723e4ed3255fcd73edcecc209744d58a9ca852e4cf3d2577811b6d4b59818"},
+    {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ff3bdfe6f1b81ca5b73b70a8d482d37a766433823e0c21e22d1d7dde76ca33f"},
+    {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f3d77463dee7e9f284ef42d341689b459a63ff2e75cee2b9302058d0d98fe142"},
+    {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:8c1faf2482fb89766e79745670cbca04e7018497d85be9242d5350cba21357e1"},
+    {file = "pyarrow-15.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:28f3016958a8e45a1069303a4a4f6a7d4910643fc08adb1e2e4a7ff056272ad3"},
+    {file = "pyarrow-15.0.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:89722cb64286ab3d4daf168386f6968c126057b8c7ec3ef96302e81d8cdb8ae4"},
+    {file = "pyarrow-15.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0ba387705044b3ac77b1b317165c0498299b08261d8122c96051024f953cd5"},
+    {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad2459bf1f22b6a5cdcc27ebfd99307d5526b62d217b984b9f5c974651398832"},
+    {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58922e4bfece8b02abf7159f1f53a8f4d9f8e08f2d988109126c17c3bb261f22"},
+    {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:adccc81d3dc0478ea0b498807b39a8d41628fa9210729b2f718b78cb997c7c91"},
+    {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8bd2baa5fe531571847983f36a30ddbf65261ef23e496862ece83bdceb70420d"},
+    {file = "pyarrow-15.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6669799a1d4ca9da9c7e06ef48368320f5856f36f9a4dd31a11839dda3f6cc8c"},
+    {file = "pyarrow-15.0.2.tar.gz", hash = "sha256:9c9bc803cb3b7bfacc1e96ffbfd923601065d9d3f911179d81e72d99fd74a3d9"},
 ]

 [package.dependencies]
-numpy = ">=1.16.6"
+numpy = ">=1.16.6,<2"

 [[package]]
 name = "pyarrow-hotfix"
@@ -1860,13 +1956,13 @@ files = [

 [[package]]
 name = "pytest"
-version = "7.4.3"
+version = "7.4.4"
 description = "pytest: simple powerful testing with Python"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"},
-    {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"},
+    {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+    {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
 ]

 [package.dependencies]
@@ -1882,13 +1978,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no

 [[package]]
 name = "python-dateutil"
-version = "2.8.2"
+version = "2.9.0.post0"
 description = "Extensions to the standard Python datetime module"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
 files = [
-    {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
-    {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+    {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
+    {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
 ]

 [package.dependencies]
@@ -1896,13 +1992,13 @@ six = ">=1.5"

 [[package]]
 name = "pytz"
-version = "2023.3.post1"
+version = "2024.1"
 description = "World timezone definitions, modern and historical"
 optional = false
 python-versions = "*"
 files = [
-    {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"},
-    {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"},
+    {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
+    {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
 ]

 [[package]]
@@ -1967,99 +2063,104 @@ files = [

 [[package]]
 name = "regex"
-version = "2023.10.3"
+version = "2024.4.16"
 description = "Alternative regular expression module, to replace re."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"},
-    {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"},
-    {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"},
-    {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"},
-    {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"},
-    {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"},
-    {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"},
-    {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"},
-    {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"},
-    {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"},
-    {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"},
-    {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"},
-    {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"},
-    {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"},
-    {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"},
-    {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"},
-    {file = "regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"},
-    {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"},
-    {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"},
-    {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"},
-    {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"},
-    {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"},
-    {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"},
-    {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"},
-    {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"},
-    {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"},
-    {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"},
-    {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"},
-    {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"},
-    {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"},
-    {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"},
-    {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"},
-    {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"},
-    {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"},
-    {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"},
-    {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"},
-    {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"},
-    {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"},
-    {file = "regex-2023.10.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf"},
-    {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991"},
-    {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302"},
-    {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971"},
-    {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11"},
-    {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597"},
-    {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb"},
-    {file = "regex-2023.10.3-cp37-cp37m-win32.whl", hash = "sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a"},
-    {file = "regex-2023.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed"},
-    {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"},
-    {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"},
-    {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"},
-    {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"},
-    {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"},
-    {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"},
-    {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"},
-    {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"},
-    {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"},
-    {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"},
-    {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"},
-    {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"},
-    {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"},
-    {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"},
-    {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"},
-    {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"},
-    {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"},
-    {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"},
-    {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"},
-    {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"},
-    {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"},
+    {file = "regex-2024.4.16-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb83cc090eac63c006871fd24db5e30a1f282faa46328572661c0a24a2323a08"},
+    {file = "regex-2024.4.16-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c91e1763696c0eb66340c4df98623c2d4e77d0746b8f8f2bee2c6883fd1fe18"},
+    {file = "regex-2024.4.16-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:10188fe732dec829c7acca7422cdd1bf57d853c7199d5a9e96bb4d40db239c73"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:956b58d692f235cfbf5b4f3abd6d99bf102f161ccfe20d2fd0904f51c72c4c66"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a70b51f55fd954d1f194271695821dd62054d949efd6368d8be64edd37f55c86"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c02fcd2bf45162280613d2e4a1ca3ac558ff921ae4e308ecb307650d3a6ee51"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ed75ea6892a56896d78f11006161eea52c45a14994794bcfa1654430984b22"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd727ad276bb91928879f3aa6396c9a1d34e5e180dce40578421a691eeb77f47"},
+    {file = "regex-2024.4.16-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7cbc5d9e8a1781e7be17da67b92580d6ce4dcef5819c1b1b89f49d9678cc278c"},
+    {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:78fddb22b9ef810b63ef341c9fcf6455232d97cfe03938cbc29e2672c436670e"},
+    {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:445ca8d3c5a01309633a0c9db57150312a181146315693273e35d936472df912"},
+    {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:95399831a206211d6bc40224af1c635cb8790ddd5c7493e0bd03b85711076a53"},
+    {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:7731728b6568fc286d86745f27f07266de49603a6fdc4d19c87e8c247be452af"},
+    {file = "regex-2024.4.16-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4facc913e10bdba42ec0aee76d029aedda628161a7ce4116b16680a0413f658a"},
+    {file = "regex-2024.4.16-cp310-cp310-win32.whl", hash = "sha256:911742856ce98d879acbea33fcc03c1d8dc1106234c5e7d068932c945db209c0"},
+    {file = "regex-2024.4.16-cp310-cp310-win_amd64.whl", hash = "sha256:e0a2df336d1135a0b3a67f3bbf78a75f69562c1199ed9935372b82215cddd6e2"},
+    {file = "regex-2024.4.16-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1210365faba7c2150451eb78ec5687871c796b0f1fa701bfd2a4a25420482d26"},
+    {file = "regex-2024.4.16-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9ab40412f8cd6f615bfedea40c8bf0407d41bf83b96f6fc9ff34976d6b7037fd"},
+    {file = "regex-2024.4.16-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fd80d1280d473500d8086d104962a82d77bfbf2b118053824b7be28cd5a79ea5"},
+    {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bb966fdd9217e53abf824f437a5a2d643a38d4fd5fd0ca711b9da683d452969"},
+    {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:20b7a68444f536365af42a75ccecb7ab41a896a04acf58432db9e206f4e525d6"},
+    {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b74586dd0b039c62416034f811d7ee62810174bb70dffcca6439f5236249eb09"},
+    {file = "regex-2024.4.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c8290b44d8b0af4e77048646c10c6e3aa583c1ca67f3b5ffb6e06cf0c6f0f89"},
+    {file = "regex-2024.4.16-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2d80a6749724b37853ece57988b39c4e79d2b5fe2869a86e8aeae3bbeef9eb0"},
+    {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3a1018e97aeb24e4f939afcd88211ace472ba566efc5bdf53fd8fd7f41fa7170"},
+    {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8d015604ee6204e76569d2f44e5a210728fa917115bef0d102f4107e622b08d5"},
+    {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:3d5ac5234fb5053850d79dd8eb1015cb0d7d9ed951fa37aa9e6249a19aa4f336"},
+    {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0a38d151e2cdd66d16dab550c22f9521ba79761423b87c01dae0a6e9add79c0d"},
+    {file = "regex-2024.4.16-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:159dc4e59a159cb8e4e8f8961eb1fa5d58f93cb1acd1701d8aff38d45e1a84a6"},
+    {file = "regex-2024.4.16-cp311-cp311-win32.whl", hash = "sha256:ba2336d6548dee3117520545cfe44dc28a250aa091f8281d28804aa8d707d93d"},
+    {file = "regex-2024.4.16-cp311-cp311-win_amd64.whl", hash = "sha256:8f83b6fd3dc3ba94d2b22717f9c8b8512354fd95221ac661784df2769ea9bba9"},
+    {file = "regex-2024.4.16-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80b696e8972b81edf0af2a259e1b2a4a661f818fae22e5fa4fa1a995fb4a40fd"},
+    {file = "regex-2024.4.16-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d61ae114d2a2311f61d90c2ef1358518e8f05eafda76eaf9c772a077e0b465ec"},
+    {file = "regex-2024.4.16-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ba6745440b9a27336443b0c285d705ce73adb9ec90e2f2004c64d95ab5a7598"},
+    {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295004b2dd37b0835ea5c14a33e00e8cfa3c4add4d587b77287825f3418d310"},
+    {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aba818dcc7263852aabb172ec27b71d2abca02a593b95fa79351b2774eb1d2b"},
+    {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0800631e565c47520aaa04ae38b96abc5196fe8b4aa9bd864445bd2b5848a7a"},
+    {file = "regex-2024.4.16-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08dea89f859c3df48a440dbdcd7b7155bc675f2fa2ec8c521d02dc69e877db70"},
+    {file = "regex-2024.4.16-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eeaa0b5328b785abc344acc6241cffde50dc394a0644a968add75fcefe15b9d4"},
+    {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4e819a806420bc010489f4e741b3036071aba209f2e0989d4750b08b12a9343f"},
+    {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:c2d0e7cbb6341e830adcbfa2479fdeebbfbb328f11edd6b5675674e7a1e37730"},
+    {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:91797b98f5e34b6a49f54be33f72e2fb658018ae532be2f79f7c63b4ae225145"},
+    {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:d2da13568eff02b30fd54fccd1e042a70fe920d816616fda4bf54ec705668d81"},
+    {file = "regex-2024.4.16-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:370c68dc5570b394cbaadff50e64d705f64debed30573e5c313c360689b6aadc"},
+    {file = "regex-2024.4.16-cp312-cp312-win32.whl", hash = "sha256:904c883cf10a975b02ab3478bce652f0f5346a2c28d0a8521d97bb23c323cc8b"},
+    {file = "regex-2024.4.16-cp312-cp312-win_amd64.whl", hash = "sha256:785c071c982dce54d44ea0b79cd6dfafddeccdd98cfa5f7b86ef69b381b457d9"},
+    {file = "regex-2024.4.16-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2f142b45c6fed48166faeb4303b4b58c9fcd827da63f4cf0a123c3480ae11fb"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e87ab229332ceb127a165612d839ab87795972102cb9830e5f12b8c9a5c1b508"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81500ed5af2090b4a9157a59dbc89873a25c33db1bb9a8cf123837dcc9765047"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b340cccad138ecb363324aa26893963dcabb02bb25e440ebdf42e30963f1a4e0"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c72608e70f053643437bd2be0608f7f1c46d4022e4104d76826f0839199347a"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a01fe2305e6232ef3e8f40bfc0f0f3a04def9aab514910fa4203bafbc0bb4682"},
+    {file = "regex-2024.4.16-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:03576e3a423d19dda13e55598f0fd507b5d660d42c51b02df4e0d97824fdcae3"},
+    {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:549c3584993772e25f02d0656ac48abdda73169fe347263948cf2b1cead622f3"},
+    {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:34422d5a69a60b7e9a07a690094e824b66f5ddc662a5fc600d65b7c174a05f04"},
+    {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5f580c651a72b75c39e311343fe6875d6f58cf51c471a97f15a938d9fe4e0d37"},
+    {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3399dd8a7495bbb2bacd59b84840eef9057826c664472e86c91d675d007137f5"},
+    {file = "regex-2024.4.16-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d1f86f3f4e2388aa3310b50694ac44daefbd1681def26b4519bd050a398dc5a"},
+    {file = "regex-2024.4.16-cp37-cp37m-win32.whl", hash = "sha256:dd5acc0a7d38fdc7a3a6fd3ad14c880819008ecb3379626e56b163165162cc46"},
+    {file = "regex-2024.4.16-cp37-cp37m-win_amd64.whl", hash = "sha256:ba8122e3bb94ecda29a8de4cf889f600171424ea586847aa92c334772d200331"},
+    {file = "regex-2024.4.16-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:743deffdf3b3481da32e8a96887e2aa945ec6685af1cfe2bcc292638c9ba2f48"},
+    {file = "regex-2024.4.16-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7571f19f4a3fd00af9341c7801d1ad1967fc9c3f5e62402683047e7166b9f2b4"},
+    {file = "regex-2024.4.16-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:df79012ebf6f4efb8d307b1328226aef24ca446b3ff8d0e30202d7ebcb977a8c"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e757d475953269fbf4b441207bb7dbdd1c43180711b6208e129b637792ac0b93"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4313ab9bf6a81206c8ac28fdfcddc0435299dc88cad12cc6305fd0e78b81f9e4"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d83c2bc678453646f1a18f8db1e927a2d3f4935031b9ad8a76e56760461105dd"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df1bfef97db938469ef0a7354b2d591a2d438bc497b2c489471bec0e6baf7c4"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62120ed0de69b3649cc68e2965376048793f466c5a6c4370fb27c16c1beac22d"},
+    {file = "regex-2024.4.16-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c2ef6f7990b6e8758fe48ad08f7e2f66c8f11dc66e24093304b87cae9037bb4a"},
+    {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8fc6976a3395fe4d1fbeb984adaa8ec652a1e12f36b56ec8c236e5117b585427"},
+    {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:03e68f44340528111067cecf12721c3df4811c67268b897fbe695c95f860ac42"},
+    {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ec7e0043b91115f427998febaa2beb82c82df708168b35ece3accb610b91fac1"},
+    {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c21fc21a4c7480479d12fd8e679b699f744f76bb05f53a1d14182b31f55aac76"},
+    {file = "regex-2024.4.16-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12f6a3f2f58bb7344751919a1876ee1b976fe08b9ffccb4bbea66f26af6017b9"},
+    {file = "regex-2024.4.16-cp38-cp38-win32.whl", hash = "sha256:479595a4fbe9ed8f8f72c59717e8cf222da2e4c07b6ae5b65411e6302af9708e"},
+    {file = "regex-2024.4.16-cp38-cp38-win_amd64.whl", hash = "sha256:0534b034fba6101611968fae8e856c1698da97ce2efb5c2b895fc8b9e23a5834"},
+    {file = "regex-2024.4.16-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a7ccdd1c4a3472a7533b0a7aa9ee34c9a2bef859ba86deec07aff2ad7e0c3b94"},
+    {file = "regex-2024.4.16-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f2f017c5be19984fbbf55f8af6caba25e62c71293213f044da3ada7091a4455"},
+    {file = "regex-2024.4.16-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:803b8905b52de78b173d3c1e83df0efb929621e7b7c5766c0843704d5332682f"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:684008ec44ad275832a5a152f6e764bbe1914bea10968017b6feaecdad5736e0"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65436dce9fdc0aeeb0a0effe0839cb3d6a05f45aa45a4d9f9c60989beca78b9c"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea355eb43b11764cf799dda62c658c4d2fdb16af41f59bb1ccfec517b60bcb07"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98c1165f3809ce7774f05cb74e5408cd3aa93ee8573ae959a97a53db3ca3180d"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cccc79a9be9b64c881f18305a7c715ba199e471a3973faeb7ba84172abb3f317"},
+    {file = "regex-2024.4.16-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00169caa125f35d1bca6045d65a662af0202704489fada95346cfa092ec23f39"},
+    {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6cc38067209354e16c5609b66285af17a2863a47585bcf75285cab33d4c3b8df"},
+    {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:23cff1b267038501b179ccbbd74a821ac4a7192a1852d1d558e562b507d46013"},
+    {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:b9d320b3bf82a39f248769fc7f188e00f93526cc0fe739cfa197868633d44701"},
+    {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:89ec7f2c08937421bbbb8b48c54096fa4f88347946d4747021ad85f1b3021b3c"},
+    {file = "regex-2024.4.16-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4918fd5f8b43aa7ec031e0fef1ee02deb80b6afd49c85f0790be1dc4ce34cb50"},
+    {file = "regex-2024.4.16-cp39-cp39-win32.whl", hash = "sha256:684e52023aec43bdf0250e843e1fdd6febbe831bd9d52da72333fa201aaa2335"},
+    {file = "regex-2024.4.16-cp39-cp39-win_amd64.whl", hash = "sha256:e697e1c0238133589e00c244a8b676bc2cfc3ab4961318d902040d099fec7483"},
+    {file = "regex-2024.4.16.tar.gz", hash = "sha256:fa454d26f2e87ad661c4f0c5a5fe4cf6aab1e307d1b94f16ffdfcb089ba685c0"},
 ]

 [[package]]
@@ -2085,121 +2186,111 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

 [[package]]
 name = "safetensors"
-version = "0.4.2"
+version = "0.4.3"
 description = ""
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "safetensors-0.4.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:69d8bb8384dc2cb5b72c36c4d6980771b293d1a1377b378763f5e37b6bb8d133"},
-    {file = "safetensors-0.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3d420e19fcef96d0067f4de4699682b4bbd85fc8fea0bd45fcd961fdf3e8c82c"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ca54742122fa3c4821754adb67318e1cd25c3a22bbf0c5520d5176e77a099ac"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b47aa643afdfd66cf7ce4c184092ae734e15d10aba2c2948f24270211801c3c"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d88a16bbc330f27e7f2d4caaf6fb061ad0b8a756ecc4033260b0378e128ce8a2"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9223b8ac21085db614a510eb3445e7083cae915a9202357555fa939695d4f57"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce6cb86133dc8930a7ab5e7438545a7f205f7a1cdd5aaf108c1d0da6bdcfbc2b"},
-    {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8a628e0ae2bbc334b62952c384aa5f41621d01850f8d67b04a96b9c39dd7326"},
-    {file = "safetensors-0.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:88d6beb7f811a081e0e5f1d9669fdac816c45340c04b1eaf7ebfda0ce93ea403"},
-    {file = "safetensors-0.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b57fc5b1b54cb12d8690a58a4cf4b7144730d4bde9d98aa0e1dab6295a1cd579"},
-    {file = "safetensors-0.4.2-cp310-none-win32.whl", hash = "sha256:9d87a1c98803c16cf113b9ba03f07b2dce5e8eabfd1811a7f7323fcaa2a1bf47"},
-    {file = "safetensors-0.4.2-cp310-none-win_amd64.whl", hash = "sha256:18930ec1d1ecb526d3d9835abc2489b8f1530877518f0c541e77ef0b7abcbd99"},
-    {file = "safetensors-0.4.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c5dd2ed788730ed56b415d1a11c62026b8cc8c573f55a2092afb3ab383e94fff"},
-    {file = "safetensors-0.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc41791b33efb9c83a59b731619f3d15f543dfe71f3a793cb8fbf9bd5d0d5d71"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c888bf71d5ca12a720f1ed87d407c4918afa022fb247a6546d8fac15b1f112b"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e6b2feb4b47226a16a792e6fac3f49442714884a3d4c1008569d5068a3941be9"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f41cc0ee4b838ae8f4d8364a1b162067693d11a3893f0863be8c228d40e4d0ee"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:51b7228e46c0a483c40ba4b9470dea00fb1ff8685026bb4766799000f6328ac2"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02697f8f2be8ca3c37a4958702dbdb1864447ef765e18b5328a1617022dcf164"},
-    {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27fd8f65cf7c80e4280cae1ee6bcd85c483882f6580821abe71ee1a0d3dcfca7"},
-    {file = "safetensors-0.4.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c487b5f113b0924c9534a07dc034830fb4ef05ce9bb6d78cfe016a7dedfe281f"},
-    {file = "safetensors-0.4.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:da7f6483f3fe67ff39b3a55552552c67930ea10a36e9f2539d36fc205273d767"},
-    {file = "safetensors-0.4.2-cp311-none-win32.whl", hash = "sha256:52a7012f6cb9cb4a132760b6308daede18a9f5f8952ce08adc7c67a7d865c2d8"},
-    {file = "safetensors-0.4.2-cp311-none-win_amd64.whl", hash = "sha256:4d1361a097ac430b310ce9eed8ed4746edee33ddafdfbb965debc8966fc34dc2"},
-    {file = "safetensors-0.4.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:77af8aa0edcc2863760fd6febbfdb82e88fd75d0e60c1ce4ba57208ba5e4a89b"},
-    {file = "safetensors-0.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846666c1c5a8c8888d2dfda8d3921cb9cb8e2c5f78365be756c11021e75a0a2a"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f4bfc7ea19b446bfad41510d4b4c76101698c00caaa8a332c8edd8090a412ef"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:233436fd30f27ffeb3c3780d0b84f496518868445c7a8db003639a649cc98453"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a09237a795d11cd11f9dae505d170a29b5616151db1e10c14f892b11caadc7d"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de01c9a3a3b7b69627d624ff69d9f11d28ce9908eea2fb6245adafa4b1d43df6"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c1f25c5069ee42a5bcffdc66c300a407941edd73f3239e9fdefd26216407391"},
-    {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7a73b3649456d09ca8506140d44484b63154a7378434cc1e8719f8056550b224"},
-    {file = "safetensors-0.4.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e1625a8d07d046e968bd5c4961810aba1225984e4fb9243626f9d04a06ed3fee"},
-    {file = "safetensors-0.4.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f74c86b25615cb24ad4cff765a2eefc09d71bf0fed97588cf585aad9c38fbb4"},
-    {file = "safetensors-0.4.2-cp312-none-win32.whl", hash = "sha256:8523b9c5777d771bcde5c2389c03f1cdf7ebe8797432a1bd5e345efe25c55987"},
-    {file = "safetensors-0.4.2-cp312-none-win_amd64.whl", hash = "sha256:dcff0243e1737a21f83d664c63fed89d1f532c23fc6830d0427279fabd789ccb"},
-    {file = "safetensors-0.4.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:96ad3d7d472612e26cbe413922b4fb13933310f0511d346ea5cc9a1e856e52eb"},
-    {file = "safetensors-0.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:88250922401b5ae4e37de929178caf46be47ed16c817b2237b81679bec07c120"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d40443554142fc0ab30652d5cc8554c4b7a613513bde00373e18afd5de8cbe4b"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27f53f70106224d32d874aacecbeb4a6e4c5b16a1d2006d0e876d97229086d71"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc068afe23734dfb26ce19db0a7877499ddf73b1d55ceb762417e8da4a1b05fb"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9be1918eb8d43a11a6f8806759fccfa0eeb0542b12924caba66af8a7800ad01a"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41911087d20a7bbd78cb4ad4f98aab0c431533107584df6635d8b54b99945573"},
-    {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50771c662aab909f31e94d048e76861fd027d66076ea773eef2e66c717766e24"},
-    {file = "safetensors-0.4.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13f2e57be007b7ea9329133d2399e6bdfcf1910f655440a4da17df3a45afcd30"},
-    {file = "safetensors-0.4.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c772147e6395bc829842e0a98e1b30c67fe25d816299c28196488511d5a5e951"},
-    {file = "safetensors-0.4.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:36239a0060b537a3e8c473df78cffee14c3ec4f51d5f1a853af99371a2fb2a35"},
-    {file = "safetensors-0.4.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:d0cbb7664fad2c307f95195f951b7059e95dc23e0e1822e5978c8b500098543c"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b3e55adb6bd9dc1c2a341e72f48f075953fa35d173dd8e29a95b3b02d0d1462"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42f743b3cca863fba53ca57a193f510e5ec359b97f38c282437716b6768e4a25"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e6af4a6dbeb06c4e6e7d46cf9c716cbc4cc5ef62584fd8a7c0fe558562df45"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a492ba21b5c8f14ee5ec9b20f42ba969e53ca1f909a4d04aad736b66a341dcc2"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b25b8233a1a85dc67e39838951cfb01595d792f3b7b644add63edb652992e030"},
-    {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fd27e063fbdafe776f7b1714da59110e88f270e86db00788a8fd65f4eacfeba7"},
-    {file = "safetensors-0.4.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1b6fa399f251bbeb52029bf5a0ac2878d7705dd3612a2f8895b48e9c11f0367d"},
-    {file = "safetensors-0.4.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de642d46b459e4afd5c2020b26c0d6d869a171ea00411897d5776c127cac74f0"},
-    {file = "safetensors-0.4.2-cp37-none-win32.whl", hash = "sha256:77b72d17754c93bb68f3598182f14d78776e0b9b31682ca5bb2c7c5bd9a75267"},
-    {file = "safetensors-0.4.2-cp37-none-win_amd64.whl", hash = 
"sha256:d36ee3244d461cd655aeef493792c3bccf4875282f8407fd9af99e9a41cf2530"}, - {file = "safetensors-0.4.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:16b6b3884f7876c6b3b23a742428223a7170a5a9dac819d8c12a1569422c4b5a"}, - {file = "safetensors-0.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ee25d311493fbbe0be9d395faee46e9d79e8948f461e388ff39e59875ed9a350"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eed8097968585cd752a1171f86fce9aa1d89a29033e5cd8bec5a502e29f6b7af"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:880e6865cf72cb67f9ab8d04a3c4b49dd95ae92fb1583929ce65aed94e1f685f"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91290f83daf80ce6d1a7f629b244443c200060a80f908b29d879021409e5ea94"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3517d568486ab3508a7acc360b82d7a4a3e26b86efdf210a9ecd9d233c40708a"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f43a77eb38540f782999e5dc5645164fe9027d3f0194f6c9a5126168017efa"}, - {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b684d9818aa5d63fddc65f7d0151968037d255d91adf74eba82125b41c680aaa"}, - {file = "safetensors-0.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ab1f5d84185f9fefaf21413efb764e4908057b8a9a0b987ede890c353490fd70"}, - {file = "safetensors-0.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bd979642e6c3a517ef4b84ff36c2fee4015664fea05a61154fc565978347553"}, - {file = "safetensors-0.4.2-cp38-none-win32.whl", hash = "sha256:11be6e7afed29e5a5628f0aa6214e34bc194da73f558dc69fc7d56e07037422a"}, - {file = "safetensors-0.4.2-cp38-none-win_amd64.whl", hash = "sha256:2f7a6e5d29bd2cc340cffaa391fa437b1be9d21a2bd8b8724d2875d13a6ef2a9"}, - {file = "safetensors-0.4.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a5a921b4fe6925f9942adff3ebae8c16e0487908c54586a5a42f35b59fd69794"}, - {file = "safetensors-0.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b691727228c28f2d82d8a92b2bc26e7a1f129ee40b2f2a3185b5974e038ed47c"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91ca1056decc4e981248786e87b2a202d4841ee5f99d433f1adf3d44d4bcfa0e"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55969fd2e6fdb38dc221b0ab380668c21b0efa12a7562db9924759faa3c51757"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ae429bfaecc10ab5fe78c93009b3d1656c1581da560041e700eadb497dbe7a4"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff88f194fe4ac50b463a4a6f0c03af9ad72eb5d24ec6d6730af59522e37fedb"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80cb48d0a447f8dd18e61813efa7d3f8f8d52edf0f05806abc0c59b83431f57"}, - {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b286fb7adfee70a4189898ac2342b8a67d5f493e6b21b0af89ca8eac1b967cbf"}, - {file = "safetensors-0.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ceeff9ddbab4f78738489eb6682867ae946178776f33699737b2129b5394dc1"}, - {file = "safetensors-0.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:a26fae748a7488cb3aac381eddfa818c42052c87b5e689fb4c6e82ed58cec209"}, - {file = "safetensors-0.4.2-cp39-none-win32.whl", hash = "sha256:039a42ab33c9d68b39706fd38f1922ace26866eff246bf20271edb619f5f848b"}, - {file = "safetensors-0.4.2-cp39-none-win_amd64.whl", hash = "sha256:b3a3e1f5b85859e398773f064943b62a4059f225008a2a8ee6add1edcf77cacf"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4e70d442ad17e8b153ef9095bf48ea64f15a66bf26dc2b6ca94660c154edbc24"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b90f1d9809caf4ff395951b4703295a68d12907f6945bbc3129e934ff8ae46f6"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c7ac9ad3728838006598e296b3ae9f27d80b489effd4685b92d97b3fc4c98f6"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5730d77e6ff7f4c7039e20913661ad0ea2f86c09e71c039e73dfdd1f394f08"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44feb8cb156d6803dcd19fc6b81b27235f29b877660605a6ac35e1da7d64f0e4"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:523a241c33e7c827ab9a3a23760d75c7d062f43dfe55b6b019409f89b0fb52d1"}, - {file = "safetensors-0.4.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fb18300e8eb74291225214f26c9a8ae2110fd61a6c9b5a2ff4c4e0eb1bb9a998"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fe5437ff9fb116e44f2ab558981249ae63f978392b4576e62fcfe167d353edbc"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9304a0934ced5a5d272f39de36291dc141dfc152d277f03fb4d65f2fb2ffa7c"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:160ba1b1e11cf874602c233ab80a14f588571d09556cbc3586900121d622b5ed"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04fcd6fcf7d9c13c7e5dc7e08de5e492ee4daa8f4ad74b4d8299d3eb0224292f"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:906d14c4a677d35834fb0f3a5455ef8305e1bba10a5e0f2e0f357b3d1ad989f2"}, - {file = "safetensors-0.4.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:df3fcdec0cd543084610d1f09c65cdb10fb3079f79bceddc092b0d187c6a265b"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5ca76f13fb1cef242ea3ad2cb37388e7d005994f42af8b44bee56ba48b2d45ce"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:278a1a3414c020785decdcd741c578725721274d2f9f787fcc930882e83b89cc"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b5a461cc68ecd42d9d546e5e1268a39d8ede7934a68d1ce17c3c659cb829d6"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2341411412a41671d25e26bed59ec121e46bf4fadb8132895e610411c4b9681"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3497ac3895acf17c5f98197f1fa4769f09c5e7ede07fcb102f1c201e663e052c"}, - {file = "safetensors-0.4.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:01b5e71d3754d2201294f1eb7a6d59cce3a5702ff96d83d226571b2ca2183837"}, - {file = 
"safetensors-0.4.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3627dbd1ea488dd8046a0491de5087f3c0d641e7acc80c0189a33c69398f1cd1"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9d56f0ef53afad26ec54ceede78a43e9a23a076dadbbda7b44d304c591abf4c1"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b259ca73d42daf658a1bda463f1f83885ae4d93a60869be80d7f7dfcc9d8bbb5"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebc3cd401e4eb54e7c0a70346be565e81942d9a41fafd5f4bf7ab3a55d10378"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5bc384a0309b706aa0425c93abb0390508a61bf029ce99c7d9df4220f25871a5"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:af2d8f7235d8a08fbccfb8394387890e7fa38942b349a94e6eff13c52ac98087"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0911315bbcc5289087d063c2c2c7ccd711ea97a7e557a7bce005ac2cf80146aa"}, - {file = "safetensors-0.4.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1efe31673be91832d73439a2af426743e1395fc9ef7b081914e9e1d567bd7b5f"}, - {file = "safetensors-0.4.2.tar.gz", hash = "sha256:acc85dcb09ec5e8aa787f588d7ad4d55c103f31e4ff060e17d92cc0e8b8cac73"}, + {file = "safetensors-0.4.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd"}, + {file = "safetensors-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376"}, + {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d"}, + {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1"}, + {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf"}, + {file = "safetensors-0.4.3-cp310-none-win32.whl", hash = "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9"}, + {file = "safetensors-0.4.3-cp310-none-win_amd64.whl", hash = "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632"}, + {file = "safetensors-0.4.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a"}, + {file = "safetensors-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b"}, + {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee"}, + {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9"}, + {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c"}, + {file = "safetensors-0.4.3-cp311-none-win32.whl", hash = "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61"}, + {file = "safetensors-0.4.3-cp311-none-win_amd64.whl", hash = "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67"}, + {file = "safetensors-0.4.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856"}, + {file = "safetensors-0.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d"}, + {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361"}, + {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e"}, + {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e"}, + {file = "safetensors-0.4.3-cp312-none-win32.whl", hash = "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3"}, + {file = "safetensors-0.4.3-cp312-none-win_amd64.whl", hash = 
"sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7"}, + {file = "safetensors-0.4.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd"}, + {file = "safetensors-0.4.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee"}, + {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3"}, + {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d"}, + {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d"}, + {file = "safetensors-0.4.3-cp37-none-win32.whl", hash = "sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50"}, + {file = "safetensors-0.4.3-cp37-none-win_amd64.whl", hash = "sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b"}, + {file = "safetensors-0.4.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4"}, + {file = "safetensors-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d"}, + {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721"}, + {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2"}, + {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270"}, + {file = "safetensors-0.4.3-cp38-none-win32.whl", hash = "sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac"}, + {file = "safetensors-0.4.3-cp38-none-win_amd64.whl", hash = "sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e"}, + {file = "safetensors-0.4.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c"}, + {file = "safetensors-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121"}, + {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed"}, + {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea"}, + {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35"}, + {file = "safetensors-0.4.3-cp39-none-win32.whl", hash = "sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3"}, + {file = "safetensors-0.4.3-cp39-none-win_amd64.whl", hash = "sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab"}, + {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0"}, + {file = "safetensors-0.4.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173"}, + 
{file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd"}, + {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3"}, + {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550"}, + {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0"}, + {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3"}, + {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da"}, + {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65"}, + {file = "safetensors-0.4.3.tar.gz", hash = "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2"}, ] [package.extras] @@ -2212,7 +2303,7 @@ paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] tensorflow = ["safetensors[numpy]", 
"tensorflow (>=2.11.0)"] -testing = ["h5py (>=3.7.0)", "huggingface_hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools_rust (>=1.5.2)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] @@ -2271,19 +2362,19 @@ files = [ [[package]] name = "setuptools" -version = "69.0.2" +version = "69.5.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, + {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, + {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" @@ -2450,31 +2541,36 @@ files = [ [[package]] name = "torch" -version = "2.1.1" +version = "2.2.2" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" files = [ - {file = 
"torch-2.1.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:5ebc43f5355a9b7be813392b3fb0133991f0380f6f0fcc8218d5468dc45d1071"}, - {file = "torch-2.1.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:84fefd63356416c0cd20578637ccdbb82164993400ed17b57c951dd6376dcee8"}, - {file = "torch-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a7a9da0c324409bcb5a7bdad1b4e94e936d21c2590aaa7ac2f63968da8c62f7"}, - {file = "torch-2.1.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:1e1e5faddd43a8f2c0e0e22beacd1e235a2e447794d807483c94a9e31b54a758"}, - {file = "torch-2.1.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:e76bf3c5c354874f1da465c852a2fb60ee6cbce306e935337885760f080f9baa"}, - {file = "torch-2.1.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:98fea993639b0bb432dfceb7b538f07c0f1c33386d63f635219f49254968c80f"}, - {file = "torch-2.1.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:61b51b33c61737c287058b0c3061e6a9d3c363863e4a094f804bc486888a188a"}, - {file = "torch-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:1d70920da827e2276bf07f7ec46958621cad18d228c97da8f9c19638474dbd52"}, - {file = "torch-2.1.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:a70593806f1d7e6b53657d96810518da0f88ef2608c98a402955765b8c79d52c"}, - {file = "torch-2.1.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:e312f7e82e49565f7667b0bbf9559ab0c597063d93044740781c02acd5a87978"}, - {file = "torch-2.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3cbecfa5a7314d828f4a37b0c286714dc9aa2e69beb7a22f7aca76567ed9f4"}, - {file = "torch-2.1.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9ca0fcbf3d5ba644d6a8572c83a9abbdf5f7ff575bc38529ef6c185a3a71bde9"}, - {file = "torch-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:2dc9f312fc1fa0d61a565a0292ad73119d4b74c9f8b5031b55f8b4722abca079"}, - {file = "torch-2.1.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:d56b032176458e2af4709627bbd2c20fe2917eff8cd087a7fe313acccf5ce2f1"}, - {file = "torch-2.1.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:29e3b90a8c281f6660804a939d1f4218604c80162e521e1e6d8c8557325902a0"}, - {file = "torch-2.1.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bd95cee8511584b67ddc0ba465c3f1edeb5708d833ee02af1206b4486f1d9096"}, - {file = "torch-2.1.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b31230bd058424e56dba7f899280dbc6ac8b9948e43902e0c84a44666b1ec151"}, - {file = "torch-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:403f1095e665e4f35971b43797a920725b8b205723aa68254a4050c6beca29b6"}, - {file = "torch-2.1.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:715b50d8c1de5da5524a68287eb000f73e026e74d5f6b12bc450ef6995fcf5f9"}, - {file = "torch-2.1.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:db67e8725c76f4c7f4f02e7551bb16e81ba1a1912867bc35d7bb96d2be8c78b4"}, + {file = "torch-2.2.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bc889d311a855dd2dfd164daf8cc903a6b7273a747189cebafdd89106e4ad585"}, + {file = "torch-2.2.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:15dffa4cc3261fa73d02f0ed25f5fa49ecc9e12bf1ae0a4c1e7a88bbfaad9030"}, + {file = "torch-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:11e8fe261233aeabd67696d6b993eeb0896faa175c6b41b9a6c9f0334bdad1c5"}, + {file = "torch-2.2.2-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b2e2200b245bd9f263a0d41b6a2dab69c4aca635a01b30cca78064b0ef5b109e"}, + {file = "torch-2.2.2-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:877b3e6593b5e00b35bbe111b7057464e76a7dd186a287280d941b564b0563c2"}, + {file = 
"torch-2.2.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ad4c03b786e074f46606f4151c0a1e3740268bcf29fbd2fdf6666d66341c1dcb"}, + {file = "torch-2.2.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:32827fa1fbe5da8851686256b4cd94cc7b11be962862c2293811c94eea9457bf"}, + {file = "torch-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:f9ef0a648310435511e76905f9b89612e45ef2c8b023bee294f5e6f7e73a3e7c"}, + {file = "torch-2.2.2-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:95b9b44f3bcebd8b6cd8d37ec802048c872d9c567ba52c894bba90863a439059"}, + {file = "torch-2.2.2-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:49aa4126ede714c5aeef7ae92969b4b0bbe67f19665106463c39f22e0a1860d1"}, + {file = "torch-2.2.2-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:cf12cdb66c9c940227ad647bc9cf5dba7e8640772ae10dfe7569a0c1e2a28aca"}, + {file = "torch-2.2.2-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:89ddac2a8c1fb6569b90890955de0c34e1724f87431cacff4c1979b5f769203c"}, + {file = "torch-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:451331406b760f4b1ab298ddd536486ab3cfb1312614cfe0532133535be60bea"}, + {file = "torch-2.2.2-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:eb4d6e9d3663e26cd27dc3ad266b34445a16b54908e74725adb241aa56987533"}, + {file = "torch-2.2.2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:bf9558da7d2bf7463390b3b2a61a6a3dbb0b45b161ee1dd5ec640bf579d479fc"}, + {file = "torch-2.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd2bf7697c9e95fb5d97cc1d525486d8cf11a084c6af1345c2c2c22a6b0029d0"}, + {file = "torch-2.2.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b421448d194496e1114d87a8b8d6506bce949544e513742b097e2ab8f7efef32"}, + {file = "torch-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:3dbcd563a9b792161640c0cffe17e3270d85e8f4243b1f1ed19cca43d28d235b"}, + {file = "torch-2.2.2-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:31f4310210e7dda49f1fb52b0ec9e59382cfcb938693f6d5378f25b43d7c1d29"}, + {file = "torch-2.2.2-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c795feb7e8ce2e0ef63f75f8e1ab52e7fd5e1a4d7d0c31367ade1e3de35c9e95"}, + {file = "torch-2.2.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:a6e5770d68158d07456bfcb5318b173886f579fdfbf747543901ce718ea94782"}, + {file = "torch-2.2.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:67dcd726edff108e2cd6c51ff0e416fd260c869904de95750e80051358680d24"}, + {file = "torch-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:539d5ef6c4ce15bd3bd47a7b4a6e7c10d49d4d21c0baaa87c7d2ef8698632dfb"}, + {file = "torch-2.2.2-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:dff696de90d6f6d1e8200e9892861fd4677306d0ef604cb18f2134186f719f82"}, + {file = "torch-2.2.2-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:3a4dd910663fd7a124c056c878a52c2b0be4a5a424188058fe97109d4436ee42"}, ] [package.dependencies] @@ -2491,25 +2587,25 @@ nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linu nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = 
{version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = "*" -triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -typing-extensions = "*" +triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +typing-extensions = ">=4.8.0" [package.extras] -dynamo = ["jinja2"] opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] [[package]] name = "tqdm" -version = "4.66.1" +version = "4.66.2" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, + {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, + {file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, ] [package.dependencies] @@ -2593,28 +2689,26 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "2.1.0" +version = "2.2.0" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" files = [ - {file = "triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66439923a30d5d48399b08a9eae10370f6c261a5ec864a64983bae63152d39d7"}, - {file = "triton-2.1.0-0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:919b06453f0033ea52c13eaf7833de0e57db3178d23d4e04f9fc71c4f2c32bf8"}, - {file = "triton-2.1.0-0-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae4bb8a91de790e1866405211c4d618379781188f40d5c4c399766914e84cd94"}, - {file = "triton-2.1.0-0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39f6fb6bdccb3e98f3152e3fbea724f1aeae7d749412bbb1fa9c441d474eba26"}, - {file = "triton-2.1.0-0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:21544e522c02005a626c8ad63d39bdff2f31d41069592919ef281e964ed26446"}, - {file = "triton-2.1.0-0-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:143582ca31dd89cd982bd3bf53666bab1c7527d41e185f9e3d8a3051ce1b663b"}, - {file = "triton-2.1.0-0-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82fc5aeeedf6e36be4e4530cbdcba81a09d65c18e02f52dc298696d45721f3bd"}, - {file = "triton-2.1.0-0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:81a96d110a738ff63339fc892ded095b31bd0d205e3aace262af8400d40b6fa8"}, + {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, + {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, + {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"}, + {file = 
"triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"}, + {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"}, ] [package.dependencies] filelock = "*" [package.extras] -build = ["cmake (>=3.18)", "lit"] -tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] -tutorials = ["matplotlib", "pandas", "tabulate"] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] [[package]] name = "typer" @@ -2638,39 +2732,40 @@ test = ["black (>=22.3.0,<23.0.0)", "coverage (>=5.2,<6.0)", "isort (>=5.0.6,<6. [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] name = "tzdata" -version = "2023.3" +version = "2024.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, ] [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2886,101 +2981,101 @@ files = [ [[package]] name = "yarl" -version = "1.9.3" +version = "1.9.4" description = "Yet another URL library" optional = false python-versions = ">=3.7" files = [ - {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32435d134414e01d937cd9d6cc56e8413a8d4741dea36af5840c7750f04d16ab"}, - {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9a5211de242754b5e612557bca701f39f8b1a9408dff73c6db623f22d20f470e"}, - {file = "yarl-1.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:525cd69eff44833b01f8ef39aa33a9cc53a99ff7f9d76a6ef6a9fb758f54d0ff"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc94441bcf9cb8c59f51f23193316afefbf3ff858460cb47b5758bf66a14d130"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e36021db54b8a0475805acc1d6c4bca5d9f52c3825ad29ae2d398a9d530ddb88"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0f17d1df951336a02afc8270c03c0c6e60d1f9996fcbd43a4ce6be81de0bd9d"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5f3faeb8100a43adf3e7925d556801d14b5816a0ac9e75e22948e787feec642"}, - {file = "yarl-1.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed37db837ecb5962469fad448aaae0f0ee94ffce2062cf2eb9aed13328b5196"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:721ee3fc292f0d069a04016ef2c3a25595d48c5b8ddc6029be46f6158d129c92"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b8bc5b87a65a4e64bc83385c05145ea901b613d0d3a434d434b55511b6ab0067"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:dd952b9c64f3b21aedd09b8fe958e4931864dba69926d8a90c90d36ac4e28c9a"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:c405d482c320a88ab53dcbd98d6d6f32ada074f2d965d6e9bf2d823158fa97de"}, - {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9df9a0d4c5624790a0dea2e02e3b1b3c69aed14bcb8650e19606d9df3719e87d"}, - {file = "yarl-1.9.3-cp310-cp310-win32.whl", hash = "sha256:d34c4f80956227f2686ddea5b3585e109c2733e2d4ef12eb1b8b4e84f09a2ab6"}, - {file = "yarl-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:cf7a4e8de7f1092829caef66fd90eaf3710bc5efd322a816d5677b7664893c93"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d61a0ca95503867d4d627517bcfdc28a8468c3f1b0b06c626f30dd759d3999fd"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73cc83f918b69110813a7d95024266072d987b903a623ecae673d1e71579d566"}, - {file = "yarl-1.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:d81657b23e0edb84b37167e98aefb04ae16cbc5352770057893bd222cdc6e45f"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26a1a8443091c7fbc17b84a0d9f38de34b8423b459fb853e6c8cdfab0eacf613"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe34befb8c765b8ce562f0200afda3578f8abb159c76de3ab354c80b72244c41"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c757f64afe53a422e45e3e399e1e3cf82b7a2f244796ce80d8ca53e16a49b9f"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72a57b41a0920b9a220125081c1e191b88a4cdec13bf9d0649e382a822705c65"}, - {file = "yarl-1.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632c7aeb99df718765adf58eacb9acb9cbc555e075da849c1378ef4d18bf536a"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b0b8c06afcf2bac5a50b37f64efbde978b7f9dc88842ce9729c020dc71fae4ce"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1d93461e2cf76c4796355494f15ffcb50a3c198cc2d601ad8d6a96219a10c363"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4003f380dac50328c85e85416aca6985536812c082387255c35292cb4b41707e"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4d6d74a97e898c1c2df80339aa423234ad9ea2052f66366cef1e80448798c13d"}, - {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b61e64b06c3640feab73fa4ff9cb64bd8182de52e5dc13038e01cfe674ebc321"}, - {file = "yarl-1.9.3-cp311-cp311-win32.whl", hash = "sha256:29beac86f33d6c7ab1d79bd0213aa7aed2d2f555386856bb3056d5fdd9dab279"}, - {file = "yarl-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:f7271d6bd8838c49ba8ae647fc06469137e1c161a7ef97d778b72904d9b68696"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:dd318e6b75ca80bff0b22b302f83a8ee41c62b8ac662ddb49f67ec97e799885d"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c4b1efb11a8acd13246ffb0bee888dd0e8eb057f8bf30112e3e21e421eb82d4a"}, - {file = "yarl-1.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c6f034386e5550b5dc8ded90b5e2ff7db21f0f5c7de37b6efc5dac046eb19c10"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd49a908cb6d387fc26acee8b7d9fcc9bbf8e1aca890c0b2fdfd706057546080"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa4643635f26052401750bd54db911b6342eb1a9ac3e74f0f8b58a25d61dfe41"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e741bd48e6a417bdfbae02e088f60018286d6c141639359fb8df017a3b69415a"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c86d0d0919952d05df880a1889a4f0aeb6868e98961c090e335671dea5c0361"}, - {file = "yarl-1.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d5434b34100b504aabae75f0622ebb85defffe7b64ad8f52b8b30ec6ef6e4b9"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79e1df60f7c2b148722fb6cafebffe1acd95fd8b5fd77795f56247edaf326752"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:44e91a669c43f03964f672c5a234ae0d7a4d49c9b85d1baa93dec28afa28ffbd"}, - {file = 
"yarl-1.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3cfa4dbe17b2e6fca1414e9c3bcc216f6930cb18ea7646e7d0d52792ac196808"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:88d2c3cc4b2f46d1ba73d81c51ec0e486f59cc51165ea4f789677f91a303a9a7"}, - {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cccdc02e46d2bd7cb5f38f8cc3d9db0d24951abd082b2f242c9e9f59c0ab2af3"}, - {file = "yarl-1.9.3-cp312-cp312-win32.whl", hash = "sha256:96758e56dceb8a70f8a5cff1e452daaeff07d1cc9f11e9b0c951330f0a2396a7"}, - {file = "yarl-1.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:c4472fe53ebf541113e533971bd8c32728debc4c6d8cc177f2bff31d011ec17e"}, - {file = "yarl-1.9.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:126638ab961633f0940a06e1c9d59919003ef212a15869708dcb7305f91a6732"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c99ddaddb2fbe04953b84d1651149a0d85214780e4d0ee824e610ab549d98d92"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dab30b21bd6fb17c3f4684868c7e6a9e8468078db00f599fb1c14e324b10fca"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:828235a2a169160ee73a2fcfb8a000709edf09d7511fccf203465c3d5acc59e4"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc391e3941045fd0987c77484b2799adffd08e4b6735c4ee5f054366a2e1551d"}, - {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51382c72dd5377861b573bd55dcf680df54cea84147c8648b15ac507fbef984d"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:28a108cb92ce6cf867690a962372996ca332d8cda0210c5ad487fe996e76b8bb"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8f18a7832ff85dfcd77871fe677b169b1bc60c021978c90c3bb14f727596e0ae"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:7eaf13af79950142ab2bbb8362f8d8d935be9aaf8df1df89c86c3231e4ff238a"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:66a6dbf6ca7d2db03cc61cafe1ee6be838ce0fbc97781881a22a58a7c5efef42"}, - {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1a0a4f3aaa18580038cfa52a7183c8ffbbe7d727fe581300817efc1e96d1b0e9"}, - {file = "yarl-1.9.3-cp37-cp37m-win32.whl", hash = "sha256:946db4511b2d815979d733ac6a961f47e20a29c297be0d55b6d4b77ee4b298f6"}, - {file = "yarl-1.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2dad8166d41ebd1f76ce107cf6a31e39801aee3844a54a90af23278b072f1ccf"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bb72d2a94481e7dc7a0c522673db288f31849800d6ce2435317376a345728225"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9a172c3d5447b7da1680a1a2d6ecdf6f87a319d21d52729f45ec938a7006d5d8"}, - {file = "yarl-1.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2dc72e891672343b99db6d497024bf8b985537ad6c393359dc5227ef653b2f17"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8d51817cf4b8d545963ec65ff06c1b92e5765aa98831678d0e2240b6e9fd281"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53ec65f7eee8655bebb1f6f1607760d123c3c115a324b443df4f916383482a67"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cfd77e8e5cafba3fb584e0f4b935a59216f352b73d4987be3af51f43a862c403"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e73db54c967eb75037c178a54445c5a4e7461b5203b27c45ef656a81787c0c1b"}, - {file = "yarl-1.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09c19e5f4404574fcfb736efecf75844ffe8610606f3fccc35a1515b8b6712c4"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6280353940f7e5e2efaaabd686193e61351e966cc02f401761c4d87f48c89ea4"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c25ec06e4241e162f5d1f57c370f4078797ade95c9208bd0c60f484834f09c96"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7217234b10c64b52cc39a8d82550342ae2e45be34f5bff02b890b8c452eb48d7"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4ce77d289f8d40905c054b63f29851ecbfd026ef4ba5c371a158cfe6f623663e"}, - {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f74b015c99a5eac5ae589de27a1201418a5d9d460e89ccb3366015c6153e60a"}, - {file = "yarl-1.9.3-cp38-cp38-win32.whl", hash = "sha256:8a2538806be846ea25e90c28786136932ec385c7ff3bc1148e45125984783dc6"}, - {file = "yarl-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:6465d36381af057d0fab4e0f24ef0e80ba61f03fe43e6eeccbe0056e74aadc70"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2f3c8822bc8fb4a347a192dd6a28a25d7f0ea3262e826d7d4ef9cc99cd06d07e"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7831566595fe88ba17ea80e4b61c0eb599f84c85acaa14bf04dd90319a45b90"}, - {file = "yarl-1.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff34cb09a332832d1cf38acd0f604c068665192c6107a439a92abfd8acf90fe2"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe8080b4f25dfc44a86bedd14bc4f9d469dfc6456e6f3c5d9077e81a5fedfba7"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8535e111a064f3bdd94c0ed443105934d6f005adad68dd13ce50a488a0ad1bf3"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d155a092bf0ebf4a9f6f3b7a650dc5d9a5bbb585ef83a52ed36ba46f55cc39d"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:778df71c8d0c8c9f1b378624b26431ca80041660d7be7c3f724b2c7a6e65d0d6"}, - {file = "yarl-1.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9f9cafaf031c34d95c1528c16b2fa07b710e6056b3c4e2e34e9317072da5d1a"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ca6b66f69e30f6e180d52f14d91ac854b8119553b524e0e28d5291a724f0f423"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0e7e83f31e23c5d00ff618045ddc5e916f9e613d33c5a5823bc0b0a0feb522f"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:af52725c7c39b0ee655befbbab5b9a1b209e01bb39128dce0db226a10014aacc"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0ab5baaea8450f4a3e241ef17e3d129b2143e38a685036b075976b9c415ea3eb"}, - {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d350388ba1129bc867c6af1cd17da2b197dff0d2801036d2d7d83c2d771a682"}, - {file = "yarl-1.9.3-cp39-cp39-win32.whl", hash = "sha256:e2a16ef5fa2382af83bef4a18c1b3bcb4284c4732906aa69422cf09df9c59f1f"}, - {file = 
"yarl-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:d92d897cb4b4bf915fbeb5e604c7911021a8456f0964f3b8ebbe7f9188b9eabb"}, - {file = "yarl-1.9.3-py3-none-any.whl", hash = "sha256:271d63396460b6607b588555ea27a1a02b717ca2e3f2cf53bdde4013d7790929"}, - {file = "yarl-1.9.3.tar.gz", hash = "sha256:4a14907b597ec55740f63e52d7fee0e9ee09d5b9d57a4f399a7423268e457b57"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = 
"sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, ] [package.dependencies] @@ -2989,18 +3084,18 @@ multidict = ">=4.0" [[package]] name = "zipp" -version = "3.17.0" +version = "3.18.1" description = "Backport of 
pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, - {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, + {file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"}, + {file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [metadata] lock-version = "2.0" diff --git a/server/requirements_cuda.txt b/server/requirements_cuda.txt index bc1b8891..694242e1 100644 --- a/server/requirements_cuda.txt +++ b/server/requirements_cuda.txt @@ -1,5 +1,5 @@ backoff==2.2.1 ; python_version >= "3.9" and python_version < "3.13" -bitsandbytes==0.41.2.post2 ; python_version >= "3.9" and python_version < "3.13" +bitsandbytes==0.41.3.post2 ; python_version >= "3.9" and python_version < "3.13" certifi==2023.11.17 ; python_version >= "3.9" and python_version < "3.13" charset-normalizer==3.3.2 ; python_version >= "3.9" and python_version < "3.13" click==8.1.7 ; python_version >= "3.9" and python_version < "3.13" @@ -8,14 +8,14 @@ deprecated==1.2.14 ; python_version >= "3.9" and python_version < "3.13" einops==0.6.1 ; python_version >= "3.9" and python_version < "3.13" filelock==3.13.1 ; python_version >= "3.9" and python_version < "3.13" fsspec==2023.10.0 ; python_version >= "3.9" and python_version < "3.13" -googleapis-common-protos==1.61.0 ; python_version >= "3.9" and python_version < "3.13" +googleapis-common-protos==1.62.0 ; python_version >= "3.9" and python_version < "3.13" grpc-interceptor==0.15.4 ; python_version >= "3.9" and python_version < "3.13" -grpcio-reflection==1.59.3 ; python_version >= "3.9" and python_version < "3.13" -grpcio-status==1.59.3 ; python_version >= "3.9" and python_version < "3.13" -grpcio==1.59.3 ; python_version >= "3.9" and python_version < "3.13" +grpcio-reflection==1.60.0 ; python_version >= "3.9" and python_version < "3.13" +grpcio-status==1.60.0 ; python_version >= "3.9" and python_version < "3.13" +grpcio==1.60.0 ; python_version >= "3.9" and python_version < "3.13" hf-transfer==0.1.4 ; python_version >= "3.9" and python_version < "3.13" -huggingface-hub==0.16.4 ; python_version >= "3.9" and python_version < "3.13" -idna==3.4 ; python_version >= "3.9" and python_version < "3.13" +huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" +idna==3.6 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" numpy==1.26.2 ; python_version >= "3.9" and python_version < "3.13" 
opentelemetry-api==1.15.0 ; python_version >= "3.9" and python_version < "3.13" @@ -37,11 +37,11 @@ safetensors==0.3.3 ; python_version >= "3.9" and python_version < "3.13" scipy==1.11.4 ; python_version >= "3.9" and python_version < "3.13" sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" setuptools==69.0.2 ; python_version >= "3.9" and python_version < "3.13" -tokenizers==0.13.3 ; python_version >= "3.9" and python_version < "3.13" +tokenizers==0.15.0 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.33.3 ; python_version >= "3.9" and python_version < "3.13" +transformers==4.36.1 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" -typing-extensions==4.8.0 ; python_version >= "3.9" and python_version < "3.13" +typing-extensions==4.9.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.1.0 ; python_version >= "3.9" and python_version < "3.13" win32-setctime==1.1.0 ; python_version >= "3.9" and python_version < "3.13" and sys_platform == "win32" wrapt==1.16.0 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/requirements_rocm.txt b/server/requirements_rocm.txt index 5a321834..e0495fde 100644 --- a/server/requirements_rocm.txt +++ b/server/requirements_rocm.txt @@ -7,14 +7,14 @@ deprecated==1.2.14 ; python_version >= "3.9" and python_version < "3.13" einops==0.6.1 ; python_version >= "3.9" and python_version < "3.13" filelock==3.13.1 ; python_version >= "3.9" and python_version < "3.13" fsspec==2023.10.0 ; python_version >= "3.9" and python_version < "3.13" -googleapis-common-protos==1.61.0 ; python_version >= "3.9" and python_version < "3.13" +googleapis-common-protos==1.62.0 ; python_version >= "3.9" and python_version < "3.13" grpc-interceptor==0.15.4 ; python_version >= "3.9" and python_version < "3.13" -grpcio-reflection==1.59.3 ; python_version >= "3.9" and python_version < "3.13" -grpcio-status==1.59.3 ; python_version >= "3.9" and python_version < "3.13" -grpcio==1.59.3 ; python_version >= "3.9" and python_version < "3.13" +grpcio-reflection==1.60.0 ; python_version >= "3.9" and python_version < "3.13" +grpcio-status==1.60.0 ; python_version >= "3.9" and python_version < "3.13" +grpcio==1.60.0 ; python_version >= "3.9" and python_version < "3.13" hf-transfer==0.1.4 ; python_version >= "3.9" and python_version < "3.13" -huggingface-hub==0.16.4 ; python_version >= "3.9" and python_version < "3.13" -idna==3.4 ; python_version >= "3.9" and python_version < "3.13" +huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" +idna==3.6 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" numpy==1.26.2 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-api==1.15.0 ; python_version >= "3.9" and python_version < "3.13" @@ -36,11 +36,11 @@ safetensors==0.3.3 ; python_version >= "3.9" and python_version < "3.13" scipy==1.11.4 ; python_version >= "3.9" and python_version < "3.13" sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" setuptools==69.0.2 ; python_version >= "3.9" and python_version < "3.13" -tokenizers==0.13.3 ; python_version >= "3.9" and python_version < "3.13" +tokenizers==0.15.0 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.33.3 ; python_version 
>= "3.9" and python_version < "3.13" +transformers==4.36.1 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" -typing-extensions==4.8.0 ; python_version >= "3.9" and python_version < "3.13" +typing-extensions==4.9.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.1.0 ; python_version >= "3.9" and python_version < "3.13" win32-setctime==1.1.0 ; python_version >= "3.9" and python_version < "3.13" and sys_platform == "win32" wrapt==1.16.0 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index afeaf7e5..c85624f3 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -27,11 +27,6 @@ from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn -from text_generation_server.utils.flash_attn import ( - attention, - HAS_FLASH_ATTN_V2_ROCM, - HAS_FLASH_ATTN_V2_CUDA, -) from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, @@ -43,10 +38,6 @@ from text_generation_server.utils.layers import ( ) -if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM: - raise ImportError("Mistral model requires flash attn v2") - - class MistralConfig(PretrainedConfig): model_type = "mistral" diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index 35bb3735..b468d09b 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -27,12 +27,9 @@ from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple +from loguru import logger from text_generation_server.utils import paged_attention, flash_attn -from text_generation_server.utils.flash_attn import ( - HAS_FLASH_ATTN_V2_ROCM, - HAS_FLASH_ATTN_V2_CUDA, -) from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, @@ -44,18 +41,13 @@ from text_generation_server.utils.layers import ( get_linear, ) -if not HAS_FLASH_ATTN_V2_CUDA and not HAS_FLASH_ATTN_V2_ROCM: - raise ImportError("Mixtral model requires flash attn v2") - -try: - import megablocks.ops as ops -except ImportError: - raise ImportError("Mixtral model requires megablocks to be installed") - +HAS_MEGABLOCKS = True try: import stk + import megablocks.ops as ops except ImportError: - raise ImportError("Mixtral model requires stk to be installed") + logger.warning("Mixtral: megablocks is not installed") + HAS_MEGABLOCKS = False class MixtralConfig(PretrainedConfig): @@ -590,7 +582,7 @@ class BlockSparseMoE(nn.Module): return out def forward(self, x: torch.Tensor) -> torch.Tensor: - if len(x) > 256: + if len(x) > 256 and HAS_MEGABLOCKS: return self.sparse_forward(x) # This is faster when there is not a lot of tokens return self.dense_forward(x) From ecb0db45afde6de703a82e8c9c73586ca50bb55d Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Fri, 15 Dec 2023 14:56:17 +0100 Subject: [PATCH 020/153] fix: fix logic if 
--- .../models/custom_modeling/flash_mistral_modeling.py | 2 +- .../models/custom_modeling/flash_mixtral_modeling.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py index c85624f3..0fc4e1b3 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py @@ -60,7 +60,7 @@ class MistralConfig(PretrainedConfig): pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, - sliding_window=4096, + sliding_window=None, **kwargs, ): self.vocab_size = vocab_size diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index b468d09b..61488ec4 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -72,7 +72,7 @@ class MixtralConfig(PretrainedConfig): pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, - sliding_window=4096, + sliding_window=None, num_experts_per_tok=2, num_local_experts=8, **kwargs, From 5ff9e819522ec76295df0c4e928130acde74ec52 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Mon, 18 Dec 2023 10:20:08 +0100 Subject: [PATCH 021/153] fix: fix offline (#1341) (#1347) @oOraph Makes weight resolution honor HF_HUB_OFFLINE: when it is set, weight files are discovered from the local Hugging Face cache instead of querying the Hub. --------- Signed-off-by: Raphael Glon Co-authored-by: Raphael Glon --- router/src/validation.rs | 7 ++ server/tests/utils/test_hub.py | 59 ++++++++++- server/text_generation_server/utils/hub.py | 112 ++++++++++++++------- 3 files changed, 142 insertions(+), 36 deletions(-) diff --git a/router/src/validation.rs b/router/src/validation.rs index ee3dfbe0..486e57b7 100644 --- a/router/src/validation.rs +++ b/router/src/validation.rs @@ -585,6 +585,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: Some(1.0), + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -599,6 +600,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: Some(0.99), + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -613,6 +615,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_p: None, + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -645,6 +648,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(5), + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -659,6 +663,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(4), + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -670,6 +675,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: Some(0), + max_new_tokens: Some(5), ..default_parameters() }, }) @@ -681,6 +687,7 @@ mod tests { inputs: "Hello".to_string(), parameters: GenerateParameters { top_n_tokens: None, + max_new_tokens: Some(5), ..default_parameters() }, }) diff --git a/server/tests/utils/test_hub.py b/server/tests/utils/test_hub.py index fac9a64d..5438c153 100644 --- a/server/tests/utils/test_hub.py +++ b/server/tests/utils/test_hub.py @@ -1,5 +1,13 @@ +import os +import requests +import tempfile + import pytest +import huggingface_hub.constants +from huggingface_hub import 
hf_api + +import text_generation_server.utils.hub from text_generation_server.utils.hub import ( weight_hub_files, download_weights, @@ -10,6 +18,52 @@ from text_generation_server.utils.hub import ( ) +@pytest.fixture() +def offline(): + current_value = text_generation_server.utils.hub.HF_HUB_OFFLINE + text_generation_server.utils.hub.HF_HUB_OFFLINE = True + yield "offline" + text_generation_server.utils.hub.HF_HUB_OFFLINE = current_value + + +@pytest.fixture() +def fresh_cache(): + with tempfile.TemporaryDirectory() as d: + current_value = huggingface_hub.constants.HUGGINGFACE_HUB_CACHE + huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = d + text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = d + os.environ['HUGGINGFACE_HUB_CACHE'] = d + yield + huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = current_value + os.environ['HUGGINGFACE_HUB_CACHE'] = current_value + text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = current_value + + +@pytest.fixture() +def prefetched(): + model_id = "bert-base-uncased" + huggingface_hub.snapshot_download( + repo_id=model_id, + revision="main", + local_files_only=False, + repo_type="model", + allow_patterns=["*.safetensors"] + ) + yield model_id + + +def test_weight_hub_files_offline_error(offline, fresh_cache): + # If the model is not prefetched then it will raise an error + with pytest.raises(EntryNotFoundError): + weight_hub_files("gpt2") + + +def test_weight_hub_files_offline_ok(prefetched, offline): + # If the model is prefetched then we should be able to get the weight files from local cache + filenames = weight_hub_files(prefetched) + assert filenames == ['model.safetensors'] + + def test_weight_hub_files(): filenames = weight_hub_files("bigscience/bloom-560m") assert filenames == ["model.safetensors"] @@ -33,8 +87,11 @@ def test_download_weights(): assert files == local_files -def test_weight_files_error(): +def test_weight_files_revision_error(): with pytest.raises(RevisionNotFoundError): weight_files("bigscience/bloom-560m", revision="error") + + +def test_weight_files_not_cached_error(fresh_cache): with pytest.raises(LocalEntryNotFoundError): weight_files("bert-base-uncased") diff --git a/server/text_generation_server/utils/hub.py b/server/text_generation_server/utils/hub.py index 23743c9b..019d4855 100644 --- a/server/text_generation_server/utils/hub.py +++ b/server/text_generation_server/utils/hub.py @@ -6,24 +6,29 @@ from loguru import logger from pathlib import Path from typing import Optional, List -from huggingface_hub import HfApi, hf_hub_download +from huggingface_hub import file_download, hf_api, HfApi, hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.utils import ( LocalEntryNotFoundError, EntryNotFoundError, - RevisionNotFoundError, # Import here to ease try/except in other part of the lib + RevisionNotFoundError, # noqa # Import here to ease try/except in other part of the lib ) WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) +HF_HUB_OFFLINE = os.environ.get("HF_HUB_OFFLINE", "0").lower() in ["true", "1", "yes"] -def weight_hub_files( - model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" -) -> List[str]: - """Get the weights filenames on the hub""" - api = HfApi() - info = api.model_info(model_id, revision=revision) - filenames = [ +def _cached_weight_files(model_id: str, revision: Optional[str], extension: str) -> List[str]: + """Guess weight files from the cached revision snapshot directory""" + d = 
_get_cached_revision_directory(model_id, revision) + if not d: + return [] + filenames = _weight_files_from_dir(d, extension) + return filenames + + +def _weight_hub_files_from_model_info(info: hf_api.ModelInfo, extension: str) -> List[str]: + return [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) @@ -33,24 +38,26 @@ def weight_hub_files( and "training" not in s.rfilename ] - if not filenames: - raise EntryNotFoundError( - f"No {extension} weights found for model {model_id} and revision {revision}.", - None, - ) +def _weight_files_from_dir(d: Path, extension: str) -> List[str]: + # os.walk: do not iterate, just scan for depth 1, not recursively + # see _weight_hub_files_from_model_info, that's also what is + # done there with the len(s.rfilename.split("/")) == 1 condition + root, _, files = next(os.walk(str(d))) + filenames = [f for f in files + if f.endswith(extension) + and "arguments" not in f + and "args" not in f + and "training" not in f] return filenames -def try_to_load_from_cache( - model_id: str, revision: Optional[str], filename: str -) -> Optional[Path]: - """Try to load a file from the Hugging Face cache""" +def _get_cached_revision_directory(model_id: str, revision: Optional[str]) -> Optional[Path]: if revision is None: revision = "main" - object_id = model_id.replace("/", "--") - repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}" + repo_cache = Path(HUGGINGFACE_HUB_CACHE) / Path( + file_download.repo_folder_name(repo_id=model_id, repo_type="model")) if not repo_cache.is_dir(): # No cache for this model @@ -74,8 +81,42 @@ def try_to_load_from_cache( # No cache for this revision and we won't try to return a random revision return None + return snapshots_dir / revision + + +def weight_hub_files( + model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" +) -> List[str]: + """Get the weights filenames on the hub""" + api = HfApi() + + if HF_HUB_OFFLINE: + filenames = _cached_weight_files(model_id, revision, extension) + else: + # Online case, fetch model info from the Hub + info = api.model_info(model_id, revision=revision) + filenames = _weight_hub_files_from_model_info(info, extension) + + if not filenames: + raise EntryNotFoundError( + f"No {extension} weights found for model {model_id} and revision {revision}.", + None, + ) + + return filenames + + +def try_to_load_from_cache( + model_id: str, revision: Optional[str], filename: str +) -> Optional[Path]: + """Try to load a file from the Hugging Face cache""" + + d = _get_cached_revision_directory(model_id, revision) + if not d: + return None + # Check if file exists in cache - cached_file = snapshots_dir / revision / filename + cached_file = d / filename return cached_file if cached_file.is_file() else None @@ -84,13 +125,14 @@ def weight_files( ) -> List[Path]: """Get the local files""" # Local model - if Path(model_id).exists() and Path(model_id).is_dir(): - local_files = list(Path(model_id).glob(f"*{extension}")) + d = Path(model_id) + if d.exists() and d.is_dir(): + local_files = _weight_files_from_dir(d, extension) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) - return local_files + return [Path(f) for f in local_files] try: filenames = weight_hub_files(model_id, revision, extension) @@ -138,33 +180,33 @@ def download_weights( ) -> List[Path]: """Download the safetensors files from the hub""" - def download_file(filename, tries=5, backoff: int = 5): - local_file = 
try_to_load_from_cache(model_id, revision, filename) + def download_file(fname, tries=5, backoff: int = 5): + local_file = try_to_load_from_cache(model_id, revision, fname) if local_file is not None: - logger.info(f"File {filename} already present in cache.") + logger.info(f"File {fname} already present in cache.") return Path(local_file) - for i in range(tries): + for idx in range(tries): try: - logger.info(f"Download file: {filename}") - start_time = time.time() + logger.info(f"Download file: {fname}") + stime = time.time() local_file = hf_hub_download( - filename=filename, + filename=fname, repo_id=model_id, revision=revision, - local_files_only=False, + local_files_only=HF_HUB_OFFLINE, ) logger.info( - f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - start_time))}." + f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - stime))}." ) return Path(local_file) except Exception as e: - if i + 1 == tries: + if idx + 1 == tries: raise e logger.error(e) logger.info(f"Retrying in {backoff} seconds") time.sleep(backoff) - logger.info(f"Retry {i + 1}/{tries - 1}") + logger.info(f"Retry {idx + 1}/{tries - 1}") # We do this instead of using tqdm because we want to parse the logs with the launcher start_time = time.time() From b7299e1b7fac91904cdfb761c8aee7b8caa1396a Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Mon, 18 Dec 2023 16:07:05 +0100 Subject: [PATCH 022/153] fix: fix gpt-q with groupsize = -1 (#1358) --- proto/generate.proto | 3 +++ router/client/src/client.rs | 14 ++++++++++++-- server/text_generation_server/server.py | 7 ++++++- .../text_generation_server/utils/gptq/exllama.py | 11 ++--------- .../text_generation_server/utils/gptq/exllamav2.py | 13 +------------ server/text_generation_server/utils/weights.py | 12 ++++++------ 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/proto/generate.proto b/proto/generate.proto index fc4617f9..ceb421c4 100644 --- a/proto/generate.proto +++ b/proto/generate.proto @@ -213,6 +213,9 @@ message DecodeResponse { message WarmupRequest { /// Batch to warmup on repeated Batch batches = 1; + uint32 max_input_length = 2; + uint32 max_prefill_tokens = 3; + uint32 max_total_tokens = 4; } /// Empty response diff --git a/router/client/src/client.rs b/router/client/src/client.rs index f0ecb05a..ba7b7565 100644 --- a/router/client/src/client.rs +++ b/router/client/src/client.rs @@ -167,7 +167,12 @@ impl Client { ); num_batches ]; - let request = tonic::Request::new(WarmupRequest { batches }).inject_context(); + let request = tonic::Request::new(WarmupRequest { + batches, + max_input_length, + max_prefill_tokens, + max_total_tokens, + }).inject_context(); let _response = self.stub.warmup(request).await?.into_inner(); } @@ -188,7 +193,12 @@ impl Client { ); num_batches ]; - let request = tonic::Request::new(WarmupRequest { batches }).inject_context(); + let request = tonic::Request::new(WarmupRequest { + batches, + max_input_length, + max_prefill_tokens, + max_total_tokens, + }).inject_context(); let _response = self.stub.warmup(request).await?.into_inner(); } Ok(None) // No support for maximum total tokens diff --git a/server/text_generation_server/server.py b/server/text_generation_server/server.py index 83a65251..4a07733a 100644 --- a/server/text_generation_server/server.py +++ b/server/text_generation_server/server.py @@ -21,7 +21,12 @@ from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): - def __init__(self, 
model: Model, cache: Cache, server_urls: List[str]): + def __init__( + self, + model: Model, + cache: Cache, + server_urls: List[str], + ): self.cache = cache self.model = model self.server_urls = server_urls diff --git a/server/text_generation_server/utils/gptq/exllama.py b/server/text_generation_server/utils/gptq/exllama.py index 7353afb5..32f817db 100644 --- a/server/text_generation_server/utils/gptq/exllama.py +++ b/server/text_generation_server/utils/gptq/exllama.py @@ -37,19 +37,12 @@ def set_device(device): DEVICE = device -def create_exllama_buffers(): +def create_exllama_buffers(max_total_tokens: int): global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE, TEMP_STATE, TEMP_DQ assert DEVICE is not None, "call set_device first" - if ACT_ORDER: - # TODO: this should be set to rust side `max_total_tokens`, but TGI - # does not offer an API to expose this variable to python, as this variable - # is handled by the client but it appears the model is initialized by the server. - # An alternative could be to initialize the buffers during warmup. - # Dummy - max_total_tokens = 2048 - else: + if not ACT_ORDER: max_total_tokens = 1 # This temp_state buffer is required to reorder X in the act-order case. diff --git a/server/text_generation_server/utils/gptq/exllamav2.py b/server/text_generation_server/utils/gptq/exllamav2.py index f820f0d9..dd41b269 100644 --- a/server/text_generation_server/utils/gptq/exllamav2.py +++ b/server/text_generation_server/utils/gptq/exllamav2.py @@ -101,7 +101,7 @@ def set_device(device): DEVICE = device -def create_exllama_buffers(): +def create_exllama_buffers(max_total_tokens: int): global FIXED_BYTES, LAYERS, DEVICE temp_dq = ExLlamaV2DeviceTensors(DEVICE, FIXED_BYTES) @@ -138,17 +138,6 @@ class QuantLinear(nn.Module): self.bias = bias if bias is not None else None self.group_size = groupsize - infeatures = self.infeatures - outfeatures = self.outfeatures - assert qweight.shape == (infeatures // 32 * self.bits, outfeatures) - assert infeatures % self.group_size == 0 - assert qzeros.shape == ( - infeatures // self.group_size, - outfeatures // 32 * self.bits, - ) - assert scales.shape == (infeatures // self.group_size, outfeatures) - assert g_idx.shape == (infeatures,), f"{g_idx.shape}, {infeatures}" - global FIXED_BYTES, LAYERS FIXED_BYTES = max(FIXED_BYTES, self.scratch_space_fixed()) LAYERS.append(self) diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py index 67fda511..a2cca2ea 100644 --- a/server/text_generation_server/utils/weights.py +++ b/server/text_generation_server/utils/weights.py @@ -281,17 +281,17 @@ class Weights: else: logger.info(f"Using exllama kernels v{HAS_EXLLAMA}") - if use_exllama: + if use_exllama and groupsize != -1: qzeros = self.get_sharded(f"{prefix}.qzeros", dim=0) scales = self.get_sharded(f"{prefix}.scales", dim=0) - g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) - g_idx = g_idx - g_idx[0] else: - # The triton kernel reorders the scales/zero points instead of the weight/activation. - # Thus, each rank needs the full qzeros/scales. 
qzeros = self.get_tensor(f"{prefix}.qzeros") scales = self.get_tensor(f"{prefix}.scales") - g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) + + g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) + + if use_exllama: + g_idx = g_idx - g_idx[0] weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) elif quantize == "awq": From be05972911f7060c75013a8f550320eadc296f27 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 20 Dec 2023 15:37:14 +0100 Subject: [PATCH 023/153] Peft safetensors. (#1364) Works by removing adapter_model.safetensors from being detected as the core model file (which skips the real peft detection). --- server/text_generation_server/utils/hub.py | 1 + 1 file changed, 1 insertion(+) diff --git a/server/text_generation_server/utils/hub.py b/server/text_generation_server/utils/hub.py index 019d4855..62afff0c 100644 --- a/server/text_generation_server/utils/hub.py +++ b/server/text_generation_server/utils/hub.py @@ -48,6 +48,7 @@ def _weight_files_from_dir(d: Path, extension: str) -> List[str]: if f.endswith(extension) and "arguments" not in f and "args" not in f + and "adapter" not in f and "training" not in f] return filenames From 3e22ad985e0b8a8267b9ad77c0f631ed421463a8 Mon Sep 17 00:00:00 2001 From: regisss <15324346+regisss@users.noreply.github.com> Date: Thu, 21 Dec 2023 11:05:35 +0100 Subject: [PATCH 024/153] docs: Change URL for Habana Gaudi support in doc (#1343) --- docs/source/supported_models.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md index 34775139..0708c729 100644 --- a/docs/source/supported_models.md +++ b/docs/source/supported_models.md @@ -39,7 +39,7 @@ text-generation-launcher --model-id ## Supported Hardware -TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. 
+TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. TGI also has support of ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future: * Quantization (GPTQ, AWQ, etc.) @@ -47,5 +47,5 @@ TGI also has support of ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with pag * Kernel for sliding window attention (Mistral) TGI is also supported on the following AI hardware accelerators: -- *Habana first-gen Gaudi and Gaudi2:* check out this [example](https://github.com/huggingface/optimum-habana/tree/main/text-generation-inference) how to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index) +- *Habana first-gen Gaudi and Gaudi2:* check out this [repository](https://github.com/huggingface/tgi-gaudi) to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index) * *AWS Inferentia2:* check out this [guide](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference) on how to serve models with TGI on Inferentia2. From 7eeabb9cdac0f63345fee71b22cd6a7b212ff95b Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Thu, 21 Dec 2023 17:25:22 +0100 Subject: [PATCH 025/153] feat: update exllamav2 kernels (#1370) Co-authored-by: Nicolas Patry --- .../exllamav2_kernels/config.h | 2 + .../exllamav2_kernels/cuda/q_gemm.cu | 98 +++--- .../exllamav2_kernels/cuda/q_gemm.cuh | 5 +- .../exllamav2_kernels/cuda/q_gemm_kernel.cuh | 317 +++++++++++------- .../cuda/q_gemm_kernel_gptq.cuh | 142 +++++--- .../exllamav2_kernels/cuda/q_matrix.cu | 70 ++-- .../exllamav2_kernels/cuda/q_matrix.cuh | 4 +- .../exllamav2_kernels/cuda/quant/qdq_util.cuh | 2 + .../exllamav2_kernels/cuda/util.cuh | 12 + .../exllamav2_kernels/ext.cpp | 5 + server/tests/utils/test_hub.py | 8 +- .../flash_santacoder_modeling.py | 2 +- .../utils/gptq/exllamav2.py | 33 ++ server/text_generation_server/utils/hub.py | 32 +- server/text_generation_server/utils/layers.py | 6 +- server/text_generation_server/utils/log.py | 6 + .../text_generation_server/utils/weights.py | 36 +- 17 files changed, 525 insertions(+), 255 deletions(-) create mode 100644 server/text_generation_server/utils/log.py diff --git a/server/exllamav2_kernels/exllamav2_kernels/config.h b/server/exllamav2_kernels/exllamav2_kernels/config.h index 86baaf41..32a1a37d 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/config.h +++ b/server/exllamav2_kernels/exllamav2_kernels/config.h @@ -2,6 +2,7 @@ #define _config_h #define MAX_Q_GEMM_ROWS 50 +#define MAX_Q_GEMM_WEIGHTS 4 // must be <= MAX_Q_GEMM_ROWS #define QMODE_2BIT 1 #define QMODE_3BIT 1 @@ -10,4 +11,5 @@ #define QMODE_6BIT 0 #define QMODE_8BIT 0 + #endif diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu index 351b9cd5..b4e4cf22 100644 --- 
a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu @@ -10,16 +10,19 @@ #include "quant/qdq_6.cuh" #include "quant/qdq_8.cuh" -#define BLOCK_KN_SIZE 128 -#define BLOCK_M_SIZE_MAX 8 -#define MAX_GROUPS_IN_BLOCK (BLOCK_KN_SIZE / 32) +#define GPTQ_BLOCK_KN_SIZE 128 +#define GPTQ_BLOCK_M_SIZE_MAX 8 +#define GPTQ_MAX_GROUPS_IN_BLOCK (GPTQ_BLOCK_KN_SIZE / 32) + +#define EXL2_BLOCK_KN_SIZE 64 +#define EXL2_BLOCK_M_SIZE_MAX 8 +#define EXL2_MAX_GROUPS_IN_BLOCK (EXL2_BLOCK_KN_SIZE / 32) + #define CLEAR_N_SIZE 256 #include "q_gemm_kernel.cuh" #include "q_gemm_kernel_gptq.cuh" -#include "compat_gemm.cuh" - void gemm_half_q_half_cuda_part ( const half* a, @@ -29,20 +32,23 @@ void gemm_half_q_half_cuda_part int size_n, int size_k, int m_count, - bool clear + bool clear, + const half* r_weights, + int r_weights_stride, + bool mul_r_weights ) { if (!b->is_gptq) { dim3 blockDim, gridDim; - blockDim.x = BLOCK_KN_SIZE; + blockDim.x = EXL2_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; - gridDim.x = DIVIDE(size_n, BLOCK_KN_SIZE * 4); + gridDim.x = DIVIDE(size_n, EXL2_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); - gridDim.z = DIVIDE(size_k, BLOCK_KN_SIZE); + gridDim.z = DIVIDE(size_k, EXL2_BLOCK_KN_SIZE); - fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(true, m_count); + fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights); kernel<<>> ( @@ -55,7 +61,7 @@ void gemm_half_q_half_cuda_part size_n, size_k, b->groups, - b->groupsize, + b->cuda_q_group_map, b->cuda_q_perm, b->rows_8, b->rows_6, @@ -63,24 +69,27 @@ void gemm_half_q_half_cuda_part b->rows_4, b->rows_3, b->rows_2, - clear + clear, + r_weights, + r_weights_stride ); } else { dim3 blockDim, gridDim; - blockDim.x = BLOCK_KN_SIZE; + blockDim.x = GPTQ_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; - gridDim.x = DIVIDE(size_n, BLOCK_KN_SIZE * 4); + gridDim.x = DIVIDE(size_n, GPTQ_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); - gridDim.z = DIVIDE(size_k, BLOCK_KN_SIZE); + gridDim.z = DIVIDE(size_k, GPTQ_BLOCK_KN_SIZE); - fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(true, m_count); + fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(m_count, r_weights != NULL, mul_r_weights); -// DBGX((uint64_t) b->cuda_q_perm); -// DBGI(b->rows_4); -// DBGI(b->height); +// DBGX((uint64_t) r_weights); +// if (r_weights) +// print_global_mem(r_weights, 1, 1, 1); +// DBGI(r_weights_stride); kernel<<>> ( @@ -93,10 +102,12 @@ void gemm_half_q_half_cuda_part size_n, size_k, b->groups, - b->groupsize, + b->gptq_groupsize, b->cuda_q_perm, b->rows_4, - clear + clear, + r_weights, + r_weights_stride ); } } @@ -112,13 +123,14 @@ void gemm_half_q_half_cuda int size_k, bool clear, half* temp_dq, - bool force_cuda + bool force_cuda, + const half* r_weights, + const int r_weights_stride, + bool mul_r_weights ) { if (size_m > MAX_Q_GEMM_ROWS && !force_cuda) { - //printf("cublas\n"); - // Reconstruct FP16 matrix, then cuBLAS if (!temp_dq) temp_dq = b->temp_dq; @@ -139,12 +151,12 @@ void gemm_half_q_half_cuda //const float alpha = 1.0f; //const float beta = clear ? 
0.0f : 1.0f; //cublasSgemmEx(cublas_handle, - // CUBLAS_OP_N, - // CUBLAS_OP_N, - // size_n, size_m, size_k, - // &alpha, temp_dq, CUDA_R_16F, size_n, - // a, CUDA_R_16F, size_k, - // &beta, c, CUDA_R_16F, size_n); + // CUBLAS_OP_N, + // CUBLAS_OP_N, + // size_n, size_m, size_k, + // &alpha, temp_dq, CUDA_R_16F, size_n, + // a, CUDA_R_16F, size_k, + // &beta, c, CUDA_R_16F, size_n); //const float alpha = 1.0f; //const float beta = clear ? 0.0f : 1.0f; @@ -158,24 +170,21 @@ void gemm_half_q_half_cuda } else { - //printf("cuda\n"); - // Quantized matmul - //if (clear) clear_tensor_cuda(c, size_m, size_n); - - int max_chunks = size_m / BLOCK_M_SIZE_MAX; - int last_chunk = max_chunks * BLOCK_M_SIZE_MAX; + int block_m_size_max = b->is_gptq ? GPTQ_BLOCK_M_SIZE_MAX : EXL2_BLOCK_M_SIZE_MAX; + int max_chunks = size_m / block_m_size_max; + int last_chunk = max_chunks * block_m_size_max; int last_chunk_size = size_m - last_chunk; if (max_chunks) { - gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, BLOCK_M_SIZE_MAX, clear); + gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, block_m_size_max, clear, r_weights, r_weights_stride, mul_r_weights); } if (last_chunk_size) { - gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear); + gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear, r_weights, r_weights_stride, mul_r_weights); } } } @@ -201,11 +210,10 @@ void clear_tensor_cuda int size_n ) { - return; - dim3 blockDim, gridDim; - blockDim.x = CLEAR_N_SIZE; - blockDim.y = 1; - gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE); - gridDim.y = size_m; - clear_kernel<<>>(c, size_m, size_n); +// dim3 blockDim, gridDim; +// blockDim.x = CLEAR_N_SIZE; +// blockDim.y = 1; +// gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE); +// gridDim.y = size_m; +// clear_kernel<<>>(c, size_m, size_n); } diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh index c69f1a70..b643f915 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh @@ -20,7 +20,10 @@ void gemm_half_q_half_cuda int size_k, bool clear = false, half* reconstruct = NULL, - bool force_cuda = false + bool force_cuda = false, + const half* r_weights = NULL, + const int r_weights_stride = 0, + bool mul_r_weights = false ); void clear_tensor_cuda diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh index 0b899a84..9cd2ba01 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh @@ -1,8 +1,5 @@ #include "compat.cuh" -#include -#include - __forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result, const half qs_h) { half2 result = {}; @@ -60,6 +57,47 @@ __forceinline__ __device__ float dot22_32_f(half2(&dq)[16], const half* a_ptr, c return fma(result_f, qs_f, g_result); } +__forceinline__ __device__ half dot22_8_h(half2(&dq)[4], const half* a_ptr, const half g_result, const half qs_h) +{ + // Use FP32 accumulator to avoid potential overflow since unscaled weights are in the range -128..127 + + float result = {}; + #pragma unroll + for (int i = 0; i < 4; i++) + { + half2 w01 = dq[i]; + float w0 = 
__low2float(w01); + float w1 = __high2float(w01); + float x0 = __half2float(*a_ptr++); + float x1 = __half2float(*a_ptr++); + result = fma(w0, x0, result); + result = fma(w1, x1, result); + } + float qs = __half2float(qs_h); + result *= qs; + half result_h = __float2half_rn(result); + return __hadd(result_h, g_result); +} + +__forceinline__ __device__ half dot22_16_h(half2(&dq)[8], const half* a_ptr, const half g_result, const half qs_h) +{ + half2 result = {}; + const half2* a2_ptr = (const half2*)a_ptr; + #pragma unroll + for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result); + half result_h = __hadd(__low2half(result), __high2half(result)); + return __hfma(result_h, qs_h, g_result); +} + +__forceinline__ __device__ half dot22_32_h(half2(&dq)[16], const half* a_ptr, const half g_result, const half qs_h) +{ + half2 result = {}; + const half2* a2_ptr = (const half2*)a_ptr; + #pragma unroll + for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result); + half result_h = __hadd(__low2half(result), __high2half(result)); + return __hfma(result_h, qs_h, g_result); +} typedef void (*fp_gemm_half_q_half_kernel) @@ -73,7 +111,7 @@ typedef void (*fp_gemm_half_q_half_kernel) const int, const int, const int, - const int, + const uint16_t*, const uint16_t*, const int, const int, @@ -81,10 +119,12 @@ const int, const int, const int, - const bool + const bool, + const half*, + const int ); -template <bool first_block, int m_count> +template <int m_count, bool use_r_weights, bool mul_r_weights> __global__ void gemm_half_q_half_kernel ( const half* __restrict__ a, @@ -96,7 +136,7 @@ __global__ void gemm_half_q_half_kernel const int size_n, const int size_k, const int groups, - const int groupsize, + const uint16_t* __restrict__ b_q_group_map, const uint16_t* __restrict__ b_q_perm, const int rows_8, const int rows_6, @@ -104,7 +144,9 @@ __global__ void gemm_half_q_half_kernel const int rows_4, const int rows_3, const int rows_2, - const bool clear + const bool clear, + const half* r_weights, + const int r_weights_stride ) { MatrixView_half a_(a, size_m, size_k); @@ -115,18 +157,34 @@ __global__ void gemm_half_q_half_kernel // Block - int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4; + int offset_n = blockIdx.x * EXL2_BLOCK_KN_SIZE * 4; int offset_m = blockIdx.y * m_count; - int offset_k = blockIdx.z * BLOCK_KN_SIZE; + int offset_k = blockIdx.z * EXL2_BLOCK_KN_SIZE; - int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n); + int end_n = min(offset_n + EXL2_BLOCK_KN_SIZE * 4, size_n); int end_m = min(offset_m + m_count, size_m); - int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + int end_k = min(offset_k + EXL2_BLOCK_KN_SIZE, size_k); int n = offset_n + t * 4; + // Read weights + + half_uint16 weights[MAX_Q_GEMM_WEIGHTS]; + if constexpr (use_r_weights) + { + uint16_t any_w = 0; + const half* w_ptr = r_weights; + for (int m = 0; m < m_count; ++m) + { + weights[m].as_half = *w_ptr; + w_ptr += r_weights_stride; + any_w |= weights[m].as_uint16; + } + if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!)
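+ // When the early exit above is taken, no atomicAdd ever touches the output
+ // for this block, so the caller is assumed to have cleared c (or to be
+ // deliberately accumulating into it) before the launch.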
+ } + // Preload block_a - __shared__ half block_a[m_count][BLOCK_KN_SIZE]; + __shared__ half block_a[m_count][EXL2_BLOCK_KN_SIZE]; if (offset_k + t < end_k) { @@ -135,6 +193,7 @@ __global__ void gemm_half_q_half_kernel const half* a_ptr = a_.item_ptr(offset_m + m, 0); half* block_a_ptr = block_a[m]; half a0 = a_ptr[b_q_perm[offset_k + t]]; +// half a0 = a_ptr[offset_k + t]; block_a_ptr[t] = a0; } } @@ -153,14 +212,19 @@ __global__ void gemm_half_q_half_kernel // Find initial group - int group = offset_k / groupsize; + //int group = offset_k / groupsize; + int group = b_q_group_map[offset_k * 2]; + +// if (offset_m == 0 && t == 0) +// DBGI2(offset_k, group); // Preload scales - float scales[MAX_GROUPS_IN_BLOCK][4]; + half scales[EXL2_MAX_GROUPS_IN_BLOCK][4]; - int groups_in_block = DIVIDE((end_k - offset_k), groupsize); - for (int g = 0; g < groups_in_block; g++) + //int groups_in_block = DIVIDE((end_k - offset_k), groupsize); + int temp_k = offset_k; + for (int g = 0; temp_k < end_k; g++) { int qscales[4]; b_q_scale_.item4(qscales, group + g, n); @@ -168,11 +232,12 @@ __global__ void gemm_half_q_half_kernel qscales[1]++; qscales[2]++; qscales[3]++; - float maxscale = __half2float(b_q_scale_max[group + g]); - scales[g][0] = __int2float_rn(qscales[0] * qscales[0]) * maxscale; - scales[g][1] = __int2float_rn(qscales[1] * qscales[1]) * maxscale; - scales[g][2] = __int2float_rn(qscales[2] * qscales[2]) * maxscale; - scales[g][3] = __int2float_rn(qscales[3] * qscales[3]) * maxscale; + half maxscale = b_q_scale_max[group + g]; + scales[g][0] = __hmul(__int2half_rn(qscales[0] * qscales[0]), maxscale); + scales[g][1] = __hmul(__int2half_rn(qscales[1] * qscales[1]), maxscale); + scales[g][2] = __hmul(__int2half_rn(qscales[2] * qscales[2]), maxscale); + scales[g][3] = __hmul(__int2half_rn(qscales[3] * qscales[3]), maxscale); + temp_k += b_q_group_map[temp_k * 2 + 1]; } // a, b offset @@ -193,20 +258,20 @@ __global__ void gemm_half_q_half_kernel const uint32_t* b_ptr = b_q_weight + qk * size_n + n; const half* a_ptr = &block_a[0][0]; - int a_stride = BLOCK_KN_SIZE; + int a_stride = EXL2_BLOCK_KN_SIZE; // Initial group int scales_idx = 0; - float qs_f0 = scales[scales_idx][0]; - float qs_f1 = scales[scales_idx][1]; - float qs_f2 = scales[scales_idx][2]; - float qs_f3 = scales[scales_idx][3]; - int nextgroup = offset_k + groupsize; + half qs_h0 = scales[scales_idx][0]; + half qs_h1 = scales[scales_idx][1]; + half qs_h2 = scales[scales_idx][2]; + half qs_h3 = scales[scales_idx][3]; + int nextgroup = offset_k + b_q_group_map[offset_k * 2 + 1]; // Column result - float block_c[m_count][4] = {}; + half block_c[m_count][4] = {}; // Dequantize groups @@ -218,11 +283,11 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll @@ -240,10 +305,11 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_8_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - block_c[m][1] = dot22_8_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_8_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_8_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr 
(use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_8_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_8_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_8_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_8_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 8; } @@ -256,11 +322,11 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll @@ -279,10 +345,11 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_16_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - block_c[m][1] = dot22_16_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_16_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_16_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_16_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_16_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_16_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_16_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 16; } @@ -295,11 +362,11 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll @@ -320,10 +387,11 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_32_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - block_c[m][1] = dot22_32_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_32_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_32_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_32_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_32_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_32_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_32_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 32; } @@ -337,11 +405,11 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll @@ -358,10 +426,11 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_8_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - 
block_c[m][1] = dot22_8_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_8_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_8_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_8_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_8_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_8_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_8_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 8; } @@ -374,11 +443,11 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll @@ -397,10 +466,11 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_32_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - block_c[m][1] = dot22_32_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_32_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_32_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_32_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_32_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_32_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_32_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 32; } @@ -413,15 +483,15 @@ __global__ void gemm_half_q_half_kernel { group++; scales_idx++; - qs_f0 = scales[scales_idx][0]; - qs_f1 = scales[scales_idx][1]; - qs_f2 = scales[scales_idx][2]; - qs_f3 = scales[scales_idx][3]; - nextgroup += groupsize; + qs_h0 = scales[scales_idx][0]; + qs_h1 = scales[scales_idx][1]; + qs_h2 = scales[scales_idx][2]; + qs_h3 = scales[scales_idx][3]; + nextgroup += b_q_group_map[k * 2 + 1]; } #pragma unroll - for (int j = 0; j < 2; j++) + for (int j = 0; j < 1; j++) { int4 load_int4[1]; load_int4[0] = *((int4*) b_ptr); b_ptr += size_n; @@ -434,15 +504,16 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { - block_c[m][0] = dot22_16_f(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_f0); - block_c[m][1] = dot22_16_f(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_f1); - block_c[m][2] = dot22_16_f(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_f2); - block_c[m][3] = dot22_16_f(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_f3); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = dot22_16_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0); + block_c[m][1] = dot22_16_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1); + block_c[m][2] = dot22_16_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2); + block_c[m][3] = dot22_16_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3); } a_ptr += 16; } - k += 32; + k += 16; } // Accumulate column sums in c @@ -450,38 +521,60 @@ __global__ void gemm_half_q_half_kernel for (int m = 0; m < m_count; m++) { half2* out = (half2*)c_.item_ptr(offset_m + m, n); - half2 result01 = 
__halves2half2(__float2half_rn(block_c[m][0]), __float2half_rn(block_c[m][1])); - half2 result23 = __halves2half2(__float2half_rn(block_c[m][2]), __float2half_rn(block_c[m][3])); + half2 result01 = __halves2half2(block_c[m][0], block_c[m][1]); + half2 result23 = __halves2half2(block_c[m][2], block_c[m][3]); + + if constexpr (mul_r_weights) + { + half2 w_mul2 = __half2half2(weights[m].as_half); + result01 = __hmul2(result01, w_mul2); + result23 = __hmul2(result23, w_mul2); + } + atomicAdd(out , result01); atomicAdd(out + 1, result23); +// *out = result01; +// *(out + 1) = result23; } } -fp_gemm_half_q_half_kernel pick_gemm_half_q_half_kernel(bool first_block, const int m_count) +template <bool use_r_weights, bool mul_r_weights> +struct map_m_count_exl2 { + static constexpr fp_gemm_half_q_half_kernel pick_gemm_half_q_half_kernel(const int m_count) + { + #if EXL2_BLOCK_M_SIZE_MAX >= 1 + if (m_count == 1) return gemm_half_q_half_kernel<1, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 2 + if (m_count == 2) return gemm_half_q_half_kernel<2, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 3 + if (m_count == 3) return gemm_half_q_half_kernel<3, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 4 + if (m_count == 4) return gemm_half_q_half_kernel<4, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 5 + if (m_count == 5) return gemm_half_q_half_kernel<5, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 6 + if (m_count == 6) return gemm_half_q_half_kernel<6, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 7 + if (m_count == 7) return gemm_half_q_half_kernel<7, use_r_weights, mul_r_weights>; + #endif + #if EXL2_BLOCK_M_SIZE_MAX >= 8 + if (m_count == 8) return gemm_half_q_half_kernel<8, use_r_weights, mul_r_weights>; + #endif + return NULL; + } +}; + +fp_gemm_half_q_half_kernel pick_gemm_half_q_half_kernel(const int m_count, bool r_weights, bool mul_r_weights) { - #if BLOCK_M_SIZE_MAX >= 1 - if (m_count == 1) return gemm_half_q_half_kernel<true, 1>; - #endif - #if BLOCK_M_SIZE_MAX >= 2 - if (m_count == 2) return gemm_half_q_half_kernel<true, 2>; - #endif - #if BLOCK_M_SIZE_MAX >= 3 - if (m_count == 3) return gemm_half_q_half_kernel<true, 3>; - #endif - #if BLOCK_M_SIZE_MAX >= 4 - if (m_count == 4) return gemm_half_q_half_kernel<true, 4>; - #endif - #if BLOCK_M_SIZE_MAX >= 5 - if (m_count == 5) return gemm_half_q_half_kernel<true, 5>; - #endif - #if BLOCK_M_SIZE_MAX >= 6 - if (m_count == 6) return gemm_half_q_half_kernel<true, 6>; - #endif - #if BLOCK_M_SIZE_MAX >= 7 - if (m_count == 7) return gemm_half_q_half_kernel<true, 7>; - #endif - #if BLOCK_M_SIZE_MAX >= 8 - if (m_count == 8) return gemm_half_q_half_kernel<true, 8>; - #endif + if (!r_weights && !mul_r_weights) return map_m_count_exl2<false, false>::pick_gemm_half_q_half_kernel(m_count); + if (!r_weights && mul_r_weights) return map_m_count_exl2<false, true>::pick_gemm_half_q_half_kernel(m_count); + if ( r_weights && !mul_r_weights) return map_m_count_exl2< true, false>::pick_gemm_half_q_half_kernel(m_count); + if ( r_weights && mul_r_weights) return map_m_count_exl2< true, true>::pick_gemm_half_q_half_kernel(m_count); return NULL; } diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh index ebaa42d0..74b0db2b 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh @@ -18,6 +18,15 @@ __forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const
half* a_ptr) return __half2float(__low2half(result)) + __half2float(__high2half(result)); } +__forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr) +{ + half2 result = {}; + const half2* a2_ptr = (const half2*)a_ptr; + #pragma unroll + for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); + return result; +} + typedef void (*fp_gemm_half_q_half_gptq_kernel) ( const half*, @@ -32,10 +41,12 @@ typedef void (*fp_gemm_half_q_half_gptq_kernel) const int, const uint16_t*, const int, - const bool + const bool, + const half*, + const int ); -template <bool first_block, int m_count> +template <int m_count, bool use_r_weights, bool mul_r_weights> __global__ void gemm_half_q_half_gptq_kernel ( const half* __restrict__ a, @@ -50,7 +61,9 @@ __global__ void gemm_half_q_half_gptq_kernel const int groupsize, const uint16_t* __restrict__ b_q_perm, const int rows_4, - const bool clear + const bool clear, + const half* r_weights, + const int r_weights_stride ) { MatrixView_half a_(a, size_m, size_k); @@ -62,19 +75,35 @@ __global__ void gemm_half_q_half_gptq_kernel // Block - int offset_n = blockIdx.x * BLOCK_KN_SIZE * 4; + int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4; int offset_m = blockIdx.y * m_count; - int offset_k = blockIdx.z * BLOCK_KN_SIZE; + int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE; - int end_n = min(offset_n + BLOCK_KN_SIZE * 4, size_n); + int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n); int end_m = min(offset_m + m_count, size_m); - int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); + int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k); int n = offset_n + t * 4; + // Read weights + + half_uint16 weights[MAX_Q_GEMM_WEIGHTS]; + if constexpr (use_r_weights) + { + uint16_t any_w = 0; + const half* w_ptr = r_weights; + for (int m = 0; m < m_count; ++m) + { + weights[m].as_half = *w_ptr; + w_ptr += r_weights_stride; + any_w |= weights[m].as_uint16; + } + if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!)
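+ // Same caveat as the EXL2 kernel: a block skipped here writes nothing,
+ // so the output is assumed to be pre-cleared by the caller.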
+ } + // Preload block_a - __shared__ half block_a[m_count][BLOCK_KN_SIZE]; + __shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE]; if (offset_k + t < end_k) { @@ -113,16 +142,16 @@ __global__ void gemm_half_q_half_gptq_kernel const uint32_t* b_ptr = b_q_weight + qk * size_n + n; const half* a_ptr = &block_a[0][0]; - int a_stride = BLOCK_KN_SIZE; + int a_stride = GPTQ_BLOCK_KN_SIZE; // Initial group int zeros[4]; - float scales[4]; + half2 scales[4]; half2 z1z16[4][2]; half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); - b_gptq_scales_.item4_f(scales, group, n); + b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); @@ -132,7 +161,7 @@ __global__ void gemm_half_q_half_gptq_kernel // Column result - float block_c[m_count][4] = {}; + half2 block_c[m_count][4] = {}; // Dequantize and multiply @@ -144,7 +173,7 @@ __global__ void gemm_half_q_half_gptq_kernel group++; nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); - b_gptq_scales_.item4_f(scales, group, n); + b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); @@ -166,10 +195,11 @@ __global__ void gemm_half_q_half_gptq_kernel #pragma unroll for (int m = 0; m < m_count; m++) { - block_c[m][0] = fma(dot22_8_f(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]); - block_c[m][1] = fma(dot22_8_f(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); - block_c[m][2] = fma(dot22_8_f(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); - block_c[m][3] = fma(dot22_8_f(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); + if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } + block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]); + block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); + block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); + block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); } b_ptr += size_n; @@ -182,38 +212,62 @@ __global__ void gemm_half_q_half_gptq_kernel for (int m = 0; m < m_count; m++) { half2 *out = (half2*) c_.item_ptr(offset_m + m, n); - half2 result01 = __halves2half2(__float2half_rn(block_c[m][0]), __float2half_rn(block_c[m][1])); - half2 result23 = __halves2half2(__float2half_rn(block_c[m][2]), __float2half_rn(block_c[m][3])); + half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0])); + half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1])); + half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2])); + half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3])); + half2 result01 = __halves2half2(result0, result1); + half2 result23 = __halves2half2(result2, result3); + + if constexpr (mul_r_weights) + { + half2 w_mul2 = __half2half2(weights[m].as_half); + result01 = __hmul2(result01, w_mul2); + result23 = __hmul2(result23, w_mul2); + } + atomicAdd(out , result01); atomicAdd(out + 1, result23); } } -fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(bool first_block, const int m_count) +template <bool use_r_weights, bool mul_r_weights> +struct map_m_count_gptq { + static constexpr fp_gemm_half_q_half_gptq_kernel
pick_gemm_half_q_half_gptq_kernel(int m_count) + { + #if GPTQ_BLOCK_M_SIZE_MAX >= 1 + if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 2 + if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 3 + if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 4 + if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 5 + if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 6 + if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 7 + if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>; + #endif + #if GPTQ_BLOCK_M_SIZE_MAX >= 8 + if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>; + #endif + return NULL; + } +}; + +fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights) { - #if BLOCK_M_SIZE_MAX >= 1 - if (m_count == 1) return gemm_half_q_half_gptq_kernel<true, 1>; - #endif - #if BLOCK_M_SIZE_MAX >= 2 - if (m_count == 2) return gemm_half_q_half_gptq_kernel<true, 2>; - #endif - #if BLOCK_M_SIZE_MAX >= 3 - if (m_count == 3) return gemm_half_q_half_gptq_kernel<true, 3>; - #endif - #if BLOCK_M_SIZE_MAX >= 4 - if (m_count == 4) return gemm_half_q_half_gptq_kernel<true, 4>; - #endif - #if BLOCK_M_SIZE_MAX >= 5 - if (m_count == 5) return gemm_half_q_half_gptq_kernel<true, 5>; - #endif - #if BLOCK_M_SIZE_MAX >= 6 - if (m_count == 6) return gemm_half_q_half_gptq_kernel<true, 6>; - #endif - #if BLOCK_M_SIZE_MAX >= 7 - if (m_count == 7) return gemm_half_q_half_gptq_kernel<true, 7>; - #endif - #if BLOCK_M_SIZE_MAX >= 8 - if (m_count == 8) return gemm_half_q_half_gptq_kernel<true, 8>; - #endif + if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count); + if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count); + if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count); + if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count); return NULL; } diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu index 6aed7470..ae08cc1f 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu @@ -57,6 +57,7 @@ QMatrix::QMatrix uint32_t* _q_scale, half* _q_scale_max, uint16_t* _q_groups, + uint16_t* _q_group_map, uint32_t* _gptq_qzeros, half* _gptq_scales, @@ -80,13 +81,17 @@ QMatrix::QMatrix cuda_q_scale = _q_scale; cuda_q_scale_max = _q_scale_max; cuda_q_groups = _q_groups; + cuda_q_group_map = _q_group_map; cuda_gptq_qzeros = _gptq_qzeros; cuda_gptq_scales = _gptq_scales; is_gptq = (_gptq_qzeros != NULL); - groupsize = 1; - while (groupsize * groups < height) groupsize *= 2; + if (is_gptq) + { + gptq_groupsize = 1; + while (gptq_groupsize * groups < height) gptq_groupsize *= 2; + } // Create group map @@ -102,15 +107,26 @@ QMatrix::QMatrix uint16_t* cpu_q_groups = (uint16_t*)calloc(groups * 2, sizeof(uint16_t)); cudaMemcpy(cpu_q_groups, cuda_q_groups, groups * 2
* sizeof(uint16_t), cudaMemcpyDeviceToHost); + int row = 0; for (int i = 0; i < groups; i++) { int bits = cpu_q_groups[i * 2]; - if (bits == 8) rows_8 += groupsize; - if (bits == 6) rows_6 += groupsize; - if (bits == 5) rows_5 += groupsize; - if (bits == 4) rows_4 += groupsize; - if (bits == 3) rows_3 += groupsize; - if (bits == 2) rows_2 += groupsize; + + int rows; + if (i < groups - 1) + { + int qrows = cpu_q_groups[i * 2 + 3] - cpu_q_groups[i * 2 + 1]; + rows = qrows * 32 / bits; + } + else rows = height - row; + + if (bits == 8) rows_8 += rows; + if (bits == 6) rows_6 += rows; + if (bits == 5) rows_5 += rows; + if (bits == 4) rows_4 += rows; + if (bits == 3) rows_3 += rows; + if (bits == 2) rows_2 += rows; + row += rows; } free(cpu_q_groups); @@ -138,6 +154,13 @@ QMatrix::QMatrix } } +// DBGI(rows_8); +// DBGI(rows_6); +// DBGI(rows_5); +// DBGI(rows_4); +// DBGI(rows_3); +// DBGI(rows_2); + // Shuffle quantized data dim3 blockDim, gridDim; @@ -283,10 +306,10 @@ __global__ void reconstruct_kernel const uint16_t* __restrict__ b_q_perm, const uint32_t* __restrict__ b_q_scale, const half* __restrict__ b_q_scale_max, - //const uint16_t* __restrict__ b_q_groups, + const uint16_t* __restrict__ b_q_group_map, const int size_k, const int size_n, - const int groupsize, + //const int groupsize, const int groups, half* __restrict__ b, const int rows_8, @@ -317,7 +340,8 @@ __global__ void reconstruct_kernel // Find initial group - int group = offset_k / groupsize; + // int group = offset_k / groupsize; + int group = b_q_group_map[offset_k * 2]; int pre_rows_8 = min(rows_8, offset_k); int pre_rows_6 = offset_k > rows_8 ? min(rows_6, offset_k) - rows_8 : 0; @@ -337,7 +361,7 @@ __global__ void reconstruct_kernel half qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); half2 qs_h2 = __halves2half2(qs_h, qs_h); - int nextgroup = offset_k + groupsize; + int nextgroup = offset_k + b_q_group_map[offset_k * 2 + 1]; int end_k = min(offset_k + BLOCK_KN_SIZE, size_k); int k = offset_k; @@ -347,7 +371,7 @@ __global__ void reconstruct_kernel while (k < rows_8 && k < end_k) { - if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } for (int p = 0; p < 4; p++) { half2 dq[4]; @@ -363,7 +387,7 @@ __global__ void reconstruct_kernel while (k < rows_6 && k < end_k) { - if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } for (int p = 0; p < 2; p++) { half2 dq[8]; @@ -380,7 +404,7 @@ __global__ void reconstruct_kernel while (k < rows_5 && k < end_k) { - if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } for (int p = 0; p < 1; p++) { half2 dq[16]; @@ -399,7 +423,7 @@ __global__ void reconstruct_kernel while (k < rows_4 && k < end_k) { - if (k == nextgroup) { group++; qs_h = 
dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } for (int p = 0; p < 4; p++) { half2 dq[4]; @@ -414,7 +438,7 @@ __global__ void reconstruct_kernel while (k < rows_3 && k < end_k) { - if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } for (int p = 0; p < 1; p++) { half2 dq[16]; @@ -431,8 +455,8 @@ __global__ void reconstruct_kernel while (k < rows_2 && k < end_k) { - if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += groupsize; qs_h2 = __halves2half2(qs_h, qs_h); } - for (int p = 0; p < 2; p++) + if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); } + for (int p = 0; p < 1; p++) { half2 dq[8]; uint32_t q_0 = *b_ptr; b_ptr += size_n; @@ -441,7 +465,7 @@ __global__ void reconstruct_kernel half* dqh = (half*) dq; for (int j = 0; j < 16; j++) b_.set(perm[lk++], n, dqh[j]); } - k += 32; + k += 16; } } @@ -461,10 +485,10 @@ void QMatrix::reconstruct(half* out) cuda_q_perm, cuda_q_scale, cuda_q_scale_max, - //cuda_q_groups, + cuda_q_group_map, height, width, - groupsize, + //groupsize, groups, out, rows_8, @@ -487,7 +511,7 @@ void QMatrix::reconstruct(half* out) //const uint16_t* __restrict__ b_q_groups, height, width, - groupsize, + gptq_groupsize, groups, out, rows_4 diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh index dda83a4f..d36b8d66 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh @@ -18,7 +18,7 @@ public: int height; int width; int groups; - int groupsize; + int gptq_groupsize; int rows_8; int rows_6; @@ -33,6 +33,7 @@ public: uint32_t* cuda_q_scale = NULL; half* cuda_q_scale_max = NULL; uint16_t* cuda_q_groups = NULL; + uint16_t* cuda_q_group_map = NULL; uint32_t* cuda_gptq_qzeros = NULL; half* cuda_gptq_scales = NULL; @@ -53,6 +54,7 @@ public: uint32_t* _q_scale, half* _q_scale_max, uint16_t* _q_groups, + uint16_t* _q_group_map, uint32_t* _gptq_qzeros, half* _gptq_scales, diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh index 71657191..cac9df9c 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh @@ -7,6 +7,7 @@ union half2_uint32 half2 as_half2; __device__ half2_uint32(uint32_t val) : as_uint32(val) {} __device__ half2_uint32(half2 val) : as_half2(val) {} + __device__ half2_uint32() : as_uint32(0) {} }; union half_uint16 @@ -15,6 +16,7 @@ union half_uint16 half as_half; __device__ half_uint16(uint16_t val) : as_uint16(val) {} __device__ half_uint16(half val) : as_half(val) {} + __device__ half_uint16() : as_uint16(0) {} }; // Max_scale premultiplied by 1/256 diff --git 
a/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh index 06a58d18..f56eda79 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh @@ -1,3 +1,11 @@ +#ifndef _util_cuh +#define _util_cuh + +#include +#include +#include +#include +#include #define DIVIDE(x, size) (((x) + (size) - 1) / (size)) @@ -40,3 +48,7 @@ inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort= if (abort) exit(code); } } + +void print_global_mem(const half* ptr, int rows, int columns, int stride); + +#endif \ No newline at end of file diff --git a/server/exllamav2_kernels/exllamav2_kernels/ext.cpp b/server/exllamav2_kernels/exllamav2_kernels/ext.cpp index 5e52e6ab..ff4e1851 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/ext.cpp +++ b/server/exllamav2_kernels/exllamav2_kernels/ext.cpp @@ -31,6 +31,7 @@ uintptr_t make_q_matrix torch::Tensor q_scale, torch::Tensor q_scale_max, torch::Tensor q_groups, + torch::Tensor q_group_map, torch::Tensor gptq_qzeros, torch::Tensor gptq_scales, torch::Tensor gptq_g_idx, @@ -43,6 +44,7 @@ uintptr_t make_q_matrix TORCH_CHECK_DTYPE_OPT(q_scale, kInt); TORCH_CHECK_DTYPE_OPT(q_scale_max, kHalf); TORCH_CHECK_DTYPE_OPT(q_groups, kShort); + TORCH_CHECK_DTYPE_OPT(q_group_map, kShort); TORCH_CHECK_DTYPE_OPT(gptq_qzeros, kInt); TORCH_CHECK_DTYPE_OPT(gptq_scales, kHalf); TORCH_CHECK_DTYPE_OPT(gptq_g_idx, kInt); @@ -83,12 +85,15 @@ uintptr_t make_q_matrix q_scale.device().is_meta() ? NULL : (uint32_t*) q_scale.data_ptr(), q_scale_max.device().is_meta() ? NULL : (half*) q_scale_max.data_ptr(), q_groups.device().is_meta() ? NULL : (uint16_t*) q_groups.data_ptr(), + q_group_map.device().is_meta() ? NULL : (uint16_t*) q_group_map.data_ptr(), gptq_qzeros.device().is_meta() ? NULL : (uint32_t*) gptq_qzeros.data_ptr(), gptq_scales.device().is_meta() ? NULL : (half*) gptq_scales.data_ptr(), gptq_g_idx.device().is_meta() ? 
NULL : (uint32_t*) gptq_g_idx.data_ptr(), (half*) temp_dq.data_ptr() ); + if (m->failed) throw std::runtime_error("CUDA out of memory"); + return reinterpret_cast (m); } diff --git a/server/tests/utils/test_hub.py b/server/tests/utils/test_hub.py index 5438c153..49549893 100644 --- a/server/tests/utils/test_hub.py +++ b/server/tests/utils/test_hub.py @@ -32,10 +32,10 @@ def fresh_cache(): current_value = huggingface_hub.constants.HUGGINGFACE_HUB_CACHE huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = d text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = d - os.environ['HUGGINGFACE_HUB_CACHE'] = d + os.environ["HUGGINGFACE_HUB_CACHE"] = d yield huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = current_value - os.environ['HUGGINGFACE_HUB_CACHE'] = current_value + os.environ["HUGGINGFACE_HUB_CACHE"] = current_value text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = current_value @@ -47,7 +47,7 @@ def prefetched(): revision="main", local_files_only=False, repo_type="model", - allow_patterns=["*.safetensors"] + allow_patterns=["*.safetensors"], ) yield model_id @@ -61,7 +61,7 @@ def test_weight_hub_files_offline_error(offline, fresh_cache): def test_weight_hub_files_offline_ok(prefetched, offline): # If the model is prefetched then we should be able to get the weight files from local cache filenames = weight_hub_files(prefetched) - assert filenames == ['model.safetensors'] + assert filenames == ["model.safetensors"] def test_weight_hub_files(): diff --git a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py index cd93d32a..22d03adf 100644 --- a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py @@ -71,7 +71,7 @@ def _load_multi_mqa_gptq( g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx") g_idx = g_idx.to(device=weights.device) - bits, groupsize = weights._get_gptq_params() + bits, groupsize, _ = weights._get_gptq_params() from text_generation_server.utils.layers import HAS_EXLLAMA diff --git a/server/text_generation_server/utils/gptq/exllamav2.py b/server/text_generation_server/utils/gptq/exllamav2.py index dd41b269..a24e834b 100644 --- a/server/text_generation_server/utils/gptq/exllamav2.py +++ b/server/text_generation_server/utils/gptq/exllamav2.py @@ -27,6 +27,32 @@ def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): return output.view(output_shape) +# Group map needed for irregular group sizes + + +def make_group_map(q_groups, num_qrows): + + gr = q_groups.tolist() + group_map = [] + num_groups = len(gr) // 2 + + for i in range(num_groups): + bits = gr[i * 2] + if i < num_groups - 1: + qrows = gr[i * 2 + 3] - gr[i * 2 + 1] + else: + qrows = num_qrows - gr[i * 2 + 1] + rows = qrows * 32 // bits + for j in range(rows): + group_map += [i] + group_map += [rows - j] + + return torch.tensor(group_map, dtype=torch.short, device=q_groups.device) + + +# Create Q matrix + + def ext_make_q_matrix(w: dict, temp_dq, key: str = None): """ Create Q matrix @@ -37,6 +63,10 @@ def ext_make_q_matrix(w: dict, temp_dq, key: str = None): w["q_scale_max"] /= 256 w["q_perm"] = w["q_perm"].short() w["q_invperm"] = w["q_invperm"].short() + + if "q_group_map" not in w: + w["q_group_map"] = make_group_map(w["q_groups"], w["q_weight"].shape[0]) + return make_q_matrix( w["q_weight"], w["q_perm"], @@ -44,6 +74,7 @@ def ext_make_q_matrix(w: dict, temp_dq, key: str = 
None): w["q_scale"], w["q_scale_max"], w["q_groups"], + w["q_group_map"], none_tensor, none_tensor, none_tensor, @@ -70,6 +101,7 @@ def ext_make_q_matrix(w: dict, temp_dq, key: str = None): none_tensor, none_tensor, none_tensor, + none_tensor, w["qzeros"], w["scales"], w["g_idx"].cpu(), @@ -84,6 +116,7 @@ def ext_make_q_matrix(w: dict, temp_dq, key: str = None): none_tensor, none_tensor, none_tensor, + none_tensor, w["qzeros"], w["scales"], none_tensor, diff --git a/server/text_generation_server/utils/hub.py b/server/text_generation_server/utils/hub.py index 62afff0c..deb1a941 100644 --- a/server/text_generation_server/utils/hub.py +++ b/server/text_generation_server/utils/hub.py @@ -18,7 +18,9 @@ WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) HF_HUB_OFFLINE = os.environ.get("HF_HUB_OFFLINE", "0").lower() in ["true", "1", "yes"] -def _cached_weight_files(model_id: str, revision: Optional[str], extension: str) -> List[str]: +def _cached_weight_files( + model_id: str, revision: Optional[str], extension: str +) -> List[str]: """Guess weight files from the cached revision snapshot directory""" d = _get_cached_revision_directory(model_id, revision) if not d: @@ -27,7 +29,9 @@ def _cached_weight_files(model_id: str, revision: Optional[str], extension: str) return filenames -def _weight_hub_files_from_model_info(info: hf_api.ModelInfo, extension: str) -> List[str]: +def _weight_hub_files_from_model_info( + info: hf_api.ModelInfo, extension: str +) -> List[str]: return [ s.rfilename for s in info.siblings @@ -44,21 +48,27 @@ def _weight_files_from_dir(d: Path, extension: str) -> List[str]: # see _weight_hub_files_from_model_info, that's also what is # done there with the len(s.rfilename.split("/")) == 1 condition root, _, files = next(os.walk(str(d))) - filenames = [f for f in files - if f.endswith(extension) - and "arguments" not in f - and "args" not in f - and "adapter" not in f - and "training" not in f] + filenames = [ + f + for f in files + if f.endswith(extension) + and "arguments" not in f + and "args" not in f + and "adapter" not in f + and "training" not in f + ] return filenames -def _get_cached_revision_directory(model_id: str, revision: Optional[str]) -> Optional[Path]: +def _get_cached_revision_directory( + model_id: str, revision: Optional[str] +) -> Optional[Path]: if revision is None: revision = "main" repo_cache = Path(HUGGINGFACE_HUB_CACHE) / Path( - file_download.repo_folder_name(repo_id=model_id, repo_type="model")) + file_download.repo_folder_name(repo_id=model_id, repo_type="model") + ) if not repo_cache.is_dir(): # No cache for this model @@ -86,7 +96,7 @@ def _get_cached_revision_directory(model_id: str, revision: Optional[str]) -> Op def weight_hub_files( - model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" + model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[str]: """Get the weights filenames on the hub""" api = HfApi() diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index 011a9382..6648b55a 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -19,6 +19,7 @@ from accelerate import init_empty_weights from text_generation_server.utils.gptq.quant_linear import QuantLinear from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM +from text_generation_server.utils.log import log_once HAS_AWQ = True try: @@ -35,10 +36,11 @@ HAS_EXLLAMA = False 
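# A self-contained sketch of the q_group_map encoding produced by the
# make_group_map helper added to exllamav2.py earlier in this patch; the
# q_groups values below are invented for illustration. Entry [2*k] is the
# group index of weight row k, and entry [2*k+1] is the number of rows left
# in that group, which is what the CUDA kernels use to advance `nextgroup`
# without assuming a fixed group size.
import torch

def make_group_map(q_groups: torch.Tensor, num_qrows: int) -> torch.Tensor:
    gr = q_groups.tolist()
    group_map = []
    num_groups = len(gr) // 2
    for i in range(num_groups):
        bits = gr[i * 2]  # q_groups stores (bits, start_qrow) pairs
        if i < num_groups - 1:
            qrows = gr[i * 2 + 3] - gr[i * 2 + 1]
        else:
            qrows = num_qrows - gr[i * 2 + 1]
        rows = qrows * 32 // bits  # packed uint32 rows -> weight rows
        for j in range(rows):
            group_map += [i]
            group_map += [rows - j]
    return torch.tensor(group_map, dtype=torch.short, device=q_groups.device)

# Group 0: 8-bit starting at qrow 0; group 1: 4-bit starting at qrow 2.
q_groups = torch.tensor([8, 0, 4, 2], dtype=torch.short)
print(make_group_map(q_groups, num_qrows=3))
# -> [0, 8, 0, 7, ..., 0, 1, 1, 8, 1, 7, ..., 1, 1]: eight weight rows per group here.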
CAN_EXLLAMA = major >= 8 V2 = os.getenv("EXLLAMA_VERSION", "2") == "2" if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: - logger.warning( + V2 = False + log_once( + logger.warning, "Disabling exllama v2 and using v1 instead because there are issues when sharding" ) - V2 = False if os.getenv("DISABLE_EXLLAMA") == "True": HAS_EXLLAMA = False diff --git a/server/text_generation_server/utils/log.py b/server/text_generation_server/utils/log.py new file mode 100644 index 00000000..d831fa76 --- /dev/null +++ b/server/text_generation_server/utils/log.py @@ -0,0 +1,6 @@ +from functools import lru_cache + + +@lru_cache(10) +def log_once(log, msg:str): + log(msg) diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py index a2cca2ea..ee1899ab 100644 --- a/server/text_generation_server/utils/weights.py +++ b/server/text_generation_server/utils/weights.py @@ -6,6 +6,7 @@ import torch from loguru import logger from huggingface_hub import hf_hub_download import json +from text_generation_server.utils.log import log_once class Weights: @@ -161,7 +162,7 @@ class Weights: else: g_idx = None - bits, groupsize = self._get_gptq_params() + bits, groupsize, _ = self._get_gptq_params() weight = (qweight, qzeros, scales, g_idx, bits, groupsize, False) else: slice_ = self._get_slice(f"{prefix}.weight") @@ -211,10 +212,10 @@ class Weights: else: g_idx = None - bits, groupsize = self._get_gptq_params() + bits, groupsize, desc_act = self._get_gptq_params() from text_generation_server.utils.layers import HAS_EXLLAMA - use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq" + use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq" and not desc_act weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) else: w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes] @@ -240,11 +241,15 @@ class Weights: def get_multi_weights_row(self, prefix: str, quantize: str): if quantize == "gptq": use_exllama = True - bits, groupsize = self._get_gptq_params() + bits, groupsize, desc_act = self._get_gptq_params() if bits != 4: use_exllama = False + if desc_act: + log_once(logger.warning, "Disabling exllama because desc_act=True") + use_exllama = False + if self.process_group.size() > 1: g_idx = self.get_tensor(f"{prefix}.g_idx") if g_idx is not None: @@ -274,12 +279,18 @@ class Weights: if use_exllama: if not HAS_EXLLAMA: if CAN_EXLLAMA: - logger.warning( + log_once( + logger.warning, "Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True" ) use_exllama = False else: - logger.info(f"Using exllama kernels v{HAS_EXLLAMA}") + log_once( + logger.info, + f"Using exllama kernels v{HAS_EXLLAMA}" + ) + + g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) if use_exllama and groupsize != -1: qzeros = self.get_sharded(f"{prefix}.qzeros", dim=0) @@ -288,14 +299,12 @@ class Weights: qzeros = self.get_tensor(f"{prefix}.qzeros") scales = self.get_tensor(f"{prefix}.scales") - g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) - if use_exllama: g_idx = g_idx - g_idx[0] weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) elif quantize == "awq": - bits, groupsize = self._get_gptq_params() + bits, groupsize, _ = self._get_gptq_params() try: qweight = self.get_sharded(f"{prefix}.qweight", dim=0) @@ -314,18 +323,20 @@ class Weights: weight = self.get_sharded(f"{prefix}.weight", dim=1) return weight - def _get_gptq_params(self) -> Tuple[int, int]: + def 
_get_gptq_params(self) -> Tuple[int, int, int]: try: bits = self.get_tensor("gptq_bits").item() groupsize = self.get_tensor("gptq_groupsize").item() + desc_act = False except (SafetensorError, RuntimeError) as e: try: bits = self.gptq_bits groupsize = self.gptq_groupsize + desc_act = getattr(self, "gptq_desc_act", False) except Exception: raise e - return bits, groupsize + return bits, groupsize, desc_act def _set_gptq_params(self, model_id, revision): filename = "config.json" @@ -340,6 +351,7 @@ class Weights: data = json.load(f) self.gptq_bits = data["quantization_config"]["bits"] self.gptq_groupsize = data["quantization_config"]["group_size"] + self.gptq_desc_act = data["quantization_config"]["desc_act"] except Exception: filename = "quantize_config.json" try: @@ -353,6 +365,7 @@ class Weights: data = json.load(f) self.gptq_bits = data["bits"] self.gptq_groupsize = data["group_size"] + self.gptq_desc_act = data["desc_act"] except Exception: filename = "quant_config.json" try: @@ -366,5 +379,6 @@ class Weights: data = json.load(f) self.gptq_bits = data["w_bit"] self.gptq_groupsize = data["q_group_size"] + self.gptq_desc_act = data["desc_act"] except Exception: pass From 8cc4306f724a6958a3a6354eb3756e16a28aaf8c Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 21 Dec 2023 17:29:23 +0100 Subject: [PATCH 026/153] Fix local load for peft (#1373) A locally overloaded model directory still needs its directory prefix so that the weight files can be located correctly. # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
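A minimal sketch of the behaviour this fix restores, mirroring the _weight_files_from_dir change in the diff below; the directory path in the usage note is invented for illustration.

import os
from pathlib import Path
from typing import List

def local_weight_files(d: Path, extension: str = ".safetensors") -> List[str]:
    # Join each filename back onto its directory so callers get paths that
    # resolve no matter where the server process was started from.
    root, _, files = next(os.walk(str(d)))
    return [os.path.join(root, f) for f in files if f.endswith(extension)]

# e.g. local_weight_files(Path("/data/my-local-model"))
# -> ["/data/my-local-model/model.safetensors"]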
--- server/tests/utils/test_hub.py | 10 +++++++++- server/text_generation_server/utils/hub.py | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/server/tests/utils/test_hub.py b/server/tests/utils/test_hub.py index 49549893..721820f5 100644 --- a/server/tests/utils/test_hub.py +++ b/server/tests/utils/test_hub.py @@ -61,7 +61,15 @@ def test_weight_hub_files_offline_error(offline, fresh_cache): def test_weight_hub_files_offline_ok(prefetched, offline): # If the model is prefetched then we should be able to get the weight files from local cache filenames = weight_hub_files(prefetched) - assert filenames == ["model.safetensors"] + root = None + assert len(filenames) == 1 + for f in filenames: + curroot, filename = os.path.split(f) + if root is None: + root = curroot + else: + assert root == curroot + assert filename == "model.safetensors" def test_weight_hub_files(): diff --git a/server/text_generation_server/utils/hub.py b/server/text_generation_server/utils/hub.py index deb1a941..b56484f6 100644 --- a/server/text_generation_server/utils/hub.py +++ b/server/text_generation_server/utils/hub.py @@ -49,7 +49,7 @@ def _weight_files_from_dir(d: Path, extension: str) -> List[str]: # done there with the len(s.rfilename.split("/")) == 1 condition root, _, files = next(os.walk(str(d))) filenames = [ - f + os.path.join(root, f) for f in files if f.endswith(extension) and "arguments" not in f From 62646c2a544d85a1feaf3e05b0f926c638579417 Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Fri, 22 Dec 2023 15:46:04 +0100 Subject: [PATCH 027/153] v1.3.4 --- Cargo.lock | 34 +++++++++---------- Cargo.toml | 2 +- docs/openapi.json | 2 +- integration-tests/pyproject.toml | 2 +- server/pyproject.toml | 2 +- server/text_generation_server/utils/layers.py | 2 +- server/text_generation_server/utils/log.py | 2 +- .../text_generation_server/utils/weights.py | 11 +++--- 8 files changed, 28 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7565d7da..39fb381a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -350,9 +350,9 @@ checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cc" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" [[package]] name = "cfg-if" @@ -2340,9 +2340,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "e3cc72858054fcff6d7dea32df2aeaee6a7c24227366d7ea429aada2f26b16ad" dependencies = [ "bitflags 2.5.0", "errno", @@ -2365,9 +2365,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring 0.17.8", @@ -2586,9 +2586,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = 
"a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -2798,7 +2798,7 @@ dependencies = [ [[package]] name = "text-generation-benchmark" -version = "1.3.3" +version = "1.3.4" dependencies = [ "average", "clap", @@ -2819,7 +2819,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.3.3" +version = "1.3.4" dependencies = [ "futures", "grpc-metadata", @@ -2836,7 +2836,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.3.3" +version = "1.3.4" dependencies = [ "clap", "ctrlc", @@ -2852,7 +2852,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.3.3" +version = "1.3.4" dependencies = [ "async-stream", "axum", @@ -2887,18 +2887,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", @@ -3409,7 +3409,7 @@ dependencies = [ "log", "native-tls", "once_cell", - "rustls 0.22.3", + "rustls 0.22.4", "rustls-pki-types", "rustls-webpki", "serde", diff --git a/Cargo.toml b/Cargo.toml index a09a8ca7..80e6e145 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.3.3" +version = "1.3.4" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 62751928..df2d427f 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -10,7 +10,7 @@ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, - "version": "1.3.3" + "version": "1.3.4" }, "paths": { "/": { diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index 64723ae6..f6929587 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.3.3" +version = "1.3.4" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/pyproject.toml b/server/pyproject.toml index a027ba2b..d6806848 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.3.3" +version = "1.3.4" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index 6648b55a..d4fa2559 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -39,7 +39,7 @@ if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: V2 = False log_once( logger.warning, - "Disabling exllama v2 and using v1 instead because there are issues when sharding" + "Disabling exllama v2 and using v1 instead because there are issues when sharding", ) if os.getenv("DISABLE_EXLLAMA") == "True": diff --git 
a/server/text_generation_server/utils/log.py b/server/text_generation_server/utils/log.py index d831fa76..b1456f1e 100644 --- a/server/text_generation_server/utils/log.py +++ b/server/text_generation_server/utils/log.py @@ -2,5 +2,5 @@ from functools import lru_cache @lru_cache(10) -def log_once(log, msg:str): +def log_once(log, msg: str): log(msg) diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py index ee1899ab..c4e82a6d 100644 --- a/server/text_generation_server/utils/weights.py +++ b/server/text_generation_server/utils/weights.py @@ -215,7 +215,9 @@ class Weights: bits, groupsize, desc_act = self._get_gptq_params() from text_generation_server.utils.layers import HAS_EXLLAMA - use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq" and not desc_act + use_exllama = ( + bits == 4 and HAS_EXLLAMA and quantize == "gptq" and not desc_act + ) weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) else: w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes] @@ -281,14 +283,11 @@ class Weights: if CAN_EXLLAMA: log_once( logger.warning, - "Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True" + "Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True", ) use_exllama = False else: - log_once( - logger.info, - f"Using exllama kernels v{HAS_EXLLAMA}" - ) + log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}") g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0) From fc9173aa598a8e45364dfdf33da841854bb9f33a Mon Sep 17 00:00:00 2001 From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> Date: Tue, 9 Jan 2024 14:28:55 +0100 Subject: [PATCH 028/153] docs: update required CUDA version to 12.2 --- docs/source/quicktour.md | 2 +- docs/source/supported_models.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index 03ea03bc..e9a33f04 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -13,7 +13,7 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf -To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. +To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md index 0708c729..dce4f2f9 100644 --- a/docs/source/supported_models.md +++ b/docs/source/supported_models.md @@ -39,7 +39,7 @@ text-generation-launcher --model-id ## Supported Hardware -TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 11.8+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. 
+TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 12.2+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. TGI also supports ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and support may be extended in the future: * Quantization (GPTQ, AWQ, etc.) From 118344b99d616fa0a7067b7b5f854ae97cb341c5 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Tue, 9 Jan 2024 15:21:00 +0100 Subject: [PATCH 029/153] fix: fix local loading for .bin models (#1419) --- server/text_generation_server/cli.py | 2 +- server/text_generation_server/utils/peft.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py index 2caf63c0..ec04a240 100644 --- a/server/text_generation_server/cli.py +++ b/server/text_generation_server/cli.py @@ -242,7 +242,7 @@ def download_weights( if not extension == ".safetensors" or not auto_convert: raise e - else: + elif (Path(model_id) / "adapter_config.json").exists(): # Try to load as a local PEFT model try: utils.download_and_unload_peft( diff --git a/server/text_generation_server/utils/peft.py b/server/text_generation_server/utils/peft.py index 45e23320..48ca264b 100644 --- a/server/text_generation_server/utils/peft.py +++ b/server/text_generation_server/utils/peft.py @@ -10,8 +10,7 @@ from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM def download_and_unload_peft(model_id, revision, trust_remote_code): torch_dtype = torch.float16 - logger.info("Peft model detected.") - logger.info("Loading the model it might take a while without feedback") + logger.info("Trying to load a Peft model. It might take a while without feedback") try: model = AutoPeftModelForCausalLM.from_pretrained( model_id, @@ -28,7 +27,7 @@ def download_and_unload_peft(model_id, revision, trust_remote_code): trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) - logger.info(f"Loaded.") + logger.info("Peft model detected.") logger.info(f"Merging the lora weights.") base_model_id = model.peft_config["default"].base_model_name_or_path From 92ddb41d95ecc561067ecb05aaed8823e62dfaba Mon Sep 17 00:00:00 2001 From: "R. P.
Ruiz" <42214371+deepily@users.noreply.github.com> Date: Tue, 9 Jan 2024 10:19:31 -0500 Subject: [PATCH 030/153] Fix missing make target platform for local install: 'install-flash-attention-v2' (#1414) --- server/text_generation_server/utils/flash_attn.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py index 02f01e65..48f8ef70 100644 --- a/server/text_generation_server/utils/flash_attn.py +++ b/server/text_generation_server/utils/flash_attn.py @@ -23,10 +23,15 @@ try: try: import flash_attn_2_cuda except ImportError: + architecture_suffix = "" + if IS_CUDA_SYSTEM: + architecture_suffix = "-cuda" + elif IS_ROCM_SYSTEM: + architecture_suffix = "-rocm" raise ImportError( "Flash Attention V2 is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " - "or install flash attention v2 with `cd server && make install install-flash-attention-v2`" + f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`" ) if not (is_sm8x or is_sm90): raise ImportError( From af63e3273fd1e5b6913c056378b16b364532cc2f Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Wed, 10 Jan 2024 16:35:54 +0100 Subject: [PATCH 031/153] fix: follow base model for tokenizer in router (#1424) Close #1422 --- Cargo.lock | 4 + router/Cargo.toml | 2 +- router/src/main.rs | 346 ++++++++++++++++++++++++--------------------- 3 files changed, 189 insertions(+), 163 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 39fb381a..85ff920b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -983,13 +983,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b780635574b3d92f036890d8373433d6f9fc7abb320ee42a5c25897fc8ed732" dependencies = [ "dirs 5.0.1", + "futures", "indicatif", "log", "native-tls", + "num_cpus", "rand", + "reqwest", "serde", "serde_json", "thiserror", + "tokio", "ureq", ] diff --git a/router/Cargo.toml b/router/Cargo.toml index 55af635a..5ccdb0cd 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -21,6 +21,7 @@ axum-tracing-opentelemetry = "0.14.1" text-generation-client = { path = "client" } clap = { version = "4.4.5", features = ["derive", "env"] } futures = "0.3.28" +hf-hub = { version = "0.3.0", features = ["tokio"] } metrics = "0.21.1" metrics-exporter-prometheus = { version = "0.12.1", features = [] } nohash-hasher = "0.2.0" @@ -41,7 +42,6 @@ tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } utoipa = { version = "3.5.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] } ngrok = { version = "0.13.1", features = ["axum"], optional = true } -hf-hub = "0.3.1" init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] } [build-dependencies] diff --git a/router/src/main.rs b/router/src/main.rs index dd1ef7e2..0c8e747b 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -1,8 +1,9 @@ /// Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. 
-/// Text Generation Inference webserver entrypoint use axum::http::HeaderValue; use clap::Parser; +use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo}; +use hf_hub::{Repo, RepoType}; use opentelemetry::sdk::propagation::TraceContextPropagator; use opentelemetry::sdk::trace; use opentelemetry::sdk::trace::Sampler; @@ -10,13 +11,15 @@ use opentelemetry::sdk::Resource; use opentelemetry::{global, KeyValue}; use opentelemetry_otlp::WithExportConfig; use std::env; +/// Text Generation Inference webserver entrypoint +use std::fs::File; +use std::io::BufReader; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::Path; -use std::time::Duration; use text_generation_client::{ClientError, ShardedClient}; use text_generation_router::{server, HubModelInfo}; use thiserror::Error; -use tokenizers::{FromPretrainedParameters, Tokenizer}; +use tokenizers::Tokenizer; use tower_http::cors::AllowOrigin; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; @@ -72,7 +75,8 @@ struct Args { ngrok_edge: Option, } -fn main() -> Result<(), RouterError> { +#[tokio::main] +async fn main() -> Result<(), RouterError> { // Get args let args = Args::parse(); // Pattern match configuration @@ -101,6 +105,9 @@ fn main() -> Result<(), RouterError> { ngrok_edge, } = args; + // Launch Tokio runtime + init_logging(otlp_endpoint, json_output); + // Validate args if max_input_length >= max_total_tokens { return Err(RouterError::ArgumentValidation( @@ -147,150 +154,162 @@ fn main() -> Result<(), RouterError> { let skip_tokenizer_in_tgi = env::var("SKIP_TOKENIZER_IN_TGI") .ok() .map_or(false, |value| value.to_lowercase() == "true"); - let tokenizer = if skip_tokenizer_in_tgi { - None - } else if local_model { - // Load local tokenizer - Tokenizer::from_file(local_path.join("tokenizer.json")).ok() - } else { - // Download and instantiate tokenizer - // We need to download it outside of the Tokio runtime - let params = FromPretrainedParameters { - revision: revision.clone().unwrap_or("main".to_string()), - auth_token: authorization_token.clone(), - ..Default::default() + let (tokenizer, model_info) = if local_model { + // Get Model info + let model_info = HubModelInfo { + model_id: tokenizer_name.clone(), + sha: None, + pipeline_tag: None, }; - Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).ok() + + // Load local tokenizer + let tokenizer = if skip_tokenizer_in_tgi { + None + } else { + Tokenizer::from_file(local_path.join("tokenizer.json")).ok() + }; + + (tokenizer, model_info) + } else { + let mut builder = ApiBuilder::new() + .with_progress(false) + .with_token(authorization_token); + + if let Some(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE").ok() { + builder = builder.with_cache_dir(cache_dir.into()); + } + + if revision.is_none() { + tracing::warn!("`--revision` is not set"); + tracing::warn!("We strongly advise to set it to a known supported commit."); + } + + let api = builder.build().unwrap(); + let api_repo = api.repo(Repo::with_revision( + tokenizer_name.clone(), + RepoType::Model, + revision.clone().unwrap_or("main".to_string()), + )); + + // Get Model info + let model_info = get_model_info(&api_repo).await.unwrap_or_else(|| { + tracing::warn!("Could not retrieve model info from the Hugging Face hub."); + HubModelInfo { + model_id: tokenizer_name.to_string(), + sha: None, + pipeline_tag: None, + } + }); + + let tokenizer = match api_repo.get("tokenizer.json").await { + Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(), + Err(_) 
=> get_base_tokenizer(&api, &api_repo).await, + }; + + (tokenizer, model_info) }; - // Launch Tokio runtime - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build()? - .block_on(async { - init_logging(otlp_endpoint, json_output); + if tokenizer.is_none() { + tracing::warn!("Could not find a fast tokenizer implementation for {tokenizer_name}"); + tracing::warn!("Rust input length validation and truncation is disabled"); + } - if skip_tokenizer_in_tgi { - tracing::warn!("Rust input length validation disabled by environment variable"); - } else if tokenizer.is_none() { + // if pipeline-tag == text-generation we default to return_full_text = true + let compat_return_full_text = match &model_info.pipeline_tag { + None => { + tracing::warn!("no pipeline tag found for model {tokenizer_name}"); + false + } + Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation", + }; + + // Instantiate sharded client from the master unix socket + let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) + .await + .map_err(RouterError::Connection)?; + // Clear the cache; useful if the webserver rebooted + sharded_client + .clear_cache(None) + .await + .map_err(RouterError::Cache)?; + // Get info from the shard + let shard_info = sharded_client.info().await.map_err(RouterError::Info)?; + + // Warmup model + tracing::info!("Warming up model"); + let max_supported_batch_total_tokens = match sharded_client + .warmup( + max_input_length as u32, + max_batch_prefill_tokens, + max_total_tokens as u32, + max_batch_total_tokens, + ) + .await + .map_err(RouterError::Warmup)? + { + // Older models do not support automatic max-batch-total-tokens + None => { + let max_batch_total_tokens = max_batch_total_tokens + .unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens))); + tracing::warn!("Model does not support automatic max batch total tokens"); + max_batch_total_tokens + } + // Flash attention models return their max supported total tokens + Some(max_supported_batch_total_tokens) => { + // Warn if user added his own max-batch-total-tokens as we will ignore it + if max_batch_total_tokens.is_some() { tracing::warn!( - "Could not find a fast tokenizer implementation for {tokenizer_name}" + "`--max-batch-total-tokens` is deprecated for Flash \ + Attention models." ); - tracing::warn!("Rust input length validation and truncation is disabled"); + tracing::warn!( + "Inferred max batch total tokens: {max_supported_batch_total_tokens}" + ); + } + if max_total_tokens as u32 > max_supported_batch_total_tokens { + return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. 
Given: {max_total_tokens} and {max_supported_batch_total_tokens}"))); } - // Get Model info - let model_info = match local_model { - true => HubModelInfo { - model_id: tokenizer_name.clone(), - sha: None, - pipeline_tag: None, - }, - false => get_model_info(&tokenizer_name, revision, authorization_token) - .await - .unwrap_or_else(|| { - tracing::warn!("Could not retrieve model info from the Hugging Face hub."); - HubModelInfo { - model_id: tokenizer_name.to_string(), - sha: None, - pipeline_tag: None, - } - }), - }; + max_supported_batch_total_tokens + } + }; + tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}"); + tracing::info!("Connected"); - // if pipeline-tag == text-generation we default to return_full_text = true - let compat_return_full_text = match &model_info.pipeline_tag { - None => { - tracing::warn!("no pipeline tag found for model {tokenizer_name}"); - false - } - Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation", - }; + let addr = match hostname.parse() { + Ok(ip) => SocketAddr::new(ip, port), + Err(_) => { + tracing::warn!("Invalid hostname, defaulting to 0.0.0.0"); + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port) + } + }; - // Instantiate sharded client from the master unix socket - let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) - .await - .map_err(RouterError::Connection)?; - // Clear the cache; useful if the webserver rebooted - sharded_client - .clear_cache(None) - .await - .map_err(RouterError::Cache)?; - // Get info from the shard - let shard_info = sharded_client.info().await.map_err(RouterError::Info)?; - - // Warmup model - tracing::info!("Warming up model"); - let max_supported_batch_total_tokens = match sharded_client - .warmup(max_input_length as u32, max_batch_prefill_tokens, max_total_tokens as u32, max_batch_total_tokens) - .await - .map_err(RouterError::Warmup)? - { - // Older models do not support automatic max-batch-total-tokens - None => { - let max_batch_total_tokens = max_batch_total_tokens.unwrap_or( - 16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)), - ); - tracing::warn!("Model does not support automatic max batch total tokens"); - max_batch_total_tokens - } - // Flash attention models return their max supported total tokens - Some(max_supported_batch_total_tokens) => { - // Warn if user added his own max-batch-total-tokens as we will ignore it - if max_batch_total_tokens.is_some() { - tracing::warn!( - "`--max-batch-total-tokens` is deprecated for Flash \ - Attention models." - ); - tracing::warn!( - "Inferred max batch total tokens: {max_supported_batch_total_tokens}" - ); - } - if max_total_tokens as u32 > max_supported_batch_total_tokens { - return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. 
Given: {max_total_tokens} and {max_supported_batch_total_tokens}"))); - } - - max_supported_batch_total_tokens - } - }; - tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}"); - tracing::info!("Connected"); - - let addr = match hostname.parse() { - Ok(ip) => SocketAddr::new(ip, port), - Err(_) => { - tracing::warn!("Invalid hostname, defaulting to 0.0.0.0"); - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port) - } - }; - - // Run server - server::run( - model_info, - shard_info, - compat_return_full_text, - max_concurrent_requests, - max_best_of, - max_stop_sequences, - max_top_n_tokens, - max_input_length, - max_total_tokens, - waiting_served_ratio, - max_batch_prefill_tokens, - max_supported_batch_total_tokens, - max_waiting_tokens, - sharded_client, - tokenizer, - validation_workers, - addr, - cors_allow_origin, - ngrok, - ngrok_authtoken, - ngrok_edge, - ) - .await?; - Ok(()) - }) + // Run server + server::run( + model_info, + shard_info, + compat_return_full_text, + max_concurrent_requests, + max_best_of, + max_stop_sequences, + max_top_n_tokens, + max_input_length, + max_total_tokens, + waiting_served_ratio, + max_batch_prefill_tokens, + max_supported_batch_total_tokens, + max_waiting_tokens, + sharded_client, + tokenizer, + validation_workers, + addr, + cors_allow_origin, + ngrok, + ngrok_authtoken, + ngrok_edge, + ) + .await?; + Ok(()) } /// Init logging using env variables LOG_LEVEL and LOG_FORMAT: @@ -349,30 +368,8 @@ fn init_logging(otlp_endpoint: Option, json_output: bool) { } /// get model info from the Huggingface Hub -pub async fn get_model_info( - model_id: &str, - revision: Option, - token: Option, -) -> Option { - let revision = match revision { - None => { - tracing::warn!("`--revision` is not set"); - tracing::warn!("We strongly advise to set it to a known supported commit."); - "main".to_string() - } - Some(revision) => revision, - }; - - let client = reqwest::Client::new(); - // Poor man's urlencode - let revision = revision.replace('/', "%2F"); - let url = format!("https://huggingface.co/api/models/{model_id}/revision/{revision}"); - let mut builder = client.get(url).timeout(Duration::from_secs(5)); - if let Some(token) = token { - builder = builder.bearer_auth(token); - } - - let response = builder.send().await.ok()?; +pub async fn get_model_info(api: &ApiRepo) -> Option { + let response = api.info_request().send().await.ok()?; if response.status().is_success() { let hub_model_info: HubModelInfo = @@ -389,6 +386,31 @@ pub async fn get_model_info( } } +/// get base tokenizer +pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option { + let config_filename = api_repo.get("config.json").await.ok()?; + + // Open the file in read-only mode with buffer. + let file = File::open(config_filename).ok()?; + let reader = BufReader::new(file); + + // Read the JSON contents of the file as an instance of `User`. 
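+    // (Here the file is the model's `config.json`; only `base_model_name_or_path` is read below.)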
+ let config: serde_json::Value = serde_json::from_reader(reader).ok()?; + + if let Some(serde_json::Value::String(base_model_id)) = config.get("base_model_name_or_path") { + let api_base_repo = api.repo(Repo::with_revision( + base_model_id.to_string(), + RepoType::Model, + "main".to_string(), + )); + + let tokenizer_filename = api_base_repo.get("tokenizer.json").await.ok()?; + Tokenizer::from_file(tokenizer_filename).ok() + } else { + None + } +} + #[derive(Debug, Error)] enum RouterError { #[error("Argument validation error: {0}")] From e930ad9cecbc745241511eaa44167379f4d484c8 Mon Sep 17 00:00:00 2001 From: PYNing <540439329@qq.com> Date: Thu, 11 Jan 2024 01:36:20 +0800 Subject: [PATCH 032/153] Fix local load for Medusa (#1420) # What does this PR do? Close #1418 Close #1415 ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- server/text_generation_server/cli.py | 29 +++++++++++++++++++ .../models/flash_llama.py | 25 +++++++++++----- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py index ec04a240..9ec3ce20 100644 --- a/server/text_generation_server/cli.py +++ b/server/text_generation_server/cli.py @@ -242,6 +242,35 @@ def download_weights( if not extension == ".safetensors" or not auto_convert: raise e + elif (Path(model_id) / "medusa_lm_head.pt").exists(): + # Try to load as a local Medusa model + try: + import json + + medusa_head = Path(model_id) / "medusa_lm_head.pt" + if auto_convert: + medusa_sf = Path(model_id) / "medusa_lm_head.safetensors" + if not medusa_sf.exists(): + utils.convert_files([Path(medusa_head)], [medusa_sf], []) + medusa_config = Path(model_id) / "config.json" + with open(medusa_config, "r") as f: + config = json.load(f) + + model_id = config["base_model_name_or_path"] + revision = "main" + try: + utils.weight_files(model_id, revision, extension) + logger.info( + f"Files for parent {model_id} are already present on the host. " + "Skipping download." 
+ ) + return + # Local files not found + except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): + pass + except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): + pass + elif (Path(model_id) / "adapter_config.json").exists(): # Try to load as a local PEFT model try: diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py index 8a3bccdd..7be61906 100644 --- a/server/text_generation_server/models/flash_llama.py +++ b/server/text_generation_server/models/flash_llama.py @@ -71,15 +71,26 @@ class FlashLlama(FlashCausalLM): from text_generation_server.utils.medusa import MedusaModel from huggingface_hub import hf_hub_download import json - - medusa_config = hf_hub_download( - use_medusa, revision=revision, filename="config.json" - ) + import os + from pathlib import Path + + is_local_model = (Path(use_medusa).exists() and Path(use_medusa).is_dir()) or os.getenv( + "WEIGHTS_CACHE_OVERRIDE", None + ) is not None + + if not is_local_model: + medusa_config = hf_hub_download( + use_medusa, revision=revision, filename="config.json" + ) + medusa_head = hf_hub_download( + use_medusa, revision=revision, filename="medusa_lm_head.pt" + ) + else: + medusa_config = str(Path(use_medusa) / "config.json") + medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt") + with open(medusa_config, "r") as f: config = json.load(f) - medusa_head = hf_hub_download( - use_medusa, revision=revision, filename="medusa_lm_head.pt" - ) medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" weights = Weights( [medusa_sf], device, dtype, process_group=self.process_group From 12cfc7930bc7795f99828e9c0f19a31b4fccf2c7 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 11 Jan 2024 19:01:43 +0100 Subject: [PATCH 033/153] Return prompt vs generated tokens. (#1436) # What does this PR do? Fixes #637 Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. 
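
A quick way to verify the new headers end-to-end — a minimal sketch, assuming a TGI server already listening on `localhost:8080`; the prompt, port, and token counts below are placeholders, not part of this PR:

```bash
# Dump only the response headers; discard the body.
curl -s -D - -o /dev/null localhost:8080/generate \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}' \
    | grep -i '^x-'
# Among the x-* headers, this PR adds:
#   x-prompt-tokens     <input length as seen by the rust tokenizer>
#   x-generated-tokens  <number of tokens produced for this request>
```

Note that `x-prompt-tokens` comes from the input length computed in the validation pathway, so it is always filled, even when the client does not ask for prefill details.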
--- router/src/infer.rs | 15 +++++++++++++-- router/src/server.rs | 8 +++++++- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/router/src/infer.rs b/router/src/infer.rs index 8078cee7..fdf0ae77 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -101,6 +101,7 @@ impl Infer { ) -> Result< ( OwnedSemaphorePermit, + u32, UnboundedReceiverStream>, ), InferError, @@ -125,6 +126,7 @@ impl Infer { // MPSC channel to communicate with the background batching task let (response_tx, response_rx) = mpsc::unbounded_channel(); + let input_length = valid_request.input_length; // Append the request to the queue self.queue.append(Entry { @@ -141,7 +143,11 @@ impl Infer { self.shared.batching_task.notify_one(); // Return stream - Ok((permit, UnboundedReceiverStream::new(response_rx))) + Ok(( + permit, + input_length, + UnboundedReceiverStream::new(response_rx), + )) } /// Add a new request to the queue and return a InferResponse @@ -153,7 +159,7 @@ impl Infer { let use_top_tokens = request.parameters.top_n_tokens.is_some_and(|x| x > 0); // Create stream and keep semaphore permit as long as generate lives - let (_permit, mut stream) = self.generate_stream(request).await?; + let (_permit, _input_length, mut stream) = self.generate_stream(request).await?; // Return values let mut result_prefill = Vec::new(); @@ -207,6 +213,7 @@ impl Infer { { Ok(InferResponse { prefill: result_prefill, + _input_length, tokens: result_tokens, generated_text, queued, @@ -647,6 +654,10 @@ pub(crate) enum InferStreamResponse { #[derive(Debug)] pub(crate) struct InferResponse { + /// input_length is the input as perceived by the rust tokenizer in the + /// validation pathway. It is redundant with prefill.len() but prefill + /// has data only if the user asked for it. This will always be filled. + pub(crate) _input_length: u32, pub(crate) prefill: Vec, pub(crate) tokens: Vec, pub(crate) generated_text: GeneratedText, diff --git a/router/src/server.rs b/router/src/server.rs index 78e2af3b..1ec45563 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -172,6 +172,7 @@ async fn generate( }; // Token details + let input_length = response._input_length; let details = match details { true => { // convert best_of_responses @@ -259,6 +260,11 @@ async fn generate( "x-time-per-token", time_per_token.as_millis().to_string().parse().unwrap(), ); + headers.insert("x-prompt-tokens", input_length.into()); + headers.insert( + "x-generated-tokens", + response.generated_text.generated_tokens.into(), + ); // Metrics metrics::increment_counter!("tgi_request_success"); @@ -380,7 +386,7 @@ async fn generate_stream( } else { match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await { // Keep permit as long as generate_stream lives - Ok((_permit, mut response_stream)) => { + Ok((_permit, _input_length, mut response_stream)) => { // Server-Sent Event stream while let Some(response) = response_stream.next().await { match response { From 76b226b00c58e6ad5b39bbd75c21e0e6a751daac Mon Sep 17 00:00:00 2001 From: drbh Date: Tue, 16 Jan 2024 05:07:41 -0500 Subject: [PATCH 034/153] feat: supports openai chat completions API (#1427) This PR adds support to make TGI a drop in replacement for OpenAI clients by exposing the same HTTP interface. Notes - TGI inits a single model at startup so the `model` field is unused in HTTP requests. 
- `max_tokens` and `stream` should work as expected but other params may be (unimplemented or not supported) General approach - fetch the `tokenizer_config` at startup from the hub - pass `tokenizer_config` into `Infer` so we have it at request time - use the `chat_template` on the config to format chat request - parse jinja template and render chat string - pass inputs into existing generate function - wrap generation output in expected structure before returning ```bash curl localhost:3000/v1/chat/completions \ -X POST \ -d '{ "model": "tgi", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is deep learning?" } ], "stream": true, "max_tokens": 20 }' \ -H 'Content-Type: application/json' ``` It is also possible to use the `openai` python library and change the base url ```python from openai import OpenAI client = OpenAI( base_url="http://localhost:3000/v1", api_key="not needed for a local LLM" ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ], stream=True ) for message in chat_completion: print(message) ``` ```python from openai import OpenAI client = OpenAI( base_url="http://localhost:3000/v1", api_key="not needed for a local LLM" ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ], stream=False ) print(chat_completion) ``` ```bash cd text-generation-inference/server MASTER_ADDR=127.0.0.1 MASTER_PORT=5555 text-generation-server serve --trust-remote-code gpt2 ``` ***note many of the existing `chat_templates` use non standard `jinja` (ie. adding a `raise` to the template) which will throw an error when parsing; hence using `upstage/SOLAR-10.7B-Instruct-v1.0` since it has a valid template ```bash cd text-generation-inference/router cargo run -- --tokenizer-name upstage/SOLAR-10.7B-Instruct-v1.0 ``` trigger ```bash curl localhost:3000/v1/chat/completions \ -X POST \ -d '{ "model": "gpt-3.5-turbo", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is the IP address of the Google DNS servers?" 
} ], "stream": true, "max_tokens": 20, "logprobs": true }' \ -H 'Content-Type: application/json' ``` ^ supports `stream: true` and `stream: false` requests --- Cargo.lock | 11 ++ router/Cargo.toml | 2 + router/src/infer.rs | 51 ++++++--- router/src/lib.rs | 224 ++++++++++++++++++++++++++++++++++++++- router/src/main.rs | 129 ++++++++++++++++------ router/src/server.rs | 187 ++++++++++++++++++++++++++++++-- router/src/validation.rs | 2 +- 7 files changed, 550 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85ff920b..9048105e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1396,6 +1396,15 @@ dependencies = [ "unicase", ] +[[package]] +name = "minijinja" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "208758577ef2c86cf5dd3e85730d161413ec3284e2d73b2ef65d9a24d9971bcb" +dependencies = [ + "serde", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2863,10 +2872,12 @@ dependencies = [ "axum-tracing-opentelemetry", "clap", "futures", + "futures-util", "hf-hub", "init-tracing-opentelemetry", "metrics", "metrics-exporter-prometheus", + "minijinja", "ngrok", "nohash-hasher", "opentelemetry", diff --git a/router/Cargo.toml b/router/Cargo.toml index 5ccdb0cd..f6f16dae 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -43,6 +43,8 @@ utoipa = { version = "3.5.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] } ngrok = { version = "0.13.1", features = ["axum"], optional = true } init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] } +minijinja = "1.0.10" +futures-util = "0.3.30" [build-dependencies] vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] } diff --git a/router/src/infer.rs b/router/src/infer.rs index fdf0ae77..71d49a44 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -2,9 +2,11 @@ /// Batching and inference logic use crate::validation::{Validation, ValidationError}; +use crate::HubTokenizerConfig; +use crate::{ChatRequest, GenerateRequest, GenerateStreamResponse, PrefillToken}; use crate::{Entry, Queue, Token}; -use crate::{GenerateRequest, PrefillToken}; use futures::future::try_join_all; +use minijinja::{Environment, ErrorKind, Template}; use nohash_hasher::IntMap; use std::sync::{ atomic::{AtomicBool, Ordering}, @@ -15,7 +17,7 @@ use text_generation_client::{ }; use thiserror::Error; use tokio::sync::mpsc::error::SendError; -use tokio::sync::{mpsc, Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError}; +use tokio::sync::{mpsc, Notify, Semaphore, TryAcquireError}; use tokio::time::Instant; use tokio_stream::wrappers::UnboundedReceiverStream; use tokio_stream::StreamExt; @@ -32,6 +34,8 @@ pub struct Infer { shared: Arc, /// Inference limit limit_concurrent_requests: Arc, + /// Chat template + template: Option>, } /// Infer shared state @@ -56,6 +60,7 @@ impl Infer { window_size: Option, speculate: u32, generation_health: Arc, + tokenizer_config: HubTokenizerConfig, ) -> Self { // Infer shared state let queue = Queue::new( @@ -85,11 +90,21 @@ impl Infer { // Inference limit with a semaphore let semaphore = Arc::new(Semaphore::new(max_concurrent_requests)); + let template = tokenizer_config.chat_template.map(|t| { + let env = Box::new(Environment::new()); + let template_str = t.into_boxed_str(); + // leaking env and template_str as read-only, static resources for performance. 
+ Box::leak(env) + .template_from_str(Box::leak(template_str)) + .unwrap() + }); + Self { validation, queue, shared, limit_concurrent_requests: semaphore, + template, } } @@ -98,14 +113,7 @@ impl Infer { pub(crate) async fn generate_stream( &self, request: GenerateRequest, - ) -> Result< - ( - OwnedSemaphorePermit, - u32, - UnboundedReceiverStream>, - ), - InferError, - > { + ) -> Result { // Limit concurrent requests by acquiring a permit from the semaphore let permit = self .clone() @@ -150,6 +158,20 @@ impl Infer { )) } + /// Apply the chat template to the chat request + #[instrument(skip_all)] + pub(crate) fn apply_chat_template(&self, chat: ChatRequest) -> Result { + self.template + .as_ref() + .ok_or_else(|| InferError::TemplateError(ErrorKind::TemplateNotFound.into()))? + .render(chat) + .map_err(|e| { + metrics::increment_counter!("tgi_request_failure", "err" => "template"); + tracing::error!("{e}"); + InferError::TemplateError(e) + }) + } + /// Add a new request to the queue and return a InferResponse #[instrument(skip_all)] pub(crate) async fn generate( @@ -561,9 +583,9 @@ fn send_responses( let mut iterator = tokens_ .ids .into_iter() - .zip(tokens_.logprobs.into_iter()) - .zip(tokens_.texts.into_iter()) - .zip(tokens_.is_special.into_iter()) + .zip(tokens_.logprobs) + .zip(tokens_.texts) + .zip(tokens_.is_special) .enumerate() .peekable(); while let Some((i, (((id, logprob), text), special))) = iterator.next() { @@ -676,6 +698,8 @@ pub enum InferError { ValidationError(#[from] ValidationError), #[error("Incomplete generation")] IncompleteGeneration, + #[error("Template error: {0}")] + TemplateError(#[from] minijinja::Error), } impl InferError { @@ -685,6 +709,7 @@ impl InferError { InferError::Overloaded(_) => "overloaded", InferError::ValidationError(_) => "validation", InferError::IncompleteGeneration => "incomplete_generation", + InferError::TemplateError(_) => "template_error", } } } diff --git a/router/src/lib.rs b/router/src/lib.rs index 898fcd04..f6f8276f 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -5,12 +5,21 @@ mod queue; pub mod server; mod validation; -use infer::Infer; +use infer::{Infer, InferError, InferStreamResponse}; use queue::{Entry, Queue}; use serde::{Deserialize, Serialize}; +use tokio::sync::OwnedSemaphorePermit; +use tokio_stream::wrappers::UnboundedReceiverStream; use utoipa::ToSchema; use validation::Validation; +/// Type alias for generation responses +pub(crate) type GenerateStreamResponse = ( + OwnedSemaphorePermit, + u32, // input_length + UnboundedReceiverStream>, +); + /// Hub type #[derive(Clone, Debug, Deserialize)] pub struct HubModelInfo { @@ -20,6 +29,19 @@ pub struct HubModelInfo { pub pipeline_tag: Option, } +#[derive(Clone, Deserialize, Default)] +pub struct HubTokenizerConfig { + #[serde(default)] + pub chat_template: Option, +} + +impl HubTokenizerConfig { + pub fn from_file(filename: &str) -> Self { + let content = std::fs::read_to_string(filename).unwrap(); + serde_json::from_str(&content).unwrap_or_default() + } +} + #[derive(Clone, Debug, Serialize, ToSchema)] pub struct Info { /// Model info @@ -152,7 +174,7 @@ fn default_parameters() -> GenerateParameters { top_k: None, top_p: None, typical_p: None, - do_sample: false, + do_sample: true, max_new_tokens: default_max_new_tokens(), return_full_text: None, stop: Vec::new(), @@ -165,6 +187,193 @@ fn default_parameters() -> GenerateParameters { } } +#[derive(Clone, Deserialize, Serialize)] +pub(crate) struct ChatCompletion { + pub id: String, + pub object: String, + pub 
created: u64, + pub model: String, + pub system_fingerprint: String, + pub choices: Vec, + pub usage: Usage, +} + +#[derive(Clone, Deserialize, Serialize)] +pub(crate) struct ChatCompletionComplete { + pub index: u32, + pub message: Message, + pub logprobs: Option>, + pub finish_reason: String, +} + +#[derive(Clone, Deserialize, Serialize)] +pub(crate) struct Usage { + pub prompt_tokens: u32, + pub completion_tokens: u32, + pub total_tokens: u32, +} + +impl ChatCompletion { + pub(crate) fn new( + model: String, + system_fingerprint: String, + output: String, + created: u64, + details: Details, + return_logprobs: bool, + ) -> Self { + Self { + id: String::new(), + object: "text_completion".into(), + created, + model, + system_fingerprint, + choices: vec![ChatCompletionComplete { + index: 0, + message: Message { + role: "assistant".into(), + content: output, + }, + logprobs: return_logprobs + .then(|| details.tokens.iter().map(|t| t.logprob).collect()), + finish_reason: details.finish_reason.to_string(), + }], + usage: Usage { + prompt_tokens: details.prefill.len() as u32, + completion_tokens: details.generated_tokens, + total_tokens: details.prefill.len() as u32 + details.generated_tokens, + }, + } + } +} + +#[derive(Clone, Deserialize, Serialize)] +pub(crate) struct ChatCompletionChunk { + pub id: String, + pub object: String, + pub created: u64, + pub model: String, + pub system_fingerprint: String, + pub choices: Vec, +} + +#[derive(Clone, Deserialize, Serialize)] +pub(crate) struct ChatCompletionChoice { + pub index: u32, + pub delta: ChatCompletionDelta, + pub logprobs: Option, + pub finish_reason: Option, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct ChatCompletionDelta { + pub role: String, + pub content: String, +} + +impl ChatCompletionChunk { + pub(crate) fn new( + model: String, + system_fingerprint: String, + delta: String, + created: u64, + index: u32, + logprobs: Option, + finish_reason: Option, + ) -> Self { + Self { + id: String::new(), + object: "text_completion".to_string(), + created, + model, + system_fingerprint, + choices: vec![ChatCompletionChoice { + index, + delta: ChatCompletionDelta { + role: "assistant".to_string(), + content: delta, + }, + logprobs, + finish_reason, + }], + } + } +} + +fn default_request_messages() -> Vec { + vec![Message { + role: "user".to_string(), + content: "My name is David and I".to_string(), + }] +} + +#[derive(Clone, Deserialize, ToSchema, Serialize)] +pub(crate) struct ChatRequest { + /// UNUSED + #[schema(example = "bigscience/blomm-560m")] + /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. + pub model: String, /* NOTE: UNUSED */ + + /// A list of messages comprising the conversation so far. + #[serde(default = "default_request_messages")] + pub messages: Vec, + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, + /// decreasing the model's likelihood to repeat the same line verbatim. + #[serde(default)] + pub frequency_penalty: Option, + + /// UNUSED + /// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens + /// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, + /// the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, + /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should + /// result in a ban or exclusive selection of the relevant token. + #[serde(default)] + pub logit_bias: Option>, + + /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each + /// output token returned in the content of message. + #[serde(default)] + pub logprobs: Option, + + /// UNUSED + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with + /// an associated log probability. logprobs must be set to true if this parameter is used. + #[serde(default)] + pub top_logprobs: Option, + + /// The maximum number of tokens that can be generated in the chat completion. + #[serde(default)] + pub max_tokens: Option, + + /// UNUSED + /// How many chat completion choices to generate for each input message. Note that you will be charged based on the + /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs. + #[serde(default)] + pub n: Option, + + /// UNUSED + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, + /// increasing the model's likelihood to talk about new topics + #[serde(default)] + pub presence_penalty: Option, + + #[serde(default = "bool::default")] + pub stream: bool, + + #[schema(nullable = true, example = 42)] + pub seed: Option, +} + +#[derive(Clone, Deserialize, ToSchema, Serialize)] +pub(crate) struct Message { + #[schema(example = "user")] + pub role: String, + #[schema(example = "My name is David and I")] + pub content: String, +} + #[derive(Clone, Debug, Deserialize, ToSchema)] pub(crate) struct GenerateRequest { #[schema(example = "My name is Olivier and I")] @@ -227,6 +436,16 @@ pub(crate) enum FinishReason { StopSequence, } +impl std::fmt::Display for FinishReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FinishReason::Length => write!(f, "length"), + FinishReason::EndOfSequenceToken => write!(f, "eos_token"), + FinishReason::StopSequence => write!(f, "stop_sequence"), + } + } +} + #[derive(Serialize, ToSchema)] pub(crate) struct BestOfSequence { #[schema(example = "test")] @@ -279,6 +498,7 @@ pub(crate) struct StreamDetails { #[derive(Serialize, ToSchema)] pub(crate) struct StreamResponse { + pub index: u32, pub token: Token, #[serde(skip_serializing_if = "Vec::is_empty")] pub top_tokens: Vec, diff --git a/router/src/main.rs b/router/src/main.rs index 0c8e747b..24c4c14d 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -11,13 +11,12 @@ use opentelemetry::sdk::Resource; use opentelemetry::{global, KeyValue}; use opentelemetry_otlp::WithExportConfig; use std::env; -/// Text Generation Inference webserver entrypoint use std::fs::File; use std::io::BufReader; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::Path; use text_generation_client::{ClientError, ShardedClient}; -use text_generation_router::{server, HubModelInfo}; +use text_generation_router::{server, HubModelInfo, HubTokenizerConfig}; use thiserror::Error; use tokenizers::Tokenizer; use tower_http::cors::AllowOrigin; @@ -58,6 +57,8 @@ struct Args { #[clap(default_value = "bigscience/bloom", long, env)] tokenizer_name: String, #[clap(long, env)] + tokenizer_config_path: Option, + #[clap(long, env)] revision: Option, #[clap(default_value = "2", long, env)] validation_workers: 
usize, @@ -95,6 +96,7 @@ async fn main() -> Result<(), RouterError> { port, master_shard_uds_path, tokenizer_name, + tokenizer_config_path, revision, validation_workers, json_output, @@ -151,47 +153,72 @@ async fn main() -> Result<(), RouterError> { // This will only be used to validate payloads let local_path = Path::new(&tokenizer_name); let local_model = local_path.exists() && local_path.is_dir(); + + // Load tokenizer config + // This will be used to format the chat template + let local_tokenizer_config_path = + tokenizer_config_path.unwrap_or("tokenizer_config.json".to_string()); + let local_tokenizer_config = Path::new(&local_tokenizer_config_path).exists(); + + // Shared API builder initialization + let api_builder = || { + let mut builder = ApiBuilder::new() + .with_progress(false) + .with_token(authorization_token); + + if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") { + builder = builder.with_cache_dir(cache_dir.into()); + } + + builder + }; + + // Decide if we need to use the API based on the revision and local path + let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir(); + + // Initialize API if needed + let api = if use_api { + tracing::info!("Using the Hugging Face API"); + match api_builder().build() { + Ok(api) => Some(api), + Err(_) => { + tracing::warn!("Unable to build the Hugging Face API"); + None + } + } + } else { + None + }; + + // Load tokenizer and model info let skip_tokenizer_in_tgi = env::var("SKIP_TOKENIZER_IN_TGI") .ok() .map_or(false, |value| value.to_lowercase() == "true"); let (tokenizer, model_info) = if local_model { - // Get Model info - let model_info = HubModelInfo { - model_id: tokenizer_name.clone(), - sha: None, - pipeline_tag: None, - }; - - // Load local tokenizer let tokenizer = if skip_tokenizer_in_tgi { None } else { Tokenizer::from_file(local_path.join("tokenizer.json")).ok() }; + let model_info = HubModelInfo { + model_id: tokenizer_name.to_string(), + sha: None, + pipeline_tag: None, + }; (tokenizer, model_info) - } else { - let mut builder = ApiBuilder::new() - .with_progress(false) - .with_token(authorization_token); - - if let Some(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE").ok() { - builder = builder.with_cache_dir(cache_dir.into()); - } - - if revision.is_none() { - tracing::warn!("`--revision` is not set"); - tracing::warn!("We strongly advise to set it to a known supported commit."); - } - - let api = builder.build().unwrap(); + } else if let Some(api) = api.clone() { let api_repo = api.repo(Repo::with_revision( - tokenizer_name.clone(), + tokenizer_name.to_string(), RepoType::Model, - revision.clone().unwrap_or("main".to_string()), + revision.clone().unwrap_or_else(|| "main".to_string()), )); - // Get Model info + let tokenizer = match api_repo.get("tokenizer.json").await { + Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(), + Err(_) => get_base_tokenizer(&api, &api_repo).await, + }; + let model_info = get_model_info(&api_repo).await.unwrap_or_else(|| { tracing::warn!("Could not retrieve model info from the Hugging Face hub."); HubModelInfo { @@ -201,12 +228,33 @@ async fn main() -> Result<(), RouterError> { } }); - let tokenizer = match api_repo.get("tokenizer.json").await { - Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(), - Err(_) => get_base_tokenizer(&api, &api_repo).await, - }; - (tokenizer, model_info) + } else { + // No API and no local model + return Err(RouterError::ArgumentValidation( + "No local model found and no revision 
specified".to_string(), + )); + }; + + // Load tokenizer config if found locally, or check if we can get it from the API if needed + let tokenizer_config = if local_tokenizer_config { + tracing::info!("Using local tokenizer config"); + HubTokenizerConfig::from_file(&local_tokenizer_config_path) + } else if let Some(api) = api { + tracing::info!("Using the Hugging Face API to retrieve tokenizer config"); + get_tokenizer_config(&api.repo(Repo::with_revision( + tokenizer_name.to_string(), + RepoType::Model, + revision.unwrap_or_else(|| "main".to_string()), + ))) + .await + .unwrap_or_else(|| { + tracing::warn!("Could not retrieve tokenizer config from the Hugging Face hub."); + HubTokenizerConfig::default() + }) + } else { + tracing::warn!("Could not find tokenizer config locally and no revision specified"); + HubTokenizerConfig::default() }; if tokenizer.is_none() { @@ -307,6 +355,7 @@ async fn main() -> Result<(), RouterError> { ngrok, ngrok_authtoken, ngrok_edge, + tokenizer_config, ) .await?; Ok(()) @@ -411,6 +460,20 @@ pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option Option { + let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?; + + // Open the file in read-only mode with buffer. + let file = File::open(tokenizer_config_filename).ok()?; + let reader = BufReader::new(file); + + // Read the JSON contents of the file as an instance of 'HubTokenizerConfig'. + let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader).ok()?; + + Some(tokenizer_config) +} + #[derive(Debug, Error)] enum RouterError { #[error("Argument validation error: {0}")] diff --git a/router/src/server.rs b/router/src/server.rs index 1ec45563..c78397e5 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -4,10 +4,11 @@ use crate::health::Health; use crate::infer::{InferError, InferResponse, InferStreamResponse}; use crate::validation::ValidationError; +use crate::HubTokenizerConfig; use crate::{ - BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason, - GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken, - StreamDetails, StreamResponse, Token, Validation, + BestOfSequence, ChatCompletion, ChatCompletionChunk, ChatRequest, CompatGenerateRequest, + Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, + HubModelInfo, Infer, Info, PrefillToken, StreamDetails, StreamResponse, Token, Validation, }; use axum::extract::Extension; use axum::http::{HeaderMap, Method, StatusCode}; @@ -345,6 +346,21 @@ async fn generate_stream( HeaderMap, Sse>>, ) { + let on_message_callback = |stream_token: StreamResponse| { + let event = Event::default(); + event.json_data(stream_token).unwrap() + }; + let (headers, response_stream) = + generate_stream_internal(infer, Json(req), on_message_callback).await; + let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); + (headers, sse) +} + +async fn generate_stream_internal( + infer: Infer, + Json(req): Json, + on_message_callback: impl Fn(StreamResponse) -> Event, +) -> (HeaderMap, impl Stream>) { let span = tracing::Span::current(); let start_time = Instant::now(); metrics::increment_counter!("tgi_request_count"); @@ -387,8 +403,10 @@ async fn generate_stream( match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await { // Keep permit as long as generate_stream lives Ok((_permit, _input_length, mut response_stream)) => { + let mut index = 0; // Server-Sent Event stream 
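+                // NOTE: `index` is incremented before the first yield, so streamed events are numbered from 1.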
while let Some(response) = response_stream.next().await { + index += 1; match response { Ok(response) => { match response { @@ -403,13 +421,14 @@ async fn generate_stream( // StreamResponse let stream_token = StreamResponse { + index, token, top_tokens, generated_text: None, details: None, }; - - yield Ok(Event::default().json_data(stream_token).unwrap()) + let event = on_message_callback(stream_token); + yield Ok(event); } // Yield event for last token and compute timings InferStreamResponse::End { @@ -465,13 +484,16 @@ async fn generate_stream( tracing::info!(parent: &span, "Success"); let stream_token = StreamResponse { + index, token, top_tokens, generated_text: Some(output_text), details }; - yield Ok(Event::default().json_data(stream_token).unwrap()); + + let event = on_message_callback(stream_token); + yield Ok(event); break; } } @@ -502,7 +524,154 @@ async fn generate_stream( } }; - (headers, Sse::new(stream).keep_alive(KeepAlive::default())) + (headers, stream) +} + +/// Generate tokens +#[utoipa::path( + post, + tag = "Text Generation Inference", + path = "/v1/chat/completions", + request_body = ChatRequest, + responses( + (status = 200, description = "Generated Text", body = GenerateResponse), + (status = 424, description = "Generation Error", body = ErrorResponse, + example = json ! ({"error": "Request failed during generation"})), + (status = 429, description = "Model is overloaded", body = ErrorResponse, + example = json ! ({"error": "Model is overloaded"})), + (status = 422, description = "Input validation error", body = ErrorResponse, + example = json ! ({"error": "Input validation error"})), + (status = 500, description = "Incomplete generation", body = ErrorResponse, + example = json ! ({"error": "Incomplete generation"})), + ) + )] +#[instrument( + skip_all, + fields( + // parameters = ? 
req.parameters, + total_time, + validation_time, + queue_time, + inference_time, + time_per_token, + seed, + ) + )] +async fn chat_completions( + Extension(infer): Extension, + Extension(info): Extension, + Json(req): Json, +) -> Result)> { + metrics::increment_counter!("tgi_request_count"); + + let stream = req.stream; + let max_new_tokens = req.max_tokens.or(Some(100)); + let repetition_penalty = req + .frequency_penalty + // rescale frequency_penalty from (-2.0, 2.0) to (0.0, 4.0) + .map(|x| x + 2.0); + let logprobs = req.logprobs.unwrap_or(false); + let seed = req.seed; + + // apply chat template to flatten the request into a single input + let inputs = match infer.apply_chat_template(req) { + Ok(inputs) => inputs, + Err(err) => { + metrics::increment_counter!("tgi_request_failure", "err" => "validation"); + tracing::error!("{err}"); + return Err(( + StatusCode::UNPROCESSABLE_ENTITY, + Json(ErrorResponse { + error: err.to_string(), + error_type: err.error_type().to_string(), + }), + )); + } + }; + + // build the request passing some parameters + let generate_request = GenerateRequest { + inputs: inputs.to_string(), + parameters: GenerateParameters { + best_of: None, + temperature: None, + repetition_penalty, + top_k: None, + top_p: None, + typical_p: None, + do_sample: true, + max_new_tokens, + return_full_text: None, + stop: Vec::new(), + truncate: None, + watermark: false, + details: true, + decoder_input_details: true, + seed, + top_n_tokens: None, + }, + }; + + // static values that will be returned in all cases + let model_id = info.model_id.clone(); + let system_fingerprint = format!("{}-{}", info.version, info.docker_label.unwrap_or("native")); + + // switch on stream + if stream { + // pass this callback to the stream generation and build the required event structure + let on_message_callback = move |stream_token: StreamResponse| { + let event = Event::default(); + + let current_time = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_else(|_| std::time::Duration::from_secs(0)) + .as_secs(); + + event + .json_data(ChatCompletionChunk::new( + model_id.clone(), + system_fingerprint.clone(), + stream_token.token.text, + current_time, + stream_token.index, + logprobs.then_some(stream_token.token.logprob), + stream_token.details.map(|d| d.finish_reason.to_string()), + )) + .map_or_else( + |e| { + println!("Failed to serialize ChatCompletionChunk: {:?}", e); + Event::default() + }, + |data| data, + ) + }; + + let (headers, response_stream) = + generate_stream_internal(infer, Json(generate_request), on_message_callback).await; + let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); + Ok((headers, sse).into_response()) + } else { + let (headers, Json(generation)) = + generate(Extension(infer), Json(generate_request)).await?; + + let current_time = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_else(|_| std::time::Duration::from_secs(0)) + .as_secs(); + + // build the complete response object with the full text + let response = ChatCompletion::new( + generation.generated_text, + model_id, + system_fingerprint, + current_time, + generation.details.unwrap(), + logprobs, + ); + + // wrap generation inside a Vec to match api-inference + Ok((headers, Json(response)).into_response()) + } } /// Prometheus metrics scrape endpoint @@ -540,6 +709,7 @@ pub async fn run( ngrok: bool, ngrok_authtoken: Option, ngrok_edge: Option, + tokenizer_config: HubTokenizerConfig, ) -> Result<(), axum::BoxError> { // OpenAPI 
documentation #[derive(OpenApi)] @@ -608,6 +778,7 @@ pub async fn run( shard_info.window_size, shard_info.speculate, generation_health, + tokenizer_config, ); // Duration buckets @@ -697,6 +868,7 @@ pub async fn run( .route("/info", get(get_model_info)) .route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) + .route("/v1/chat/completions", post(chat_completions)) // AWS Sagemaker route .route("/invocations", post(compat_generate)) // Base Health route @@ -826,6 +998,7 @@ impl From for (StatusCode, Json) { InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS, InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY, InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR, + InferError::TemplateError(_) => StatusCode::UNPROCESSABLE_ENTITY, }; ( diff --git a/router/src/validation.rs b/router/src/validation.rs index 486e57b7..bbd94119 100644 --- a/router/src/validation.rs +++ b/router/src/validation.rs @@ -403,7 +403,7 @@ type TokenizerRequest = ( Span, ); -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct ValidGenerateRequest { pub inputs: String, pub input_length: u32, From 77afb882dc717f52cd6eb7ce033896f5b0746121 Mon Sep 17 00:00:00 2001 From: drbh Date: Thu, 18 Jan 2024 06:31:56 -0500 Subject: [PATCH 035/153] feat: support raise_exception, bos and eos tokens (#1450) This PR adds support to handle the custom jinja function `raise_exception` and passes the `bos` and `eos` tokens into the template Additionally this PR adds 3 tests to validate and show examples of what can and cannot be parsed currently. ```bash cargo test --package text-generation-router --lib -- infer::tests --nocapture # Finished test [unoptimized + debuginfo] target(s) in 7.82s # Running unittests src/lib.rs (target/debug/deps/text_generation_router-18a0bbf99c2ca1b4) # running 3 tests # test infer::tests::test_chat_template_valid_with_raise ... ok # test infer::tests::test_chat_template ... ok # test infer::tests::test_chat_template_invalid_with_raise ... ok # test result: ok. 
---
 router/src/infer.rs | 247 +++++++++++++++++++++++++++++++++++++++++--
 router/src/lib.rs | 10 +-
 router/src/server.rs | 8 +-
 3 files changed, 249 insertions(+), 16 deletions(-)

diff --git a/router/src/infer.rs b/router/src/infer.rs
index 71d49a44..675c18b1 100644
--- a/router/src/infer.rs
+++ b/router/src/infer.rs
@@ -2,9 +2,10 @@
 /// Batching and inference logic
 use crate::validation::{Validation, ValidationError};
-use crate::HubTokenizerConfig;
-use crate::{ChatRequest, GenerateRequest, GenerateStreamResponse, PrefillToken};
-use crate::{Entry, Queue, Token};
+use crate::{
+ ChatTemplateInputs, Entry, GenerateRequest, GenerateStreamResponse, HubTokenizerConfig,
+ Message, PrefillToken, Queue, Token,
+};
 use futures::future::try_join_all;
 use minijinja::{Environment, ErrorKind, Template};
 use nohash_hasher::IntMap;
@@ -34,8 +35,12 @@ pub struct Infer {
 shared: Arc<Shared>,
 /// Inference limit
 limit_concurrent_requests: Arc<Semaphore>,
- /// Chat template
- template: Option<Template<'static, 'static>>,
+ /// Chat template (template, bos_token, eos_token)
+ template: (
+ Option<Template<'static, 'static>>,
+ Option<String>,
+ Option<String>,
+ ),
 }

 /// Infer shared state
@@ -44,6 +49,11 @@ struct Shared {
 batching_task: Notify,
 }

+/// Raise an exception (custom function) used in the chat templates
+fn raise_exception(err_text: String) -> Result<String, minijinja::Error> {
+ Err(minijinja::Error::new(ErrorKind::SyntaxError, err_text))
+}
+
 impl Infer {
 #[allow(clippy::too_many_arguments)]
 pub(crate) fn new(
@@ -91,20 +101,28 @@ impl Infer {
 let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));

 let template = tokenizer_config.chat_template.map(|t| {
- let env = Box::new(Environment::new());
+ let mut env = Box::new(Environment::new());
 let template_str = t.into_boxed_str();
+ env.add_function("raise_exception", raise_exception);
 // leaking env and template_str as read-only, static resources for performance.
 Box::leak(env)
 .template_from_str(Box::leak(template_str))
 .unwrap()
 });
-
+ let eos_token = tokenizer_config
+ .eos_token
+ .map_or_else(String::new, |t| t)
+ .into();
+ let bos_token = tokenizer_config
+ .bos_token
+ .map_or_else(String::new, |t| t)
+ .into();
 Self {
 validation,
 queue,
 shared,
 limit_concurrent_requests: semaphore,
- template,
+ template: (template, eos_token, bos_token),
 }
 }

@@ -160,11 +178,16 @@ impl Infer {
 /// Apply the chat template to the chat request
 #[instrument(skip_all)]
- pub(crate) fn apply_chat_template(&self, chat: ChatRequest) -> Result<String, InferError> {
- self.template
+ pub(crate) fn apply_chat_template(&self, messages: Vec<Message>) -> Result<String, InferError> {
+ let (template, bos_token, eos_token) = &self.template;
+ template
 .as_ref()
 .ok_or_else(|| InferError::TemplateError(ErrorKind::TemplateNotFound.into()))?
- .render(chat) + .render(ChatTemplateInputs { + messages, + eos_token: eos_token.as_deref(), + bos_token: bos_token.as_deref(), + }) .map_err(|e| { metrics::increment_counter!("tgi_request_failure", "err" => "template"); tracing::error!("{e}"); @@ -713,3 +736,205 @@ impl InferError { } } } + +// tests +#[cfg(test)] +mod tests { + use crate::infer::raise_exception; + use crate::ChatTemplateInputs; + use crate::Message; + use minijinja::Environment; + + #[test] + fn test_chat_template() { + let env = Environment::new(); + + let source = r#" + {% for message in messages %} + {% if message['role'] == 'system' %} + {% if message['content']%} + {{'### System:\n' + message['content']+'\n\n'}} + {% endif %} + {% elif message['role'] == 'user' %} + {{'### User:\n' + message['content']+'\n\n'}} + {% elif message['role'] == 'assistant' %} + {{'### Assistant:\n' + message['content']}} + {% endif %} + {% if loop.last and add_generation_prompt %} + {{ '### Assistant:\n' }} + {% endif %} + {% endfor %}"#; + + // trim all the whitespace + let source = source + .lines() + .map(|line| line.trim()) + .collect::>() + .join(""); + + let tmpl = env.template_from_str(&source); + + let chat_template_inputs = ChatTemplateInputs { + messages: vec![ + Message { + role: "user".to_string(), + content: "Hi!".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "Hello how can I help?".to_string(), + }, + Message { + role: "user".to_string(), + content: "What is Deep Learning?".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "magic!".to_string(), + }, + ], + bos_token: Some("[BOS]"), + eos_token: Some("[EOS]"), + }; + + let result = tmpl.unwrap().render(chat_template_inputs).unwrap(); + + assert_eq!( + result, + r#"### User: +Hi! + +### Assistant: +Hello how can I help?### User: +What is Deep Learning? 
+ +### Assistant: +magic!"# + ); + } + + #[test] + fn test_chat_template_invalid_with_raise() { + let mut env = Environment::new(); + env.add_function("raise_exception", raise_exception); + + let source = r#" + {{ bos_token }} + {% for message in messages %} + {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %} + {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }} + {% endif %} + {% if message['role'] == 'user' %} + {{ '[INST] ' + message['content'] + ' [/INST]' }} + {% elif message['role'] == 'assistant' %} + {{ message['content'] + eos_token}} + {% else %} + {{ raise_exception('Only user and assistant roles are supported!') }} + {% endif %} + {% endfor %}"#; + + // trim all the whitespace + let source = source + .lines() + .map(|line| line.trim()) + .collect::>() + .join(""); + + let tmpl = env.template_from_str(&source); + + let chat_template_inputs = ChatTemplateInputs { + messages: vec![ + Message { + role: "user".to_string(), + content: "Hi!".to_string(), + }, + Message { + role: "user".to_string(), + content: "Hi again!".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "Hello how can I help?".to_string(), + }, + Message { + role: "user".to_string(), + content: "What is Deep Learning?".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "magic!".to_string(), + }, + ], + bos_token: Some("[BOS]"), + eos_token: Some("[EOS]"), + }; + + let result = tmpl.unwrap().render(chat_template_inputs); //.err().unwrap(); + + match result { + Ok(_) => panic!("Should have failed"), + Err(e) => { + assert_eq!( + e.detail().unwrap(), + "Conversation roles must alternate user/assistant/user/assistant/..." + ); + } + } + } + + #[test] + fn test_chat_template_valid_with_raise() { + let mut env = Environment::new(); + env.add_function("raise_exception", raise_exception); + + let source = r#" + {{ bos_token }} + {% for message in messages %} + {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %} + {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }} + {% endif %} + {% if message['role'] == 'user' %} + {{ '[INST] ' + message['content'] + ' [/INST]' }} + {% elif message['role'] == 'assistant' %} + {{ message['content'] + eos_token}} + {% else %} + {{ raise_exception('Only user and assistant roles are supported!') }} + {% endif %} + {% endfor %}"#; + + // trim all the whitespace + let source = source + .lines() + .map(|line| line.trim()) + .collect::>() + .join(""); + + let tmpl = env.template_from_str(&source); + + let chat_template_inputs = ChatTemplateInputs { + messages: vec![ + Message { + role: "user".to_string(), + content: "Hi!".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "Hello how can I help?".to_string(), + }, + Message { + role: "user".to_string(), + content: "What is Deep Learning?".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "magic!".to_string(), + }, + ], + bos_token: Some("[BOS]"), + eos_token: Some("[EOS]"), + }; + + let result = tmpl.unwrap().render(chat_template_inputs).unwrap(); + assert_eq!(result, "[BOS][INST] Hi! [/INST]Hello how can I help?[EOS][INST] What is Deep Learning? 
[/INST]magic![EOS]");
 }
}
diff --git a/router/src/lib.rs b/router/src/lib.rs
index f6f8276f..983079d6 100644
--- a/router/src/lib.rs
+++ b/router/src/lib.rs
@@ -31,8 +31,9 @@ pub struct HubModelInfo {
 #[derive(Clone, Deserialize, Default)]
 pub struct HubTokenizerConfig {
- #[serde(default)]
 pub chat_template: Option<String>,
+ pub bos_token: Option<String>,
+ pub eos_token: Option<String>,
 }

 impl HubTokenizerConfig {
@@ -366,6 +367,13 @@ pub(crate) struct ChatRequest {
 pub seed: Option<u64>,
 }

+#[derive(Clone, Serialize, Deserialize)]
+pub(crate) struct ChatTemplateInputs<'a> {
+ messages: Vec<Message>,
+ bos_token: Option<&'a str>,
+ eos_token: Option<&'a str>,
+}
+
 #[derive(Clone, Deserialize, ToSchema, Serialize)]
 pub(crate) struct Message {
 #[schema(example = "user")]
diff --git a/router/src/server.rs b/router/src/server.rs
index c78397e5..abddf81f 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -4,11 +4,11 @@
 use crate::health::Health;
 use crate::infer::{InferError, InferResponse, InferStreamResponse};
 use crate::validation::ValidationError;
-use crate::HubTokenizerConfig;
 use crate::{
 BestOfSequence, ChatCompletion, ChatCompletionChunk, ChatRequest, CompatGenerateRequest,
 Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse,
- HubModelInfo, Infer, Info, PrefillToken, StreamDetails, StreamResponse, Token, Validation,
+ HubModelInfo, HubTokenizerConfig, Infer, Info, PrefillToken, StreamDetails, StreamResponse,
+ Token, Validation,
 };
 use axum::extract::Extension;
 use axum::http::{HeaderMap, Method, StatusCode};
@@ -574,7 +574,7 @@ async fn chat_completions(
 let seed = req.seed;

 // apply chat template to flatten the request into a single input
- let inputs = match infer.apply_chat_template(req) {
+ let inputs = match infer.apply_chat_template(req.messages) {
 Ok(inputs) => inputs,
 Err(err) => {
 metrics::increment_counter!("tgi_request_failure", "err" => "validation");
@@ -661,9 +661,9 @@ async fn chat_completions(
 // build the complete response object with the full text
 let response = ChatCompletion::new(
- generation.generated_text,
 model_id,
 system_fingerprint,
+ generation.generated_text,
 current_time,
 generation.details.unwrap(),
 logprobs,

From 935ee007495f92cab11e3ea1738e0a06f798080b Mon Sep 17 00:00:00 2001
From: drbh
Date: Mon, 22 Jan 2024 09:22:54 -0500
Subject: [PATCH 036/153] chore: bump rust version and annotate/fix all clippy warnings (#1455)

This PR just bumps to the latest Rust version and makes clippy happy.

```bash
cargo clippy --all -- -D warnings
# Finished dev [unoptimized + debuginfo] target(s) in 0.10s
```
---
 benchmark/src/app.rs | 2 +-
 launcher/src/main.rs | 2 ++
 router/client/src/client.rs | 2 +-
 router/client/src/sharded_client.rs | 2 ++
 4 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/benchmark/src/app.rs b/benchmark/src/app.rs
index 49654c1b..b27c56b4 100644
--- a/benchmark/src/app.rs
+++ b/benchmark/src/app.rs
@@ -466,7 +466,7 @@ fn latency_paragraph<'a>(latency: &mut Vec<f64>, name: &'static str) -> Paragraph<'a> {
 let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]);

 // Latency p50/p90/p99 texts
- let colors = vec![Color::LightGreen, Color::LightYellow, Color::LightRed];
+ let colors = [Color::LightGreen, Color::LightYellow, Color::LightRed];
 for (i, (name, value)) in latency_percentiles.iter().enumerate() {
 let span = Line::from(vec![Span::styled(
 format!("{name}: {value:.2} ms"),
diff --git a/launcher/src/main.rs b/launcher/src/main.rs
index e814b833..25c780ed 100644
--- a/launcher/src/main.rs
+++ b/launcher/src/main.rs
@@ -55,6 +55,8 @@ impl std::fmt::Display for Quantization {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 // To keep in track with `server`.
 match self {
+ #[allow(deprecated)]
+ // Use `eetq` instead, which provides better latencies overall and is drop-in in most cases
 Quantization::Bitsandbytes => {
 write!(f, "bitsandbytes")
 }
diff --git a/router/client/src/client.rs b/router/client/src/client.rs
index ba7b7565..bd592c28 100644
--- a/router/client/src/client.rs
+++ b/router/client/src/client.rs
@@ -365,7 +365,7 @@ pub struct DecodeTimings {
 impl DecodeTimings {
 fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
 Self {
- concat: concat_ns.map(|v| Duration::from_nanos(v)),
+ concat: concat_ns.map(Duration::from_nanos),
 forward: Duration::from_nanos(forward_ns),
 decode: Duration::from_nanos(decode_ns),
 total: Duration::from_nanos(total_ns),
diff --git a/router/client/src/sharded_client.rs b/router/client/src/sharded_client.rs
index b71c0a7d..8d81da6a 100644
--- a/router/client/src/sharded_client.rs
+++ b/router/client/src/sharded_client.rs
@@ -130,6 +130,7 @@ impl ShardedClient {
 .iter_mut()
 .map(|client| Box::pin(client.prefill(batch.clone())))
 .collect();
+ #[allow(clippy::type_complexity)]
 let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> =
 join_all(futures).await.into_iter().collect();
 let mut results = results?;
@@ -162,6 +163,7 @@ impl ShardedClient {
 .iter_mut()
 .map(|client| Box::pin(client.decode(batches.clone())))
 .collect();
+ #[allow(clippy::type_complexity)]
 let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> =
 join_all(futures).await.into_iter().collect();
 let mut results = results?;

From 5836a1cc69e85862688b24924fd5244f36c84ce0 Mon Sep 17 00:00:00 2001
From: drbh
Date: Mon, 22 Jan 2024 10:29:01 -0500
Subject: [PATCH 037/153] feat: conditionally toggle chat on invocations route (#1454)

This PR adds support for reading the `OAI_ENABLED` env var, which changes the function called when `/invocations` is called. If `OAI_ENABLED=true`, the `chat_completions` method is used; otherwise it defaults to `compat_generate`.

Example: running the router

```bash
OAI_ENABLED=true \
cargo run -- \
  --tokenizer-name mistralai/Mistral-7B-Instruct-v0.2
```

Example request

```bash
curl localhost:3000/invocations \
    -X POST \
    -d '{
  "model": "tgi",
  "messages": [
    {
      "role": "user",
      "content": "What is the IP address of the Google DNS servers?"
    }
  ],
  "stream": false,
  "max_tokens": 20,
  "logprobs": true,
  "seed": 0
}' \
    -H 'Content-Type: application/json' | jq
```

Please let me know if any naming changes are needed or if any other routes need similar functionality.
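For comparison, when `OAI_ENABLED` is unset, `/invocations` keeps its existing behavior and expects the classic generate payload instead. A minimal sketch (parameter values are illustrative):

```bash
# Default behavior: /invocations routes to compat_generate, which takes
# an "inputs" string plus optional "parameters".
curl localhost:3000/invocations \
    -X POST \
    -d '{
  "inputs": "What is the IP address of the Google DNS servers?",
  "parameters": {"max_new_tokens": 20, "seed": 0}
}' \
    -H 'Content-Type: application/json' | jq
```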
--- router/src/main.rs | 4 ++++ router/src/server.rs | 32 ++++++++++++++++++++------------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/router/src/main.rs b/router/src/main.rs index 24c4c14d..bebf0e53 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -74,6 +74,8 @@ struct Args { ngrok_authtoken: Option, #[clap(long, env)] ngrok_edge: Option, + #[clap(long, env, default_value_t = false)] + chat_enabled_api: bool, } #[tokio::main] @@ -105,6 +107,7 @@ async fn main() -> Result<(), RouterError> { ngrok, ngrok_authtoken, ngrok_edge, + chat_enabled_api, } = args; // Launch Tokio runtime @@ -356,6 +359,7 @@ async fn main() -> Result<(), RouterError> { ngrok_authtoken, ngrok_edge, tokenizer_config, + chat_enabled_api, ) .await?; Ok(()) diff --git a/router/src/server.rs b/router/src/server.rs index abddf81f..cd8790b0 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -710,6 +710,7 @@ pub async fn run( ngrok_authtoken: Option, ngrok_edge: Option, tokenizer_config: HubTokenizerConfig, + chat_enabled_api: bool, ) -> Result<(), axum::BoxError> { // OpenAPI documentation #[derive(OpenApi)] @@ -860,25 +861,32 @@ pub async fn run( docker_label: option_env!("DOCKER_LABEL"), }; - // Create router - let app = Router::new() - .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi())) - // Base routes + // Configure Swagger UI + let swagger_ui = SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()); + + // Define base and health routes + let base_routes = Router::new() .route("/", post(compat_generate)) .route("/info", get(get_model_info)) .route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) .route("/v1/chat/completions", post(chat_completions)) - // AWS Sagemaker route - .route("/invocations", post(compat_generate)) - // Base Health route .route("/health", get(health)) - // Inference API health route - .route("/", get(health)) - // AWS Sagemaker health route .route("/ping", get(health)) - // Prometheus metrics route - .route("/metrics", get(metrics)) + .route("/metrics", get(metrics)); + + // Conditional AWS Sagemaker route + let aws_sagemaker_route = if chat_enabled_api { + Router::new().route("/invocations", post(chat_completions)) // Use 'chat_completions' for OAI_ENABLED + } else { + Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise + }; + + // Combine routes and layers + let app = Router::new() + .merge(swagger_ui) + .merge(base_routes) + .merge(aws_sagemaker_route) .layer(Extension(info)) .layer(Extension(health_ext.clone())) .layer(Extension(compat_return_full_text)) From 1b99d4c0b65ab7bb1d8563901b053a8aeca162f2 Mon Sep 17 00:00:00 2001 From: Jacob Keisling Date: Tue, 23 Jan 2024 08:55:05 -0600 Subject: [PATCH 038/153] Disable `decoder_input_details` on OpenAI-compatible chat streaming, pass temp and top-k from API (#1470) This PR makes some minor tweaks to the new OpenAI-compatible chat endpoint #1427 in `GenerateParameters`: - Disables `decoder_input_details` when streaming is enabled. This was causing all streaming chat requests to fail before, since [`decoder_input_details`==true is not enabled when streaming tokens](https://github.com/huggingface/text-generation-inference/blob/98e5faff9daec6170cc2b0f963f2d73cf846b341/router/src/validation.rs#L406). 
- Passes through `temperature` and `top_p` hyperparameters from the API request to `GenerateParameters` ## Testing ```bash curl localhost:8080/v1/chat/completions \ -X POST \ -d '{ "model": "", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is deep learning?" } ], "stream": true, "max_tokens": 20 }' \ -H 'Content-Type: application/json' ``` Should work correctly. Currently, most recent release from `main` returns error: ``` data:{"error":"Input validation error: `decoder_input_details` == true is not supported when streaming tokens","error_type":"validation"} ``` It's my first time contributing to this project, so I could be missing something. Would especially appreciate @drbh's eyes on this one --- router/src/lib.rs | 12 ++++++++++++ router/src/server.rs | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/router/src/lib.rs b/router/src/lib.rs index 983079d6..894ab466 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -365,6 +365,18 @@ pub(crate) struct ChatRequest { #[schema(nullable = true, example = 42)] pub seed: Option, + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while + /// lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + #[serde(default)] + pub temperature: Option, + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the + /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + #[serde(default)] + pub top_p: Option, } #[derive(Clone, Serialize, Deserialize)] diff --git a/router/src/server.rs b/router/src/server.rs index cd8790b0..dd4ddbd2 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -594,10 +594,10 @@ async fn chat_completions( inputs: inputs.to_string(), parameters: GenerateParameters { best_of: None, - temperature: None, + temperature: req.temperature, repetition_penalty, top_k: None, - top_p: None, + top_p: req.top_p, typical_p: None, do_sample: true, max_new_tokens, @@ -606,7 +606,7 @@ async fn chat_completions( truncate: None, watermark: false, details: true, - decoder_input_details: true, + decoder_input_details: !stream, seed, top_n_tokens: None, }, From 2a3a9c526bd19bea7785302a4df7bcd46ba8904c Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 24 Jan 2024 13:08:41 +0100 Subject: [PATCH 039/153] Fixing non divisible embeddings. (#1476) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. 
Feel free to tag members/contributors who may be interested in your PR. --- .../test_idefics/test_idefics.json | 55 +++-- .../test_idefics/test_idefics_load.json | 214 +++++++++--------- server/tests/utils/test_layers.py | 64 ++++++ server/text_generation_server/utils/layers.py | 4 +- .../text_generation_server/utils/weights.py | 2 +- 5 files changed, 199 insertions(+), 140 deletions(-) create mode 100644 server/tests/utils/test_layers.py diff --git a/integration-tests/models/__snapshots__/test_idefics/test_idefics.json b/integration-tests/models/__snapshots__/test_idefics/test_idefics.json index 2c5d05f6..90fb6dcc 100644 --- a/integration-tests/models/__snapshots__/test_idefics/test_idefics.json +++ b/integration-tests/models/__snapshots__/test_idefics/test_idefics.json @@ -11,92 +11,92 @@ }, { "id": 4911, - "logprob": -5.7851562, + "logprob": -6.9765625, "text": "User" }, { "id": 29901, - "logprob": -0.006996155, + "logprob": -0.0059432983, "text": ":" }, { "id": 32000, - "logprob": -0.81347656, + "logprob": -0.8408203, "text": "" }, { "id": 32001, - "logprob": -6.687641e-05, + "logprob": -9.906292e-05, "text": "" }, { "id": 32000, - "logprob": -3.5762787e-07, + "logprob": -2.3841858e-07, "text": "" }, { "id": 1815, - "logprob": -4.2148438, + "logprob": -4.1679688, "text": "Can" }, { "id": 366, - "logprob": -0.014137268, + "logprob": -0.014099121, "text": "you" }, { "id": 2649, - "logprob": -4.4335938, + "logprob": -4.4609375, "text": "tell" }, { "id": 592, - "logprob": -0.2919922, + "logprob": -0.29882812, "text": "me" }, { "id": 263, - "logprob": -4.2070312, + "logprob": -4.1445312, "text": "a" }, { "id": 1407, - "logprob": -9.421875, + "logprob": -9.3828125, "text": "very" }, { "id": 3273, - "logprob": -1.8720703, + "logprob": -1.9736328, "text": "short" }, { "id": 5828, - "logprob": -0.26489258, + "logprob": -0.2800293, "text": "story" }, { "id": 2729, - "logprob": -3.7441406, + "logprob": -3.5625, "text": "based" }, { "id": 373, - "logprob": -0.0005393028, + "logprob": -0.0006427765, "text": "on" }, { "id": 278, - "logprob": -0.140625, + "logprob": -0.13952637, "text": "the" }, { "id": 1967, - "logprob": -0.06756592, + "logprob": -0.068115234, "text": "image" }, { "id": 29973, - "logprob": -0.15454102, + "logprob": -0.16357422, "text": "?" 
} ], @@ -104,25 +104,25 @@ "tokens": [ { "id": 32002, - "logprob": -0.0019140244, + "logprob": -0.0026474, "special": true, "text": "" }, { "id": 29871, - "logprob": -8.404255e-05, + "logprob": -8.547306e-05, "special": false, "text": " " }, { "id": 13, - "logprob": -1.7642975e-05, + "logprob": -1.7881393e-05, "special": false, "text": "\n" }, { "id": 7900, - "logprob": -2.9802322e-06, + "logprob": -3.0994415e-06, "special": false, "text": "Ass" }, @@ -140,30 +140,29 @@ }, { "id": 319, - "logprob": -0.91064453, + "logprob": -0.92529297, "special": false, "text": " A" }, { "id": 696, - "logprob": -1.2412109, + "logprob": -1.1269531, "special": false, "text": " ro" }, { "id": 15664, - "logprob": -0.0002439022, + "logprob": -0.00029492378, "special": false, "text": "oster" }, { "id": 15028, - "logprob": -1.1630859, + "logprob": -1.1855469, "special": false, "text": " stands" } - ], - "top_tokens": null + ] }, "generated_text": " \nAssistant: A rooster stands" } diff --git a/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json b/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json index f258e38d..21d6161b 100644 --- a/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json +++ b/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json @@ -12,92 +12,92 @@ }, { "id": 4911, - "logprob": -5.7851562, + "logprob": -6.9804688, "text": "User" }, { "id": 29901, - "logprob": -0.006996155, + "logprob": -0.006122589, "text": ":" }, { "id": 32000, - "logprob": -0.81347656, + "logprob": -0.8417969, "text": "" }, { "id": 32001, - "logprob": -6.687641e-05, + "logprob": -9.918213e-05, "text": "" }, { "id": 32000, - "logprob": -3.5762787e-07, + "logprob": -2.3841858e-07, "text": "" }, { "id": 1815, - "logprob": -4.2148438, + "logprob": -4.1679688, "text": "Can" }, { "id": 366, - "logprob": -0.014137268, + "logprob": -0.014091492, "text": "you" }, { "id": 2649, - "logprob": -4.4335938, + "logprob": -4.4726562, "text": "tell" }, { "id": 592, - "logprob": -0.2919922, + "logprob": -0.2998047, "text": "me" }, { "id": 263, - "logprob": -4.2070312, + "logprob": -4.15625, "text": "a" }, { "id": 1407, - "logprob": -9.421875, + "logprob": -9.3828125, "text": "very" }, { "id": 3273, - "logprob": -1.8720703, + "logprob": -1.9716797, "text": "short" }, { "id": 5828, - "logprob": -0.26489258, + "logprob": -0.27734375, "text": "story" }, { "id": 2729, - "logprob": -3.7441406, + "logprob": -3.5605469, "text": "based" }, { "id": 373, - "logprob": -0.0005393028, + "logprob": -0.00064468384, "text": "on" }, { "id": 278, - "logprob": -0.140625, + "logprob": -0.14160156, "text": "the" }, { "id": 1967, - "logprob": -0.06756592, + "logprob": -0.06915283, "text": "image" }, { "id": 29973, - "logprob": -0.15454102, + "logprob": -0.16381836, "text": "?" 
} ], @@ -105,19 +105,19 @@ "tokens": [ { "id": 32002, - "logprob": -0.0019140244, + "logprob": -0.0026664734, "special": true, "text": "" }, { "id": 29871, - "logprob": -8.392334e-05, + "logprob": -8.583069e-05, "special": false, "text": " " }, { "id": 13, - "logprob": -1.7881393e-05, + "logprob": -1.8119812e-05, "special": false, "text": "\n" }, @@ -135,36 +135,35 @@ }, { "id": 29901, - "logprob": -3.0994415e-06, + "logprob": -3.2186508e-06, "special": false, "text": ":" }, { "id": 319, - "logprob": -0.9057617, + "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, - "logprob": -1.2294922, + "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, - "logprob": -0.00024533272, + "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, - "logprob": -1.1640625, + "logprob": -1.1865234, "special": false, "text": " stands" } - ], - "top_tokens": null + ] }, "generated_text": " \nAssistant: A rooster stands" }, @@ -181,92 +180,92 @@ }, { "id": 4911, - "logprob": -5.7773438, + "logprob": -6.9804688, "text": "User" }, { "id": 29901, - "logprob": -0.0070114136, + "logprob": -0.006122589, "text": ":" }, { "id": 32000, - "logprob": -0.8208008, + "logprob": -0.8417969, "text": "" }, { "id": 32001, - "logprob": -6.699562e-05, + "logprob": -9.942055e-05, "text": "" }, { "id": 32000, - "logprob": -3.5762787e-07, + "logprob": -2.3841858e-07, "text": "" }, { "id": 1815, - "logprob": -4.2265625, + "logprob": -4.1679688, "text": "Can" }, { "id": 366, - "logprob": -0.014175415, + "logprob": -0.014091492, "text": "you" }, { "id": 2649, - "logprob": -4.4296875, + "logprob": -4.4726562, "text": "tell" }, { "id": 592, - "logprob": -0.29516602, + "logprob": -0.2998047, "text": "me" }, { "id": 263, - "logprob": -4.2109375, + "logprob": -4.15625, "text": "a" }, { "id": 1407, - "logprob": -9.4296875, + "logprob": -9.3828125, "text": "very" }, { "id": 3273, - "logprob": -1.8720703, + "logprob": -1.9716797, "text": "short" }, { "id": 5828, - "logprob": -0.26879883, + "logprob": -0.27734375, "text": "story" }, { "id": 2729, - "logprob": -3.7675781, + "logprob": -3.5605469, "text": "based" }, { "id": 373, - "logprob": -0.0005354881, + "logprob": -0.0006451607, "text": "on" }, { "id": 278, - "logprob": -0.13671875, + "logprob": -0.14160156, "text": "the" }, { "id": 1967, - "logprob": -0.06719971, + "logprob": -0.06915283, "text": "image" }, { "id": 29973, - "logprob": -0.15551758, + "logprob": -0.16381836, "text": "?" 
} ], @@ -274,19 +273,19 @@ "tokens": [ { "id": 32002, - "logprob": -0.0019130707, + "logprob": -0.0026664734, "special": true, "text": "" }, { "id": 29871, - "logprob": -8.392334e-05, + "logprob": -8.571148e-05, "special": false, "text": " " }, { "id": 13, - "logprob": -1.7881393e-05, + "logprob": -1.8119812e-05, "special": false, "text": "\n" }, @@ -310,30 +309,29 @@ }, { "id": 319, - "logprob": -0.9013672, + "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, - "logprob": -1.2324219, + "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, - "logprob": -0.0002477169, + "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, - "logprob": -1.1660156, + "logprob": -1.1865234, "special": false, "text": " stands" } - ], - "top_tokens": null + ] }, "generated_text": " \nAssistant: A rooster stands" }, @@ -350,92 +348,92 @@ }, { "id": 4911, - "logprob": -5.7773438, + "logprob": -6.9804688, "text": "User" }, { "id": 29901, - "logprob": -0.0070114136, + "logprob": -0.006122589, "text": ":" }, { "id": 32000, - "logprob": -0.8208008, + "logprob": -0.8417969, "text": "" }, { "id": 32001, - "logprob": -6.699562e-05, + "logprob": -9.918213e-05, "text": "" }, { "id": 32000, - "logprob": -3.5762787e-07, + "logprob": -2.3841858e-07, "text": "" }, { "id": 1815, - "logprob": -4.2265625, + "logprob": -4.1679688, "text": "Can" }, { "id": 366, - "logprob": -0.014175415, + "logprob": -0.014091492, "text": "you" }, { "id": 2649, - "logprob": -4.4296875, + "logprob": -4.4726562, "text": "tell" }, { "id": 592, - "logprob": -0.29516602, + "logprob": -0.2998047, "text": "me" }, { "id": 263, - "logprob": -4.2109375, + "logprob": -4.15625, "text": "a" }, { "id": 1407, - "logprob": -9.4296875, + "logprob": -9.3828125, "text": "very" }, { "id": 3273, - "logprob": -1.8720703, + "logprob": -1.9716797, "text": "short" }, { "id": 5828, - "logprob": -0.26879883, + "logprob": -0.27734375, "text": "story" }, { "id": 2729, - "logprob": -3.7675781, + "logprob": -3.5605469, "text": "based" }, { "id": 373, - "logprob": -0.0005354881, + "logprob": -0.00064468384, "text": "on" }, { "id": 278, - "logprob": -0.13671875, + "logprob": -0.14160156, "text": "the" }, { "id": 1967, - "logprob": -0.06719971, + "logprob": -0.06915283, "text": "image" }, { "id": 29973, - "logprob": -0.15551758, + "logprob": -0.16381836, "text": "?" 
} ], @@ -443,19 +441,19 @@ "tokens": [ { "id": 32002, - "logprob": -0.001912117, + "logprob": -0.0026664734, "special": true, "text": "" }, { "id": 29871, - "logprob": -8.392334e-05, + "logprob": -8.59499e-05, "special": false, "text": " " }, { "id": 13, - "logprob": -1.7762184e-05, + "logprob": -1.8119812e-05, "special": false, "text": "\n" }, @@ -479,30 +477,29 @@ }, { "id": 319, - "logprob": -0.9013672, + "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, - "logprob": -1.2324219, + "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, - "logprob": -0.0002477169, + "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, - "logprob": -1.1660156, + "logprob": -1.1865234, "special": false, "text": " stands" } - ], - "top_tokens": null + ] }, "generated_text": " \nAssistant: A rooster stands" }, @@ -519,92 +516,92 @@ }, { "id": 4911, - "logprob": -5.7773438, + "logprob": -6.9804688, "text": "User" }, { "id": 29901, - "logprob": -0.0070114136, + "logprob": -0.006122589, "text": ":" }, { "id": 32000, - "logprob": -0.8208008, + "logprob": -0.8417969, "text": "" }, { "id": 32001, - "logprob": -6.699562e-05, + "logprob": -9.942055e-05, "text": "" }, { "id": 32000, - "logprob": -3.5762787e-07, + "logprob": -2.3841858e-07, "text": "" }, { "id": 1815, - "logprob": -4.2265625, + "logprob": -4.1679688, "text": "Can" }, { "id": 366, - "logprob": -0.014175415, + "logprob": -0.014091492, "text": "you" }, { "id": 2649, - "logprob": -4.4296875, + "logprob": -4.4726562, "text": "tell" }, { "id": 592, - "logprob": -0.29516602, + "logprob": -0.2998047, "text": "me" }, { "id": 263, - "logprob": -4.2109375, + "logprob": -4.15625, "text": "a" }, { "id": 1407, - "logprob": -9.4296875, + "logprob": -9.3828125, "text": "very" }, { "id": 3273, - "logprob": -1.8720703, + "logprob": -1.9716797, "text": "short" }, { "id": 5828, - "logprob": -0.26879883, + "logprob": -0.27734375, "text": "story" }, { "id": 2729, - "logprob": -3.7675781, + "logprob": -3.5605469, "text": "based" }, { "id": 373, - "logprob": -0.0005354881, + "logprob": -0.0006451607, "text": "on" }, { "id": 278, - "logprob": -0.13671875, + "logprob": -0.14160156, "text": "the" }, { "id": 1967, - "logprob": -0.06719971, + "logprob": -0.06915283, "text": "image" }, { "id": 29973, - "logprob": -0.15551758, + "logprob": -0.16381836, "text": "?" 
} ], @@ -612,19 +609,19 @@ "tokens": [ { "id": 32002, - "logprob": -0.001912117, + "logprob": -0.0026664734, "special": true, "text": "" }, { "id": 29871, - "logprob": -8.392334e-05, + "logprob": -8.571148e-05, "special": false, "text": " " }, { "id": 13, - "logprob": -1.7762184e-05, + "logprob": -1.8119812e-05, "special": false, "text": "\n" }, @@ -648,30 +645,29 @@ }, { "id": 319, - "logprob": -0.9013672, + "logprob": -0.9301758, "special": false, "text": " A" }, { "id": 696, - "logprob": -1.2324219, + "logprob": -1.1279297, "special": false, "text": " ro" }, { "id": 15664, - "logprob": -0.0002477169, + "logprob": -0.0002939701, "special": false, "text": "oster" }, { "id": 15028, - "logprob": -1.1660156, + "logprob": -1.1865234, "special": false, "text": " stands" } - ], - "top_tokens": null + ] }, "generated_text": " \nAssistant: A rooster stands" } diff --git a/server/tests/utils/test_layers.py b/server/tests/utils/test_layers.py new file mode 100644 index 00000000..0a9fecd1 --- /dev/null +++ b/server/tests/utils/test_layers.py @@ -0,0 +1,64 @@ +import torch +from text_generation_server.utils.layers import ( + TensorParallelEmbedding, +) + +class ProcessGroup: + def __init__(self, rank: int, world_size: int): + self._rank = rank + self.world_size = world_size + + def size(self)->int: + return self.world_size + + def rank(self)->int: + return self._rank + +class Weights: + def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int): + self.weight = torch.arange(vocab_size*hidden_dim).float().view(vocab_size, hidden_dim) + self.process_group = ProcessGroup(rank, world_size) + + + def get_partial_sharded(self, name:str, dim: int): + assert dim == 0 + + rank = self.process_group.rank() + world_size = self.process_group.size() + size = self.weight.shape[dim] + + block_size = (size + world_size - 1) // world_size + start = rank * block_size + stop = (rank + 1) * block_size + return self.weight[start:stop] + + def get_shape(self, name: str): + return self.weight.shape + +def test_weight_hub_files_offline_error(): + + vocab_size= 17 + weights = Weights(rank=0, world_size=1, vocab_size = vocab_size,hidden_dim = 256) + embeddings = TensorParallelEmbedding("", weights) + + input_ids = torch.arange(vocab_size) + output = embeddings.forward(input_ids) + assert embeddings.min_id == 0 + assert embeddings.max_id == 17 + torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256)) + + weights_0_2 = Weights(rank=0, world_size=2, vocab_size = vocab_size,hidden_dim = 256) + weights_1_2 = Weights(rank=1, world_size=2, vocab_size = vocab_size,hidden_dim = 256) + embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False) + assert embeddings_0_2.min_id == 0 + assert embeddings_0_2.max_id == 9 + torch.testing.assert_close(embeddings_0_2.weight , torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0).view(10, 256).float()) + embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False) + assert embeddings_1_2.min_id == 9 + assert embeddings_1_2.max_id == 17 + torch.testing.assert_close(embeddings_1_2.weight , torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0).view(9, 256).float()) + output_tp_0 = embeddings_0_2.forward(input_ids) + output_tp_1 = embeddings_1_2.forward(input_ids) + + torch.testing.assert_close(output, output_tp_0 + output_tp_1) + diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index d4fa2559..5a0de0d7 100644 --- 
a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -507,10 +507,10 @@ class TensorParallelEmbedding(nn.Module):
 world_size = process_group.size()
 rank = process_group.rank()

- block_size = num_embeddings // world_size
+ block_size = (num_embeddings + world_size - 1) // world_size
 self.min_id = rank * block_size
 self.max_id = min(num_embeddings, (rank + 1) * block_size)
- self.null_idx = block_size
+ self.null_idx = weight.shape[0]  # Usually block_size; may be smaller when vocab_size is not evenly divisible.
 self.process_group = weights.process_group
 self.reduce = reduce

diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
index c4e82a6d..186733f3 100644
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -92,7 +92,7 @@ class Weights:
 rank = self.process_group.rank()

 size = slice_.get_shape()[dim]
- block_size = size // world_size
+ block_size = (size + world_size - 1) // world_size
 start = rank * block_size
 stop = (rank + 1) * block_size
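A quick worked example of why the ceil division matters, using the same shapes as the new `test_layers.py` test above (vocab_size = 17, world_size = 2); a sketch, not part of the patch:

```python
def shard_bounds(size, world_size, rank):
    # Ceil division: every row is owned by exactly one rank, even when
    # size is not divisible by world_size.
    block_size = (size + world_size - 1) // world_size
    start = rank * block_size
    stop = min(size, (rank + 1) * block_size)
    return start, stop

assert shard_bounds(17, 2, 0) == (0, 9)
assert shard_bounds(17, 2, 1) == (9, 17)
# With the old floor division, block_size would be 8, the shards would cover
# rows 0..16 only, and the last row would belong to no rank.
```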
}, + {"role": "user", "content": "What is deep learning?"} + ], + stream=True +) + +# iterate and print stream +for message in chat_completion: + print(message) +``` + +## Synchronous + +If you prefer to make a synchronous request, you can do so like this: + +```python +from openai import OpenAI + +# init the client but point it to TGI +client = OpenAI( + base_url="http://localhost:3000/v1", + api_key="-" +) + +chat_completion = client.chat.completions.create( + model="tgi", + messages=[ + {"role": "system", "content": "You are a helpful assistant." }, + {"role": "user", "content": "What is deep learning?"} + ], + stream=False +) + +print(chat_completion) +``` + +## Cloud Providers + +TGI can be deployed on various cloud providers for scalable and robust text generation. One such provider is Amazon SageMaker, which has recently added support for TGI. Here's how you can deploy TGI on Amazon SageMaker: + +## Amazon SageMaker + +To enable the Messages API in Amazon SageMaker you need to set the environment variable `MESSAGES_API_ENABLED=true`. + +This will modify the `/invocations` route to accept Messages dictonaries consisting out of role and content. See the example below on how to deploy Llama with the new Messages API. + +```python +import json +import sagemaker +import boto3 +from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri + +try: + role = sagemaker.get_execution_role() +except ValueError: + iam = boto3.client('iam') + role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] + +# Hub Model configuration. https://huggingface.co/models +hub = { + 'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta', + 'SM_NUM_GPUS': json.dumps(1), + 'MESSAGES_API_ENABLED': True +} + +# create Hugging Face Model Class +huggingface_model = HuggingFaceModel( + image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"), + env=hub, + role=role, +) + +# deploy model to SageMaker Inference +predictor = huggingface_model.deploy( + initial_instance_count=1, + instance_type="ml.g5.2xlarge", + container_startup_health_check_timeout=300, + ) + +# send request +predictor.predict({ +"messages": [ + {"role": "system", "content": "You are a helpful assistant." 
}, + {"role": "user", "content": "What is deep learning?"} + ] +}) +``` \ No newline at end of file diff --git a/router/src/main.rs b/router/src/main.rs index bebf0e53..5f52b801 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -75,7 +75,7 @@ struct Args { #[clap(long, env)] ngrok_edge: Option, #[clap(long, env, default_value_t = false)] - chat_enabled_api: bool, + messages_api_enabled: bool, } #[tokio::main] @@ -107,7 +107,7 @@ async fn main() -> Result<(), RouterError> { ngrok, ngrok_authtoken, ngrok_edge, - chat_enabled_api, + messages_api_enabled, } = args; // Launch Tokio runtime @@ -359,7 +359,7 @@ async fn main() -> Result<(), RouterError> { ngrok_authtoken, ngrok_edge, tokenizer_config, - chat_enabled_api, + messages_api_enabled, ) .await?; Ok(()) diff --git a/router/src/server.rs b/router/src/server.rs index dd4ddbd2..d18be174 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -710,7 +710,7 @@ pub async fn run( ngrok_authtoken: Option, ngrok_edge: Option, tokenizer_config: HubTokenizerConfig, - chat_enabled_api: bool, + messages_api_enabled: bool, ) -> Result<(), axum::BoxError> { // OpenAPI documentation #[derive(OpenApi)] @@ -876,7 +876,7 @@ pub async fn run( .route("/metrics", get(metrics)); // Conditional AWS Sagemaker route - let aws_sagemaker_route = if chat_enabled_api { + let aws_sagemaker_route = if messages_api_enabled { Router::new().route("/invocations", post(chat_completions)) // Use 'chat_completions' for OAI_ENABLED } else { Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise From be9bfae18ceb66ddbc0f4b4491ef5132f6bba351 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 25 Jan 2024 14:19:03 +0100 Subject: [PATCH 041/153] Add a new `/tokenize` route to get the tokenized input (#1471) Ideally this is done client side, but this is a recurring request, therefore we implemented it. - Runs only if rust tokenizer is present (not encumbering the main inference pipeline is important). - Returns simple results, ID, text (gotten with offsets from the original string) and offsets (so users can do things like highlighting text). Fixes # (issue) - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. 
--- docs/openapi.json | 884 +-------------------------------------- router/src/infer.rs | 22 + router/src/lib.rs | 12 + router/src/server.rs | 55 ++- router/src/validation.rs | 43 +- 5 files changed, 113 insertions(+), 903 deletions(-) diff --git a/docs/openapi.json b/docs/openapi.json index df2d427f..4454259b 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -1,883 +1 @@ -{ - "openapi": "3.0.3", - "info": { - "title": "Text Generation Inference", - "description": "Text Generation Webserver", - "contact": { - "name": "Olivier Dehaene" - }, - "license": { - "name": "Apache 2.0", - "url": "https://www.apache.org/licenses/LICENSE-2.0" - }, - "version": "1.3.4" - }, - "paths": { - "/": { - "post": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", - "description": "Generate tokens if `stream == false` or a stream of token if `stream == true`", - "operationId": "compat_generate", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CompatGenerateRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Generated Text", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateResponse" - } - }, - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/StreamResponse" - } - } - } - }, - "422": { - "description": "Input validation error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Input validation error" - } - } - } - }, - "424": { - "description": "Generation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Request failed during generation" - } - } - } - }, - "429": { - "description": "Model is overloaded", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Model is overloaded" - } - } - } - }, - "500": { - "description": "Incomplete generation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Incomplete generation" - } - } - } - } - } - } - }, - "/generate": { - "post": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Generate tokens", - "description": "Generate tokens", - "operationId": "generate", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Generated Text", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateResponse" - } - } - } - }, - "422": { - "description": "Input validation error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Input validation error" - } - } - } - }, - "424": { - "description": "Generation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Request failed during generation" - } - } - } - }, - "429": { - "description": "Model is overloaded", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Model is overloaded" - } - } - } - }, - "500": { - 
"description": "Incomplete generation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Incomplete generation" - } - } - } - } - } - } - }, - "/generate_stream": { - "post": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Generate a stream of token using Server-Sent Events", - "description": "Generate a stream of token using Server-Sent Events", - "operationId": "generate_stream", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GenerateRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Generated Text", - "content": { - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/StreamResponse" - } - } - } - }, - "422": { - "description": "Input validation error", - "content": { - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Input validation error" - } - } - } - }, - "424": { - "description": "Generation Error", - "content": { - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Request failed during generation" - } - } - } - }, - "429": { - "description": "Model is overloaded", - "content": { - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Model is overloaded" - } - } - } - }, - "500": { - "description": "Incomplete generation", - "content": { - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "Incomplete generation" - } - } - } - } - } - } - }, - "/health": { - "get": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Health check method", - "description": "Health check method", - "operationId": "health", - "responses": { - "200": { - "description": "Everything is working fine" - }, - "503": { - "description": "Text generation inference is down", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ErrorResponse" - }, - "example": { - "error": "unhealthy", - "error_type": "healthcheck" - } - } - } - } - } - } - }, - "/info": { - "get": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Text Generation Inference endpoint info", - "description": "Text Generation Inference endpoint info", - "operationId": "get_model_info", - "responses": { - "200": { - "description": "Served model info", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Info" - } - } - } - } - } - } - }, - "/metrics": { - "get": { - "tags": [ - "Text Generation Inference" - ], - "summary": "Prometheus metrics scrape endpoint", - "description": "Prometheus metrics scrape endpoint", - "operationId": "metrics", - "responses": { - "200": { - "description": "Prometheus Metrics", - "content": { - "text/plain": { - "schema": { - "type": "string" - } - } - } - } - } - } - } - }, - "components": { - "schemas": { - "BestOfSequence": { - "type": "object", - "required": [ - "generated_text", - "finish_reason", - "generated_tokens", - "prefill", - "tokens" - ], - "properties": { - "finish_reason": { - "$ref": "#/components/schemas/FinishReason" - }, - "generated_text": { - "type": "string", - "example": "test" - }, - "generated_tokens": { - "type": "integer", - "format": "int32", - "example": 1, - "minimum": 0 - }, - "prefill": { - "type": "array", - "items": { - "$ref": 
"#/components/schemas/PrefillToken" - } - }, - "seed": { - "type": "integer", - "format": "int64", - "example": 42, - "nullable": true, - "minimum": 0 - }, - "tokens": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Token" - } - }, - "top_tokens": { - "type": "array", - "items": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Token" - } - } - } - } - }, - "CompatGenerateRequest": { - "type": "object", - "required": [ - "inputs" - ], - "properties": { - "inputs": { - "type": "string", - "example": "My name is Olivier and I" - }, - "parameters": { - "$ref": "#/components/schemas/GenerateParameters" - }, - "stream": { - "type": "boolean", - "default": "false" - } - } - }, - "Details": { - "type": "object", - "required": [ - "finish_reason", - "generated_tokens", - "prefill", - "tokens" - ], - "properties": { - "best_of_sequences": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BestOfSequence" - }, - "nullable": true - }, - "finish_reason": { - "$ref": "#/components/schemas/FinishReason" - }, - "generated_tokens": { - "type": "integer", - "format": "int32", - "example": 1, - "minimum": 0 - }, - "prefill": { - "type": "array", - "items": { - "$ref": "#/components/schemas/PrefillToken" - } - }, - "seed": { - "type": "integer", - "format": "int64", - "example": 42, - "nullable": true, - "minimum": 0 - }, - "tokens": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Token" - } - }, - "top_tokens": { - "type": "array", - "items": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Token" - } - } - } - } - }, - "ErrorResponse": { - "type": "object", - "required": [ - "error", - "error_type" - ], - "properties": { - "error": { - "type": "string" - }, - "error_type": { - "type": "string" - } - } - }, - "FinishReason": { - "type": "string", - "enum": [ - "length", - "eos_token", - "stop_sequence" - ] - }, - "GenerateParameters": { - "type": "object", - "properties": { - "best_of": { - "type": "integer", - "default": "null", - "example": 1, - "nullable": true, - "minimum": 0, - "exclusiveMinimum": 0 - }, - "decoder_input_details": { - "type": "boolean", - "default": "true" - }, - "details": { - "type": "boolean", - "default": "true" - }, - "do_sample": { - "type": "boolean", - "default": "false", - "example": true - }, - "max_new_tokens": { - "type": "integer", - "format": "int32", - "default": "20", - "example": "20", - "nullable": true, - "minimum": 0 - }, - "repetition_penalty": { - "type": "number", - "format": "float", - "default": "null", - "example": 1.03, - "nullable": true, - "exclusiveMinimum": 0 - }, - "return_full_text": { - "type": "boolean", - "default": "null", - "example": false, - "nullable": true - }, - "seed": { - "type": "integer", - "format": "int64", - "default": "null", - "example": "null", - "nullable": true, - "minimum": 0, - "exclusiveMinimum": 0 - }, - "stop": { - "type": "array", - "items": { - "type": "string" - }, - "example": [ - "photographer" - ], - "maxItems": 4 - }, - "temperature": { - "type": "number", - "format": "float", - "default": "null", - "example": 0.5, - "nullable": true, - "exclusiveMinimum": 0 - }, - "top_k": { - "type": "integer", - "format": "int32", - "default": "null", - "example": 10, - "nullable": true, - "exclusiveMinimum": 0 - }, - "top_n_tokens": { - "type": "integer", - "format": "int32", - "default": "null", - "example": 5, - "nullable": true, - "minimum": 0, - "exclusiveMinimum": 0 - }, - "top_p": { - "type": "number", - "format": "float", - "default": "null", 
- "example": 0.95, - "nullable": true, - "maximum": 1, - "exclusiveMinimum": 0 - }, - "truncate": { - "type": "integer", - "default": "null", - "example": "null", - "nullable": true, - "minimum": 0 - }, - "typical_p": { - "type": "number", - "format": "float", - "default": "null", - "example": 0.95, - "nullable": true, - "maximum": 1, - "exclusiveMinimum": 0 - }, - "watermark": { - "type": "boolean", - "default": "false", - "example": true - } - } - }, - "GenerateRequest": { - "type": "object", - "required": [ - "inputs" - ], - "properties": { - "inputs": { - "type": "string", - "example": "My name is Olivier and I" - }, - "parameters": { - "$ref": "#/components/schemas/GenerateParameters" - } - } - }, - "GenerateResponse": { - "type": "object", - "required": [ - "generated_text" - ], - "properties": { - "details": { - "allOf": [ - { - "$ref": "#/components/schemas/Details" - } - ], - "nullable": true - }, - "generated_text": { - "type": "string", - "example": "test" - } - } - }, - "Info": { - "type": "object", - "required": [ - "model_id", - "model_dtype", - "model_device_type", - "max_concurrent_requests", - "max_best_of", - "max_stop_sequences", - "max_input_length", - "max_total_tokens", - "waiting_served_ratio", - "max_batch_total_tokens", - "max_waiting_tokens", - "validation_workers", - "version" - ], - "properties": { - "docker_label": { - "type": "string", - "example": "null", - "nullable": true - }, - "max_batch_total_tokens": { - "type": "integer", - "format": "int32", - "example": "32000", - "minimum": 0 - }, - "max_best_of": { - "type": "integer", - "example": "2", - "minimum": 0 - }, - "max_concurrent_requests": { - "type": "integer", - "description": "Router Parameters", - "example": "128", - "minimum": 0 - }, - "max_input_length": { - "type": "integer", - "example": "1024", - "minimum": 0 - }, - "max_stop_sequences": { - "type": "integer", - "example": "4", - "minimum": 0 - }, - "max_total_tokens": { - "type": "integer", - "example": "2048", - "minimum": 0 - }, - "max_waiting_tokens": { - "type": "integer", - "example": "20", - "minimum": 0 - }, - "model_device_type": { - "type": "string", - "example": "cuda" - }, - "model_dtype": { - "type": "string", - "example": "torch.float16" - }, - "model_id": { - "type": "string", - "description": "Model info", - "example": "bigscience/blomm-560m" - }, - "model_pipeline_tag": { - "type": "string", - "example": "text-generation", - "nullable": true - }, - "model_sha": { - "type": "string", - "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", - "nullable": true - }, - "sha": { - "type": "string", - "example": "null", - "nullable": true - }, - "validation_workers": { - "type": "integer", - "example": "2", - "minimum": 0 - }, - "version": { - "type": "string", - "description": "Router Info", - "example": "0.5.0" - }, - "waiting_served_ratio": { - "type": "number", - "format": "float", - "example": "1.2" - } - } - }, - "PrefillToken": { - "type": "object", - "required": [ - "id", - "text", - "logprob" - ], - "properties": { - "id": { - "type": "integer", - "format": "int32", - "example": 0, - "minimum": 0 - }, - "logprob": { - "type": "number", - "format": "float", - "example": -0.34, - "nullable": true - }, - "text": { - "type": "string", - "example": "test" - } - } - }, - "StreamDetails": { - "type": "object", - "required": [ - "finish_reason", - "generated_tokens" - ], - "properties": { - "finish_reason": { - "$ref": "#/components/schemas/FinishReason" - }, - "generated_tokens": { - "type": "integer", - "format": "int32", - 
"example": 1, - "minimum": 0 - }, - "seed": { - "type": "integer", - "format": "int64", - "example": 42, - "nullable": true, - "minimum": 0 - } - } - }, - "StreamResponse": { - "type": "object", - "required": [ - "token" - ], - "properties": { - "details": { - "allOf": [ - { - "$ref": "#/components/schemas/StreamDetails" - } - ], - "default": "null", - "nullable": true - }, - "generated_text": { - "type": "string", - "default": "null", - "example": "test", - "nullable": true - }, - "token": { - "$ref": "#/components/schemas/Token" - }, - "top_tokens": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Token" - } - } - } - }, - "Token": { - "type": "object", - "required": [ - "id", - "text", - "logprob", - "special" - ], - "properties": { - "id": { - "type": "integer", - "format": "int32", - "example": 0, - "minimum": 0 - }, - "logprob": { - "type": "number", - "format": "float", - "example": -0.34, - "nullable": true - }, - "special": { - "type": "boolean", - "example": "false" - }, - "text": { - "type": "string", - "example": "test" - } - } - } - } - }, - "tags": [ - { - "name": "Text Generation Inference", - "description": "Hugging Face Text Generation Inference API" - } - ] -} +{"openapi":"3.0.3","info":{"title":"Text Generation Inference","description":"Text Generation Webserver","contact":{"name":"Olivier Dehaene"},"license":{"name":"Apache 2.0","url":"https://www.apache.org/licenses/LICENSE-2.0"},"version":"1.3.4"},"paths":{"/":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens if `stream == false` or a stream of token if `stream == true`","description":"Generate tokens if `stream == false` or a stream of token if `stream == true`","operationId":"compat_generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CompatGenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}},"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed 
during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate_stream":{"post":{"tags":["Text Generation Inference"],"summary":"Generate a stream of token using Server-Sent Events","description":"Generate a stream of token using Server-Sent Events","operationId":"generate_stream","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/health":{"get":{"tags":["Text Generation Inference"],"summary":"Health check method","description":"Health check method","operationId":"health","responses":{"200":{"description":"Everything is working fine"},"503":{"description":"Text generation inference is down","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"unhealthy","error_type":"healthcheck"}}}}}}},"/info":{"get":{"tags":["Text Generation Inference"],"summary":"Text Generation Inference endpoint info","description":"Text Generation Inference endpoint info","operationId":"get_model_info","responses":{"200":{"description":"Served model info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Info"}}}}}}},"/metrics":{"get":{"tags":["Text Generation Inference"],"summary":"Prometheus metrics scrape endpoint","description":"Prometheus metrics scrape endpoint","operationId":"metrics","responses":{"200":{"description":"Prometheus Metrics","content":{"text/plain":{"schema":{"type":"string"}}}}}}},"/tokenize":{"post":{"tags":["Text Generation Inference"],"summary":"Tokenize inputs","description":"Tokenize inputs","operationId":"tokenize","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeRequest"}}},"required":true},"responses":{"200":{"description":"Tokenized ids","content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeResponse"}}}},"404":{"description":"No tokenizer found","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"No fast tokenizer available"}}}}}}},"/v1/chat/completions":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"chat_completions","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatRequest"}}},"required":true},"responses":{"200":{"description":"Generated 
Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatCompletionChunk"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}}},"components":{"schemas":{"BestOfSequence":{"type":"object","required":["generated_text","finish_reason","generated_tokens","prefill","tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_text":{"type":"string","example":"test"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"CompatGenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"},"stream":{"type":"boolean","default":"false"}}},"Details":{"type":"object","required":["finish_reason","generated_tokens","prefill","tokens"],"properties":{"best_of_sequences":{"type":"array","items":{"$ref":"#/components/schemas/BestOfSequence"},"nullable":true},"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ErrorResponse":{"type":"object","required":["error","error_type"],"properties":{"error":{"type":"string"},"error_type":{"type":"string"}}},"FinishReason":{"type":"string","enum":["length","eos_token","stop_sequence"]},"GenerateParameters":{"type":"object","properties":{"best_of":{"type":"integer","default":"null","example":1,"nullable":true,"minimum":0,"exclusiveMinimum":0},"decoder_input_details":{"type":"boolean","default":"true"},"details":{"type":"boolean","default":"true"},"do_sample":{"type":"boolean","default":"false","example":true},"max_new_tokens":{"type":"integer","format":"int32","default":"100","example":"20","nullable":true,"minimum":0},"repetition_penalty":{"type":"number","format":"float","default":"null","example":1.03,"nullable":true,"exclusiveMinimum":0},"return_full_text":{"type":"boolean","default":"null","example":false,"nullable":true},"seed":{"type":"integer","format":"int64","default":"null","example":"null","nullable":true,"minimum":0,"exclusiveMinimum":0},"stop":{"type":"array","items":{"type":"string"},"example":["photographer"],"maxItems":4},"temperatu
re":{"type":"number","format":"float","default":"null","example":0.5,"nullable":true,"exclusiveMinimum":0},"top_k":{"type":"integer","format":"int32","default":"null","example":10,"nullable":true,"exclusiveMinimum":0},"top_n_tokens":{"type":"integer","format":"int32","default":"null","example":5,"nullable":true,"minimum":0,"exclusiveMinimum":0},"top_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"truncate":{"type":"integer","default":"null","example":"null","nullable":true,"minimum":0},"typical_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"watermark":{"type":"boolean","default":"false","example":true}}},"GenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"}}},"GenerateResponse":{"type":"object","required":["generated_text"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/Details"}],"nullable":true},"generated_text":{"type":"string","example":"test"}}},"Info":{"type":"object","required":["model_id","model_dtype","model_device_type","max_concurrent_requests","max_best_of","max_stop_sequences","max_input_length","max_total_tokens","waiting_served_ratio","max_batch_total_tokens","max_waiting_tokens","validation_workers","version"],"properties":{"docker_label":{"type":"string","example":"null","nullable":true},"max_batch_total_tokens":{"type":"integer","format":"int32","example":"32000","minimum":0},"max_best_of":{"type":"integer","example":"2","minimum":0},"max_concurrent_requests":{"type":"integer","description":"Router Parameters","example":"128","minimum":0},"max_input_length":{"type":"integer","example":"1024","minimum":0},"max_stop_sequences":{"type":"integer","example":"4","minimum":0},"max_total_tokens":{"type":"integer","example":"2048","minimum":0},"max_waiting_tokens":{"type":"integer","example":"20","minimum":0},"model_device_type":{"type":"string","example":"cuda"},"model_dtype":{"type":"string","example":"torch.float16"},"model_id":{"type":"string","description":"Model info","example":"bigscience/blomm-560m"},"model_pipeline_tag":{"type":"string","example":"text-generation","nullable":true},"model_sha":{"type":"string","example":"e985a63cdc139290c5f700ff1929f0b5942cced2","nullable":true},"sha":{"type":"string","example":"null","nullable":true},"validation_workers":{"type":"integer","example":"2","minimum":0},"version":{"type":"string","description":"Router 
Info","example":"0.5.0"},"waiting_served_ratio":{"type":"number","format":"float","example":"1.2"}}},"PrefillToken":{"type":"object","required":["id","text","logprob"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"text":{"type":"string","example":"test"}}},"StreamDetails":{"type":"object","required":["finish_reason","generated_tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0}}},"StreamResponse":{"type":"object","required":["index","token"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/StreamDetails"}],"default":"null","nullable":true},"generated_text":{"type":"string","default":"null","example":"test","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"token":{"$ref":"#/components/schemas/Token"},"top_tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}},"Token":{"type":"object","required":["id","text","logprob","special"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"special":{"type":"boolean","example":"false"},"text":{"type":"string","example":"test"}}}}},"tags":[{"name":"Text Generation Inference","description":"Hugging Face Text Generation Inference API"}]} diff --git a/router/src/infer.rs b/router/src/infer.rs index 675c18b1..74f32e85 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -176,6 +176,28 @@ impl Infer { )) } + /// Tokenizer the input + #[instrument(skip_all)] + pub(crate) async fn tokenize( + &self, + request: GenerateRequest, + ) -> Result, InferError> { + // Tokenize request + let inputs = request.inputs; + let truncate = request.parameters.truncate; + let encoding = self + .validation + .tokenize(inputs, truncate) + .await + .map_err(|err| { + tracing::error!("Tokenization {err}"); + err + })?; + + // Return Encoding + Ok(encoding.map(|(encoding, _)| encoding)) + } + /// Apply the chat template to the chat request #[instrument(skip_all)] pub(crate) fn apply_chat_template(&self, messages: Vec) -> Result { diff --git a/router/src/lib.rs b/router/src/lib.rs index 894ab466..2bfbbacd 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -444,6 +444,18 @@ pub struct Token { special: bool, } +#[derive(Debug, Serialize, ToSchema)] +pub struct SimpleToken { + #[schema(example = 0)] + id: u32, + #[schema(example = "test")] + text: String, + #[schema(example = 0)] + start: usize, + #[schema(example = 2)] + stop: usize, +} + #[derive(Serialize, ToSchema)] #[serde(rename_all(serialize = "snake_case"))] pub(crate) enum FinishReason { diff --git a/router/src/server.rs b/router/src/server.rs index d18be174..a6462731 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -7,8 +7,8 @@ use crate::validation::ValidationError; use crate::{ BestOfSequence, ChatCompletion, ChatCompletionChunk, ChatRequest, CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, - HubModelInfo, HubTokenizerConfig, Infer, Info, PrefillToken, StreamDetails, StreamResponse, - Token, Validation, + HubModelInfo, HubTokenizerConfig, Infer, Info, PrefillToken, SimpleToken, StreamDetails, + StreamResponse, Token, Validation, }; use axum::extract::Extension; use 
axum::http::{HeaderMap, Method, StatusCode}; @@ -534,7 +534,7 @@ async fn generate_stream_internal( path = "/v1/chat/completions", request_body = ChatRequest, responses( - (status = 200, description = "Generated Text", body = GenerateResponse), + (status = 200, description = "Generated Text", body = ChatCompletionChunk), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, @@ -674,6 +674,52 @@ async fn chat_completions( } } +/// Tokenize inputs +#[utoipa::path( + post, + tag = "Text Generation Inference", + path = "/tokenize", + request_body = TokenizeRequest, + responses( + (status = 200, description = "Tokenized ids", body = TokenizeResponse), + (status = 404, description = "No tokenizer found", body = ErrorResponse, + example = json ! ({"error": "No fast tokenizer available"})), + ) + )] +#[instrument(skip_all)] +async fn tokenize( + Extension(infer): Extension, + Json(req): Json, +) -> Result)> { + let input = req.inputs.clone(); + let encoding = infer.tokenize(req).await?; + if let Some(encoding) = encoding { + let tokens: Vec = encoding + .get_ids() + .iter() + .zip(encoding.get_offsets()) + .map(|(&id, &(start, stop))| { + let text: String = input.chars().skip(start).take(stop - start).collect(); + SimpleToken { + id, + text, + start, + stop, + } + }) + .collect(); + Ok(Json(tokens).into_response()) + } else { + Err(( + StatusCode::NOT_FOUND, + Json(ErrorResponse { + error: "No fast tokenizer or tokenizer.json for this model".to_string(), + error_type: "no fast tokenizer".to_string(), + }), + )) + } +} + /// Prometheus metrics scrape endpoint #[utoipa::path( get, @@ -721,6 +767,8 @@ pub async fn run( compat_generate, generate, generate_stream, + chat_completions, + tokenize, metrics, ), components( @@ -871,6 +919,7 @@ pub async fn run( .route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) .route("/v1/chat/completions", post(chat_completions)) + .route("/tokenize", post(tokenize)) .route("/health", get(health)) .route("/ping", get(health)) .route("/metrics", get(metrics)); diff --git a/router/src/validation.rs b/router/src/validation.rs index bbd94119..62ab299f 100644 --- a/router/src/validation.rs +++ b/router/src/validation.rs @@ -79,12 +79,11 @@ impl Validation { } #[instrument(skip(self, inputs))] - async fn validate_input( + pub async fn tokenize( &self, inputs: String, truncate: Option, - max_new_tokens: Option, - ) -> Result<(String, usize, u32), ValidationError> { + ) -> Result, ValidationError> { // If we have a fast tokenizer if let Some(sender) = &self.sender { // Create response channel @@ -97,8 +96,23 @@ impl Validation { // Await on response channel // Unwrap is safe here - let (inputs, _) = response_receiver.await.unwrap()?; + let encoding = response_receiver.await.unwrap()?; + Ok(Some(encoding)) + } else { + Ok(None) + } + } + #[instrument(skip(self, inputs))] + async fn validate_input( + &self, + inputs: String, + truncate: Option, + max_new_tokens: Option, + ) -> Result<(String, usize, u32), ValidationError> { + // If we have a fast tokenizer + if let Some((encoding, inputs)) = self.tokenize(inputs.clone(), truncate).await? 
{
+            // Create response channel
             let input_length = if self.skip_tokenizer_in_tgi {
                 inputs.chars().filter(|&c| c == ',').count() + 1
             } else {
@@ -370,36 +384,31 @@ fn tokenizer_worker(tokenizer: Tokenizer, mut receiver: mpsc::UnboundedReceiver<
 /// Get input length and optionally truncate it
 fn prepare_input(
-    inputs: String,
+    mut inputs: String,
     truncate: Option<usize>,
     tokenizer: &Tokenizer,
-) -> Result<(String, usize), ValidationError> {
+) -> Result<(tokenizers::Encoding, String), ValidationError> {
     // Get the number of tokens in the input
     let mut encoding = tokenizer
         .encode(inputs.clone(), true)
         .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

     // Optionally truncate
-    let (inputs, input_length) = match truncate {
-        // Truncate is some and < encoding length
-        Some(truncate) if truncate < encoding.len() => {
-            // truncate encoding and decode new inputs
+    if let Some(truncate) = truncate {
+        if truncate < encoding.len() {
             encoding.truncate(truncate, 0, TruncationDirection::Left);
-            let inputs = tokenizer
+            inputs = tokenizer
                 .decode(encoding.get_ids(), false)
                 .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;
-            (inputs, encoding.len())
         }
-        // Nothing to do
-        _ => (inputs, encoding.len()),
-    };
+    }

-    Ok((inputs, input_length))
+    Ok((encoding, inputs))
 }

 type TokenizerRequest = (
     (String, Option<usize>),
-    oneshot::Sender<Result<(String, usize), ValidationError>>,
+    oneshot::Sender<Result<(tokenizers::Encoding, String), ValidationError>>,
     Span,
 );

From b2fc097b2b4b75cbc251e56512c6e94e76c16a3f Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 25 Jan 2024 09:37:53 -0500
Subject: [PATCH 042/153] feat: adds phi model (#1442)

This PR adds basic modeling for phi-2

run
```bash
text-generation-server \
    serve \
    microsoft/phi-2 \
    --revision 834565c23f9b28b96ccbeabe614dd906b6db551a
```

test
```bash
curl -s localhost:3000/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json' | jq .
```

notes
- recently (~1 day ago) the Phi weights and model were updated to accommodate adding [GQA/MQA attention to the model.](https://github.com/huggingface/transformers/pull/28163) This impl expects the original model format, so a fixed revision is required at the moment.
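  As a minimal sketch of the same revision pin applied through the launcher rather
  than `text-generation-server` directly (assuming the launcher's usual
  `--model-id`/`--revision` flags; verify against `text-generation-launcher --help`):

  ```bash
  text-generation-launcher \
      --model-id microsoft/phi-2 \
      --revision 834565c23f9b28b96ccbeabe614dd906b6db551a
  ```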
- this PR only includes a basic implementation of the model and can later be extended for support Flash and Sharded versions as well as make use of better optimization --- .../test_flash_phi/test_flash_phi.json | 84 ++++ .../test_flash_phi_all_params.json | 60 +++ .../test_flash_phi/test_flash_phi_load.json | 338 +++++++++++++++ integration-tests/models/test_flash_phi.py | 65 +++ .../custom_modeling/flash_phi_modeling.py | 400 ++++++++++++++++++ .../models/custom_modeling/phi_modeling.py | 308 ++++++++++++++ .../models/flash_phi.py | 102 +++++ server/text_generation_server/models/phi.py | 63 +++ 8 files changed, 1420 insertions(+) create mode 100644 integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json create mode 100644 integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json create mode 100644 integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json create mode 100644 integration-tests/models/test_flash_phi.py create mode 100644 server/text_generation_server/models/custom_modeling/flash_phi_modeling.py create mode 100644 server/text_generation_server/models/custom_modeling/phi_modeling.py create mode 100644 server/text_generation_server/models/flash_phi.py create mode 100644 server/text_generation_server/models/phi.py diff --git a/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json new file mode 100644 index 00000000..51d969b2 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json @@ -0,0 +1,84 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } + ], + "seed": null, + "tokens": [ + { + "id": 25, + "logprob": -2.3203125, + "special": false, + "text": ":" + }, + { + "id": 1391, + "logprob": -0.98779297, + "special": false, + "text": " {" + }, + { + "id": 25927, + "logprob": -0.76660156, + "special": false, + "text": "request" + }, + { + "id": 92, + "logprob": -0.7246094, + "special": false, + "text": "}" + }, + { + "id": 4943, + "logprob": -0.41333008, + "special": false, + "text": "\")" + }, + { + "id": 198, + "logprob": -0.11785889, + "special": false, + "text": "\n" + }, + { + "id": 50280, + "logprob": -0.97265625, + "special": false, + "text": " " + }, + { + "id": 26209, + "logprob": -1.4414062, + "special": false, + "text": "response" + }, + { + "id": 796, + "logprob": -0.0569458, + "special": false, + "text": " =" + }, + { + "id": 2116, + "logprob": -1.1533203, + "special": false, + "text": " self" + } + ], + "top_tokens": null + }, + "generated_text": ": {request}\")\n response = self" +} diff --git a/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json new file mode 100644 index 00000000..221ff13d --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json @@ -0,0 +1,60 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "stop_sequence", + "generated_tokens": 6, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } + ], + "seed": 0, + "tokens": [ + { + "id": 284, + "logprob": -0.19421387, + "special": false, + "text": " 
to" + }, + { + "id": 3758, + "logprob": -0.62597656, + "special": false, + "text": " send" + }, + { + "id": 1366, + "logprob": -0.87060547, + "special": false, + "text": " data" + }, + { + "id": 625, + "logprob": -0.88427734, + "special": false, + "text": " over" + }, + { + "id": 257, + "logprob": -1.0830078, + "special": false, + "text": " a" + }, + { + "id": 3127, + "logprob": -1.9462891, + "special": false, + "text": " network" + } + ], + "top_tokens": null + }, + "generated_text": "Test request to send data over a network" +} diff --git a/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json new file mode 100644 index 00000000..62f7fd32 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_load.json @@ -0,0 +1,338 @@ +[ + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } + ], + "seed": null, + "tokens": [ + { + "id": 25, + "logprob": -2.3203125, + "special": false, + "text": ":" + }, + { + "id": 1391, + "logprob": -0.98779297, + "special": false, + "text": " {" + }, + { + "id": 25927, + "logprob": -0.7729492, + "special": false, + "text": "request" + }, + { + "id": 92, + "logprob": -0.7241211, + "special": false, + "text": "}" + }, + { + "id": 4943, + "logprob": -0.4091797, + "special": false, + "text": "\")" + }, + { + "id": 198, + "logprob": -0.119018555, + "special": false, + "text": "\n" + }, + { + "id": 50280, + "logprob": -0.9707031, + "special": false, + "text": " " + }, + { + "id": 26209, + "logprob": -1.4414062, + "special": false, + "text": "response" + }, + { + "id": 796, + "logprob": -0.056854248, + "special": false, + "text": " =" + }, + { + "id": 2116, + "logprob": -1.1533203, + "special": false, + "text": " self" + } + ], + "top_tokens": null + }, + "generated_text": ": {request}\")\n response = self" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } + ], + "seed": null, + "tokens": [ + { + "id": 25, + "logprob": -2.3203125, + "special": false, + "text": ":" + }, + { + "id": 1391, + "logprob": -0.98779297, + "special": false, + "text": " {" + }, + { + "id": 25927, + "logprob": -0.7729492, + "special": false, + "text": "request" + }, + { + "id": 92, + "logprob": -0.7241211, + "special": false, + "text": "}" + }, + { + "id": 4943, + "logprob": -0.4091797, + "special": false, + "text": "\")" + }, + { + "id": 198, + "logprob": -0.119018555, + "special": false, + "text": "\n" + }, + { + "id": 50280, + "logprob": -0.9707031, + "special": false, + "text": " " + }, + { + "id": 26209, + "logprob": -1.4414062, + "special": false, + "text": "response" + }, + { + "id": 796, + "logprob": -0.056854248, + "special": false, + "text": " =" + }, + { + "id": 2116, + "logprob": -1.1533203, + "special": false, + "text": " self" + } + ], + "top_tokens": null + }, + "generated_text": ": {request}\")\n response = self" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } 
+ ], + "seed": null, + "tokens": [ + { + "id": 25, + "logprob": -2.3203125, + "special": false, + "text": ":" + }, + { + "id": 1391, + "logprob": -0.98779297, + "special": false, + "text": " {" + }, + { + "id": 25927, + "logprob": -0.7729492, + "special": false, + "text": "request" + }, + { + "id": 92, + "logprob": -0.7241211, + "special": false, + "text": "}" + }, + { + "id": 4943, + "logprob": -0.4091797, + "special": false, + "text": "\")" + }, + { + "id": 198, + "logprob": -0.119018555, + "special": false, + "text": "\n" + }, + { + "id": 50280, + "logprob": -0.9707031, + "special": false, + "text": " " + }, + { + "id": 26209, + "logprob": -1.4414062, + "special": false, + "text": "response" + }, + { + "id": 796, + "logprob": -0.056854248, + "special": false, + "text": " =" + }, + { + "id": 2116, + "logprob": -1.1533203, + "special": false, + "text": " self" + } + ], + "top_tokens": null + }, + "generated_text": ": {request}\")\n response = self" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 14402, + "logprob": null, + "text": "Test" + }, + { + "id": 2581, + "logprob": -11.6171875, + "text": " request" + } + ], + "seed": null, + "tokens": [ + { + "id": 25, + "logprob": -2.3203125, + "special": false, + "text": ":" + }, + { + "id": 1391, + "logprob": -0.98779297, + "special": false, + "text": " {" + }, + { + "id": 25927, + "logprob": -0.7729492, + "special": false, + "text": "request" + }, + { + "id": 92, + "logprob": -0.7241211, + "special": false, + "text": "}" + }, + { + "id": 4943, + "logprob": -0.4091797, + "special": false, + "text": "\")" + }, + { + "id": 198, + "logprob": -0.119018555, + "special": false, + "text": "\n" + }, + { + "id": 50280, + "logprob": -0.9707031, + "special": false, + "text": " " + }, + { + "id": 26209, + "logprob": -1.4414062, + "special": false, + "text": "response" + }, + { + "id": 796, + "logprob": -0.056854248, + "special": false, + "text": " =" + }, + { + "id": 2116, + "logprob": -1.1533203, + "special": false, + "text": " self" + } + ], + "top_tokens": null + }, + "generated_text": ": {request}\")\n response = self" + } +] diff --git a/integration-tests/models/test_flash_phi.py b/integration-tests/models/test_flash_phi.py new file mode 100644 index 00000000..6391f2a1 --- /dev/null +++ b/integration-tests/models/test_flash_phi.py @@ -0,0 +1,65 @@ +import pytest + + +@pytest.fixture(scope="module") +def flash_phi_handle(launcher): + with launcher("microsoft/phi-2", num_shard=1) as handle: + yield handle + + +@pytest.fixture(scope="module") +async def flash_phi(flash_phi_handle): + await flash_phi_handle.health(300) + return flash_phi_handle.client + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_phi(flash_phi, response_snapshot): + response = await flash_phi.generate( + "Test request", max_new_tokens=10, decoder_input_details=True + ) + + assert response.details.generated_tokens == 10 + assert response.generated_text == ": {request}\")\n response = self" + assert response == response_snapshot + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_phi_all_params(flash_phi, response_snapshot): + response = await flash_phi.generate( + "Test request", + max_new_tokens=10, + repetition_penalty=1.2, + return_full_text=True, + stop_sequences=["network"], + temperature=0.5, + top_p=0.9, + top_k=10, + truncate=5, + typical_p=0.9, + watermark=True, + decoder_input_details=True, + seed=0, + ) + + assert response.details.generated_tokens == 6 
+ assert response.generated_text == "Test request to send data over a network" + assert response == response_snapshot + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_phi_load(flash_phi, generate_load, response_snapshot): + responses = await generate_load( + flash_phi, "Test request", max_new_tokens=10, n=4 + ) + + assert len(responses) == 4 + assert all( + [r.generated_text == responses[0].generated_text for r in responses] + ), f"{[r.generated_text for r in responses]}" + assert responses[0].generated_text == ": {request}\")\n response = self" + + assert responses == response_snapshot diff --git a/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py b/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py new file mode 100644 index 00000000..d103973f --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py @@ -0,0 +1,400 @@ +import torch +import torch.distributed + +from torch import nn +from transformers.activations import ACT2FN +from transformers.configuration_utils import PretrainedConfig +from typing import Optional, List, Tuple + +from text_generation_server.utils import paged_attention, flash_attn +from text_generation_server.utils.layers import ( + TensorParallelRowLinear, + TensorParallelColumnLinear, + TensorParallelEmbedding, + PositionRotaryEmbedding, + TensorParallelHead, + get_linear, + FastLayerNorm, +) + +class PhiConfig(PretrainedConfig): + def __init__( + self, + vocab_size=51200, + hidden_size=2560, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=32, + hidden_act="gelu_fast", # llama uses silu + layer_norm_eps=1e-05, # rms in llama, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + tie_word_embeddings=False, + rope_theta=10000.0, + resid_pdrop=0.1, # llama doesn't have this + partial_rotary_factor=0.5, # important difference between llama and phi + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.layer_norm_eps = layer_norm_eps + self.rope_theta = rope_theta + self.resid_pdrop = resid_pdrop + self.partial_rotary_factor = partial_rotary_factor + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + +# this is the same as llama except for Phi uses bias=True +def load_attention(config, prefix, weights): + if config.num_attention_heads != config.num_key_value_heads: + return _load_gqa(config, prefix, weights) + else: + return TensorParallelColumnLinear.load_multi( + config, + prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], + dim=0, + weights=weights, + bias=True, + ) + +def _load_gqa(config, prefix: str, weights): + assert config.hidden_size % config.num_attention_heads == 0 + assert config.num_attention_heads % weights.process_group.size() == 0 + + weight = weights.get_multi_weights_col( + prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], + quantize=config.quantize, + dim=0, + ) + + if config.quantize not in ["gptq", "awq"]: + weight = weight.to(dtype=weights.dtype).to(device=weights.device) + + head_size = config.hidden_size // config.num_attention_heads + num_heads = config.num_attention_heads // weights.process_group.size() + num_key_value_heads = config.num_key_value_heads // 
weights.process_group.size() + assert list(weight.shape) == [ + (num_heads + 2 * num_key_value_heads) * head_size, + config.hidden_size, + ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" + + # this is the same as llama except for Phi uses bias=True + return TensorParallelColumnLinear( + get_linear(weight, bias=True, quantize=config.quantize) + ) + +class FlashPhiAttention(torch.nn.Module): + def __init__( + self, + prefix: str, + config, + weights, + ): + super().__init__() + self.num_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.num_heads + + self.softmax_scale = self.head_size**-0.5 + self.rotary_dim = int(config.partial_rotary_factor * self.head_size) + + self.rotary_emb = PositionRotaryEmbedding.static( + config=config, + dim=self.rotary_dim, + base=config.rope_theta, + device=weights.device, + ) + + if self.num_heads % weights.process_group.size() != 0: + raise ValueError( + f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " + f"and `num_shards`: {weights.process_group.size()}" + ) + + self.num_heads = self.num_heads // weights.process_group.size() + self.num_key_value_heads = ( + config.num_key_value_heads // weights.process_group.size() + ) + + self.query_key_value = load_attention(config, prefix, weights) + + # in llama the dense layer is called "o_proj" and has bias=False + self.dense = TensorParallelRowLinear.load( + config, + prefix=f"{prefix}.dense", + weights=weights, + bias=True, + ) + self.num_groups = self.num_heads // self.num_key_value_heads + self.kv_head_mapping = torch.arange( + 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device + ).repeat_interleave(self.num_groups) + + def forward( + self, + hidden_states, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + ): + # Compute query, key, value and split + qkv = self.query_key_value(hidden_states) + query, kv = qkv.split( + [ + self.head_size * self.num_heads, + 2 * self.head_size * self.num_key_value_heads, + ], + dim=1, + ) + + # Reshape query and key for rotary embeddings + query = query.view(-1, self.num_heads, self.head_size) + kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) + + # NOTE: this is the main difference between Llama and Phi + # in llama the rotary embeddings are applied to the whole query and key. 
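+        # (the tensor slices passed below are views, so the rotation mutates query and kv
+        #  in place while the remaining non-rotary head dimensions keep their values)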
+ # Phi uses PARTIAL rotary embeddings, which are applied to the first 32 dimensions + # + # Apply partial positional embeddings in place + self.rotary_emb( + query[:, :, :self.rotary_dim], kv[:, 0, :, :self.rotary_dim], + cos, sin + ) + + # Reshape key and value and cache + paged_attention.reshape_and_cache( + kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots + ) + + # output tensor + attn_output = torch.empty_like(query) + + # Prefill + if cu_seqlen_prefill is not None: + flash_attn.attention( + query, + torch.select(kv, dim=1, index=0), + torch.select(kv, dim=1, index=1), + attn_output, + cu_seqlen_prefill, + max_s, + self.softmax_scale, + ) + # Decode + else: + paged_attention.attention( + attn_output, + query, + kv_cache[0], + kv_cache[1], + self.kv_head_mapping, + self.softmax_scale, + block_tables, + input_lengths, + max_s, + ) + + return self.dense(attn_output.view(-1, self.num_heads*self.head_size)) + +class PhiMLP(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + act = config.hidden_act + self.act = ( + ACT2FN[act] + if "gelu" not in act + else lambda x: torch.nn.functional.gelu( + x, + approximate="tanh" + if act in ["gelu_fast", "gelu_pytorch_tanh"] + else "none", + ) + ) + + # llama weights are up_proj and down_proj and bias=False + self.up_proj = TensorParallelRowLinear.load( + config, + prefix=f"{prefix}.fc1", + weights=weights, + bias=True, + ) + self.down_proj = TensorParallelRowLinear.load( + config, + prefix=f"{prefix}.fc2", + weights=weights, + bias=True, + ) + + def forward(self, hidden_states): + # NOTE: Llama requires the gate up states to an intermediate size + # Phi does not and we can avoid the `view` operation + return self.down_proj(self.act(self.up_proj(hidden_states))) + + +class FlashPhiLayer(nn.Module): + def __init__(self, layer_id, config, weights): + super().__init__() + prefix = f"model.layers.{layer_id}" + self.self_attn = FlashPhiAttention( + prefix=f"{prefix}.self_attn", config=config, weights=weights + ) + self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) + self.input_layernorm = FastLayerNorm.load( + prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.layer_norm_eps + ) + self.resid_dropout = torch.nn.Dropout(config.resid_pdrop) + + def forward( + self, + hidden_states, + residual, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + ): + hidden_states, res = self.input_layernorm(hidden_states, residual) + # Self Attention + attn_output = self.self_attn( + hidden_states, + cos, + sin, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + ) + + hidden_states = self.resid_dropout(attn_output).add(self.resid_dropout(self.mlp(hidden_states))) + + return hidden_states, res + +class FlashPhiModel(torch.nn.Module): + def __init__(self, config, weights): + super().__init__() + + process_group = weights.process_group + self.tp_rank = process_group.rank() + self.tp_world_size = process_group.size() + self.embed_tokens = TensorParallelEmbedding( + prefix="model.embed_tokens", weights=weights + ) + self.layers = nn.ModuleList( + [ + FlashPhiLayer( + layer_id, + config, + weights, + ) + for layer_id in range(config.num_hidden_layers) + ] + ) + self.gradient_checkpointing = False + + self.head_size = self.layers[0].self_attn.head_size + self.num_heads = self.layers[0].self_attn.num_heads + self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads + + self.norm = FastLayerNorm.load( + 
prefix="model.final_layernorm", + weights=weights, + eps=config.layer_norm_eps, + ) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + ) -> torch.Tensor: + hidden_states = self.embed_tokens(input_ids) + + # Get rotary cos and sin for this forward + # Avoid to index in each layer + cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( + position_ids, max_s, hidden_states.dtype + ) + + residual = None + for i, layer in enumerate(self.layers): + hidden_states, residual = layer( + hidden_states, + residual, + cos, + sin, + cu_seqlen_prefill, + kv_cache[i], + block_tables, + slots, + input_lengths, + max_s, + ) + + hidden_states, _ = self.norm(hidden_states, residual) + + return hidden_states + +class FlashPhiForCausalLM(torch.nn.Module): + def __init__(self, config, weights): + super().__init__() + + self.model = FlashPhiModel(config, weights) + self.lm_head = TensorParallelHead.load( + config, + prefix="lm_head", + weights=weights, + ) + + def forward( + self, + input_ids: torch.Tensor, + position_ids: torch.Tensor, + cu_seqlen_prefill: Optional[torch.Tensor], + kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], + block_tables: torch.Tensor, + slots: torch.Tensor, + input_lengths: torch.Tensor, + max_s: int, + lm_head_indices: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + hidden_states = self.model( + input_ids, + position_ids, + cu_seqlen_prefill, + kv_cache, + block_tables, + slots, + input_lengths, + max_s, + ) + if lm_head_indices is not None: + hidden_states = hidden_states[lm_head_indices] + + return self.lm_head(hidden_states) diff --git a/server/text_generation_server/models/custom_modeling/phi_modeling.py b/server/text_generation_server/models/custom_modeling/phi_modeling.py new file mode 100644 index 00000000..f9999537 --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/phi_modeling.py @@ -0,0 +1,308 @@ +# imlementation of the PhiModel and PhiForCausalLM classes + +import torch +import torch.distributed + +import math +from torch import nn +from typing import Optional, List, Tuple, Any +from transformers.configuration_utils import PretrainedConfig +from transformers.modeling_outputs import CausalLMOutputWithPast + +from text_generation_server.utils.layers import ( + TensorParallelRowLinear, + TensorParallelColumnLinear, + TensorParallelEmbedding, + TensorParallelHead, + FastLinear, +) + + +# PhiConfig is the configuration class for the PhiModel. 
+class PhiConfig(PretrainedConfig): + def __init__( + self, + vocab_size=51200, + n_positions=2048, + n_embd=2560, + n_layer=32, + n_inner=None, + n_head=32, + rotary_dim=32, + layer_norm_epsilon=1e-5, + tie_word_embeddings=False, + pad_vocab_size_multiple=64, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + no_bias=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_inner = n_inner + self.n_head = n_head + self.rotary_dim = rotary_dim + + self.layer_norm_epsilon = layer_norm_epsilon + self.tie_word_embeddings = tie_word_embeddings + self.pad_vocab_size_multiple = pad_vocab_size_multiple + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.no_bias = no_bias + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + +# RotaryEmbedding is a class that implements the rotary embedding. +class RotaryEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + inv_freq = [ + 1.0 / 10000.0 ** (i / dim) + for i in range(0, dim, 2) + ] + inv_freq_len = len(inv_freq) + inv_freq = torch.tensor(inv_freq).view(1, inv_freq_len) + t = torch.arange(0, max_seq_len, dtype=torch.float).view(max_seq_len, 1) + freqs = t.matmul(inv_freq) + self.sin = freqs.sin() + self.cos = freqs.cos() + + def apply_rotary_emb_qkv(self, qkv, seqlen_offset): + b_size, seqlen, three, _, _headdim = qkv.shape + if three != 3: + raise Exception("unexpected shape for qkv") + _, rotary_dim = self.cos.shape + rotary_dim = rotary_dim * 2 + q_rot = qkv[:, :, 0, :, :rotary_dim] + q_pass = qkv[:, :, 0, :, rotary_dim:] + k_rot = qkv[:, :, 1, :, :rotary_dim] + k_pass = qkv[:, :, 1, :, rotary_dim:] + q12 = torch.chunk(q_rot, 2, dim=-1) + k12 = torch.chunk(k_rot, 2, dim=-1) + q1, q2 = q12[0], q12[1] + k1, k2 = k12[0], k12[1] + c = self.cos.narrow(0, seqlen_offset, seqlen).unsqueeze(1) + s = self.sin.narrow(0, seqlen_offset, seqlen).unsqueeze(1) + q_rot = torch.cat( + [ + q1 * c - q2 * s, + q1 * s + q2 * c, + ], + dim=-1, + ) + k_rot = torch.cat( + [ + k1 * c - k2 * s, + k1 * s + k2 * c, + ], + dim=-1, + ) + q = torch.cat([q_rot, q_pass], dim=-1) + k = torch.cat([k_rot, k_pass], dim=-1) + v = qkv[:, :, 2] + return q, k, v + + +# PhiCausalLMHead is the head of the PhiModel. It is a linear layer with a layer norm. +class PhiCausalLMHead(nn.Module): + def __init__(self, config, weights): + super().__init__() + self.ln = nn.LayerNorm.load( + prefix="lm_head.ln", + weights=weights, + eps=config.layer_norm_epsilon, + ) + self.linear = TensorParallelHead.load( + config=config, prefix="lm_head.linear", weights=weights + ) + + def forward(self, hidden_states): + hidden_states = self.ln(hidden_states) + hidden_states = self.linear(hidden_states) + return hidden_states + +# PhiMHA is a multi-head attention layer. This layer uses an attention mask to prevent tokens from attending to subsequent tokens. 
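+# Wqkv packs the query, key and value projections into one linear layer; its output is
+# reshaped to (batch, seqlen, 3, n_head, head_dim) and the kv cache grows along the
+# sequence dimension between decoding steps.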
+class PhiMHA(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + self.Wqkv = TensorParallelColumnLinear.load( + config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias + ) + self.out_proj = TensorParallelRowLinear.load( + config, + prefix=f"{prefix}.out_proj", + weights=weights, + bias=not config.no_bias, + ) + self.op_size = config.n_embd + self.head_dim = int(config.n_embd / config.n_head) + self.num_heads = config.n_head + self.rotary_emb = RotaryEmbedding( + config.rotary_dim, + config.n_positions, + ) + self.softmax_scale = 1.0 / math.sqrt(self.head_dim) + + def forward( + self, + hidden_states, + past_kv_cache, + attention_mask=None, + ): + b_size, seq_len, _n_embd = hidden_states.shape + qkv = self.Wqkv(hidden_states) + qkv = qkv.view(b_size, seq_len, 3, self.num_heads, self.head_dim) + seqlen_offset = 0 if past_kv_cache is None else past_kv_cache[0].shape[1] + q, k, v = self.rotary_emb.apply_rotary_emb_qkv(qkv, seqlen_offset) + + # if there is a kv_cache, then we need to concatenate + if past_kv_cache is not None: + prev_k, prev_v = past_kv_cache + k = torch.cat([prev_k, k], dim=1) + v = torch.cat([prev_v, v], dim=1) + + past_kv_cache = [k, v] + attn_weights = torch.einsum('bthd,bshd->bhts', q, k * self.softmax_scale) + + if attention_mask is not None: + seqlen_k = k.shape[1] + seqlen_q = q.shape[1] + causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device), 1) + attn_weights = attn_weights + causal_mask.to(dtype=attn_weights.dtype) + + attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) + attn_output = attn_weights.matmul(v.transpose(1, 2)).squeeze(0) + attn_output = attn_output.view((b_size, self.num_heads, seq_len, self.head_dim)).transpose(1, 2).flatten(-2) + return self.out_proj(attn_output), past_kv_cache + +# PhiMLP is a multi-layer perceptron. It contains two linear layers with a gelu activation function. +class PhiMLP(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + + self.n_inner = config.n_inner + self.fc1 = FastLinear.load( + config=config, + prefix=f"{prefix}.fc1", + weights=weights, + bias=False, + ) + self.fc2 = FastLinear.load( + config=config, + prefix=f"{prefix}.fc2", + weights=weights, + bias=False, + ) + self.activation = torch.nn.functional.gelu + + def forward(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + +# PhiBlock is a single transformer block. It contains a layer norm, a multi-head attention layer and an multi-layer perceptron. +class PhiBlock(nn.Module): + def __init__(self, layer_id, config, weights): + super().__init__() + self.layer_id = layer_id + self.layer_norm = nn.LayerNorm.load(prefix=f"{layer_id}.ln", weights=weights, eps=config.layer_norm_epsilon) + self.mixer = PhiMHA(prefix=f"{layer_id}.mixer", config=config, weights=weights) + self.mlp = PhiMLP(prefix=f"{layer_id}.mlp", config=config, weights=weights) + + def forward( + self, + hidden_states, + kv_cache, + attention_mask, + ): + residual = hidden_states + hidden_states = self.layer_norm(hidden_states) + attn_outputs, past_kv_cache = self.mixer(hidden_states, kv_cache, attention_mask) + feed_forward_hidden_states = self.mlp(hidden_states) + out = attn_outputs + feed_forward_hidden_states + residual + return out, past_kv_cache + +# PhiModel implements the embedding layer and the transformer blocks. 
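+# Weights are read from the "transformer.*" prefixes of the checkpoint (embd.wte for the
+# token embeddings, h.{i} for each block); the causal mask is only applied when seq_len > 1.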
+class PhiModel(nn.Module): + def __init__(self, config, weights): + super().__init__() + self.tp_rank = weights.process_group.rank() + self.tp_world_size = weights.process_group.size() + self.embed_tokens = TensorParallelEmbedding( + prefix="transformer.embd.wte", weights=weights + ) + self.blocks = nn.ModuleList( + [PhiBlock(f"transformer.h.{layer_id}", config, weights) for layer_id in range(config.n_layer)] + ) + + def forward( + self, + input_ids: torch.LongTensor, + past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, + attention_mask: Optional[torch.ByteTensor] = None, + return_dict: Optional[bool] = None, + use_cache: Optional[bool] = None, + ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: + hidden_states = self.embed_tokens(input_ids) + seq_len = hidden_states.shape[1] + mask = None if seq_len <= 1 else attention_mask + + past_key_values = [None] * len(self.blocks) if past_key_values is None else past_key_values + + for index, block in enumerate(self.blocks): + hidden_states, new_key_values = block(hidden_states, past_key_values[index], mask) + past_key_values[index] = new_key_values + + return hidden_states, past_key_values + +# PhiForCausalLM wraps the PhiModel and PhiCausalLMHead together and returns a CausalLMOutputWithPast object. +class PhiForCausalLM(torch.nn.Module): + def __init__(self, config, weights): + super().__init__() + self.model = PhiModel(config, weights) + self.lm_head = PhiCausalLMHead(config, weights) + + def forward( + self, + input_ids: torch.LongTensor, + past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, + attention_mask: Optional[torch.ByteTensor] = None, + return_dict: Optional[bool] = None, + use_cache: Optional[bool] = None, + labels: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: + model_output = self.model( + input_ids, past_key_values, attention_mask, return_dict, use_cache + ) + logits = self.lm_head(model_output[0]) + + loss = None + if labels is not None: + loss = nn.CrossEntropyLoss()( + logits[:, :-1].view(-1, logits.size(-1)), + labels[:, 1:].view(-1) + ) + + if not return_dict: + return ((loss,) + (logits,) + model_output[1:]) if loss is not None else (logits,) + model_output[1:] + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=model_output[1], + hidden_states=None, + attentions=None, + ) + + diff --git a/server/text_generation_server/models/flash_phi.py b/server/text_generation_server/models/flash_phi.py new file mode 100644 index 00000000..1c49f2a9 --- /dev/null +++ b/server/text_generation_server/models/flash_phi.py @@ -0,0 +1,102 @@ +import torch +import torch.distributed + +from opentelemetry import trace +from transformers import AutoConfig, AutoTokenizer +from typing import Optional + +from text_generation_server.models import FlashCausalLM +from text_generation_server.models.custom_modeling.flash_phi_modeling import ( + FlashPhiForCausalLM, + PhiConfig, +) +from text_generation_server.utils import ( + initialize_torch_distributed, + weight_files, + Weights, +) + +tracer = trace.get_tracer(__name__) + + +class FlashPhi(FlashCausalLM): + def __init__( + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, + use_medusa: Optional[str] = None, + ): + self.process_group, rank, world_size = initialize_torch_distributed() + if torch.cuda.is_available(): + device = torch.device(f"cuda:{rank}") + 
dtype = torch.float16 if dtype is None else dtype + else: + raise NotImplementedError("FlashPhi is only available on GPU") + + tokenizer = AutoTokenizer.from_pretrained( + model_id, + revision=revision, + padding_side="left", + truncation_side="left", + trust_remote_code=trust_remote_code, + ) + + config = PhiConfig.from_pretrained( + model_id, revision=revision, trust_remote_code=trust_remote_code + ) + config.quantize = quantize + + torch.distributed.barrier(group=self.process_group) + + filenames = weight_files(model_id, revision=revision, extension=".safetensors") + weights = Weights(filenames, device, dtype, process_group=self.process_group) + if config.quantize in ["gptq", "awq"]: + weights._set_gptq_params(model_id, revision) + + model = FlashPhiForCausalLM(config, weights) + if use_medusa: + from text_generation_server.utils.medusa import MedusaModel + from huggingface_hub import hf_hub_download + import json + import os + from pathlib import Path + + is_local_model = (Path(use_medusa).exists() and Path(use_medusa).is_dir()) or os.getenv( + "WEIGHTS_CACHE_OVERRIDE", None + ) is not None + + if not is_local_model: + medusa_config = hf_hub_download( + use_medusa, revision=revision, filename="config.json" + ) + medusa_head = hf_hub_download( + use_medusa, revision=revision, filename="medusa_lm_head.pt" + ) + else: + medusa_config = str(Path(use_medusa) / "config.json") + medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt") + + with open(medusa_config, "r") as f: + config = json.load(f) + medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" + weights = Weights( + [medusa_sf], device, dtype, process_group=self.process_group + ) + lm_head = model.lm_head + model.lm_head = MedusaModel(config, weights, lm_head) + + torch.distributed.barrier(group=self.process_group) + super(FlashPhi, self).__init__( + model=model, + tokenizer=tokenizer, + num_layers=len(model.model.layers), + num_kv_heads=model.model.num_key_value_heads, + head_size=model.model.head_size, + dtype=dtype, + device=device, + rank=rank, + world_size=world_size, + ) diff --git a/server/text_generation_server/models/phi.py b/server/text_generation_server/models/phi.py new file mode 100644 index 00000000..d477478a --- /dev/null +++ b/server/text_generation_server/models/phi.py @@ -0,0 +1,63 @@ +import torch +import torch.distributed + +from transformers import AutoConfig, AutoTokenizer +from typing import Optional, List, Tuple + +from text_generation_server.models import CausalLM +from text_generation_server.models.custom_modeling.phi_modeling import PhiConfig, PhiForCausalLM +from text_generation_server.utils import ( + initialize_torch_distributed, + weight_files, + Weights, +) + +class Phi(CausalLM): + def __init__( + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, + ): + self.process_group, _rank, _world_size = initialize_torch_distributed() + if torch.cuda.is_available(): + device = torch.device("cuda") + dtype = torch.float16 if dtype is None else dtype + else: + if quantize: + raise ValueError("quantization is not available on CPU") + + device = torch.device("cpu") + dtype = torch.float32 if dtype is None else dtype + + tokenizer = AutoTokenizer.from_pretrained( + model_id, + revision=revision, + padding_side="left", + truncation_side="left", + trust_remote_code=trust_remote_code, + ) + config = PhiConfig.from_pretrained( + model_id, revision=revision, trust_remote_code=trust_remote_code + ) + + 
tokenizer.bos_token_id = config.bos_token_id + tokenizer.eos_token_id = config.eos_token_id + tokenizer.pad_token = tokenizer.eos_token + + config.quantize = quantize + torch.distributed.barrier(group=self.process_group) + filenames = weight_files(model_id, revision=revision, extension=".safetensors") + weights = Weights(filenames, device, dtype, process_group=self.process_group) + model = PhiForCausalLM(config, weights) + torch.distributed.barrier(group=self.process_group) + super(CausalLM, self).__init__( + model=model, + tokenizer=tokenizer, + requires_padding=True, + dtype=dtype, + device=device, + ) + From ac0be8a6a4d58b4800918aa22d173679e1e2984b Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Thu, 25 Jan 2024 18:16:03 +0100 Subject: [PATCH 043/153] fix: read stderr in download (#1486) #1186 --- launcher/src/main.rs | 48 +++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/launcher/src/main.rs b/launcher/src/main.rs index 25c780ed..95256178 100644 --- a/launcher/src/main.rs +++ b/launcher/src/main.rs @@ -6,7 +6,7 @@ use nix::unistd::Pid; use serde::Deserialize; use std::env; use std::ffi::OsString; -use std::io::{BufRead, BufReader, Lines, Read}; +use std::io::{BufRead, BufReader, Lines}; use std::os::unix::process::{CommandExt, ExitStatusExt}; use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; @@ -497,6 +497,9 @@ fn shard_manager( // Safetensors load fast envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into())); + // Disable progress bar + envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); + // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( @@ -581,6 +584,13 @@ fn shard_manager( thread::spawn(move || { log_lines(shard_stdout_reader.lines()); }); + // We read stderr in another thread as it seems that lines() can block in some cases + let (err_sender, err_receiver) = mpsc::channel(); + thread::spawn(move || { + for line in shard_stderr_reader.lines().flatten() { + err_sender.send(line).unwrap_or(()); + } + }); let mut ready = false; let start_time = Instant::now(); @@ -588,13 +598,6 @@ fn shard_manager( loop { // Process exited if let Some(exit_status) = p.try_wait().unwrap() { - // We read stderr in another thread as it seems that lines() can block in some cases - let (err_sender, err_receiver) = mpsc::channel(); - thread::spawn(move || { - for line in shard_stderr_reader.lines().flatten() { - err_sender.send(line).unwrap_or(()); - } - }); let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; @@ -790,6 +793,9 @@ fn download_convert_model(args: &Args, running: Arc) -> Result<(), L // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); + // Disable progress bar + envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); + // If huggingface_hub_cache is set, pass it to the download process // Useful when running inside a docker container if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache { @@ -840,12 +846,20 @@ fn download_convert_model(args: &Args, running: Arc) -> Result<(), L } }; - // Redirect STDOUT to the console - let download_stdout = download_process.stdout.take().unwrap(); - let stdout = BufReader::new(download_stdout); + let download_stdout = BufReader::new(download_process.stdout.take().unwrap()); thread::spawn(move || { - 
log_lines(stdout.lines()); + log_lines(download_stdout.lines()); + }); + + let download_stderr = BufReader::new(download_process.stderr.take().unwrap()); + + // We read stderr in another thread as it seems that lines() can block in some cases + let (err_sender, err_receiver) = mpsc::channel(); + thread::spawn(move || { + for line in download_stderr.lines().flatten() { + err_sender.send(line).unwrap_or(()); + } }); loop { @@ -856,12 +870,10 @@ fn download_convert_model(args: &Args, running: Arc) -> Result<(), L } let mut err = String::new(); - download_process - .stderr - .take() - .unwrap() - .read_to_string(&mut err) - .unwrap(); + while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { + err = err + "\n" + &line; + } + if let Some(signal) = status.signal() { tracing::error!( "Download process was signaled to shutdown with signal {signal}: {err}" From a1124f7b8b258dfe5ab7e518f1ff20c7f5070217 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Fri, 26 Jan 2024 10:13:23 +0100 Subject: [PATCH 044/153] Update the docs --- docs/source/supported_models.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md index dce4f2f9..004790ab 100644 --- a/docs/source/supported_models.md +++ b/docs/source/supported_models.md @@ -19,7 +19,9 @@ The following models are optimized and can be served with TGI, which uses custom - [MPT](https://huggingface.co/mosaicml/mpt-30b) - [Llama V2](https://huggingface.co/meta-llama) - [Code Llama](https://huggingface.co/codellama) -- [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) +- [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) +- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) +- [Phi](https://huggingface.co/microsoft/phi-2) If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyways to see how well it performs, but performance isn't guaranteed for non-optimized models: From 41fbf5c2541b17de2b3e2108890e58ca37a253d8 Mon Sep 17 00:00:00 2001 From: drbh Date: Fri, 26 Jan 2024 04:41:39 -0500 Subject: [PATCH 045/153] fix: show warning with tokenizer config parsing error (#1488) This tiny PR just prints the parsing error when a tokenizer config fails to load. This is helpful when a chat_template wont load due to formatting issues https://github.com/huggingface/text-generation-inference/pull/1427#issuecomment-1909226388 --- router/src/main.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/router/src/main.rs b/router/src/main.rs index 5f52b801..6526b859 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -473,7 +473,12 @@ pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option Date: Fri, 26 Jan 2024 10:41:58 +0100 Subject: [PATCH 046/153] fix: launcher doc typos (#1473) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? 
Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --------- Co-authored-by: Andres Restrepo --- docs/source/basic_tutorials/launcher.md | 4 ++-- launcher/src/main.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md index 9590e463..bafe3669 100644 --- a/docs/source/basic_tutorials/launcher.md +++ b/docs/source/basic_tutorials/launcher.md @@ -60,9 +60,9 @@ Options: [env: QUANTIZE=] Possible values: - - awq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models whereever possible because of the better latency + - awq: 4 bit quantization. Requires a specific AWQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models wherever possible because of the better latency - eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from https://github.com/NetEase-FuXi/EETQ.git - - gptq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels whereever possible, and use triton kernel (wider support) when it's not. AWQ has faster kernels + - gptq: 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels wherever possible, and use triton kernel (wider support) when it's not. AWQ has faster kernels - bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16 - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16 - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for you model diff --git a/launcher/src/main.rs b/launcher/src/main.rs index 95256178..7b7b8bf0 100644 --- a/launcher/src/main.rs +++ b/launcher/src/main.rs @@ -23,16 +23,16 @@ mod env_runtime; #[derive(Clone, Copy, Debug, ValueEnum)] enum Quantization { - /// 4 bit quantization. Requires a specific GTPQ quantized model: + /// 4 bit quantization. Requires a specific AWQ quantized model: /// https://hf.co/models?search=awq. - /// Should replace GPTQ models whereever possible because of the better latency + /// Should replace GPTQ models wherever possible because of the better latency Awq, /// 8 bit quantization, doesn't require specific model. /// Should be a drop-in replacement to bitsandbytes with much better performance. /// Kernels are from https://github.com/NetEase-FuXi/EETQ.git Eetq, /// 4 bit quantization. Requires a specific GTPQ quantized model: https://hf.co/models?search=gptq. 
- /// text-generation-inference will use exllama (faster) kernels whereever possible, and use + /// text-generation-inference will use exllama (faster) kernels wherever possible, and use /// triton kernel (wider support) when it's not. /// AWQ has faster kernels. Gptq, From ea2aa53805594343bf990f8bfc57115a9491592c Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Fri, 26 Jan 2024 14:00:29 +0100 Subject: [PATCH 047/153] Reinstate exl2 with tp (#1490) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- ...t_flash_starcoder_gptq_default_params.json | 26 +++++++++---------- .../utils/gptq/exllamav2.py | 4 +++ server/text_generation_server/utils/layers.py | 12 ++++----- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json index 5598a2ad..1ace3814 100644 --- a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json +++ b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json @@ -16,52 +16,52 @@ }, { "id": 21017, - "logprob": -9.09375, + "logprob": -9.0859375, "text": "ometric" }, { "id": 81, - "logprob": -0.25976562, + "logprob": -0.25830078, "text": "_" }, { "id": 6009, - "logprob": -2.2148438, + "logprob": -2.1875, "text": "mean" }, { "id": 26, - "logprob": -0.3010254, + "logprob": -0.30004883, "text": "(" }, { "id": 62, - "logprob": -5.6757812, + "logprob": -5.6171875, "text": "L" }, { "id": 44, - "logprob": -3.0898438, + "logprob": -3.078125, "text": ":" }, { "id": 1682, - "logprob": -0.6791992, + "logprob": -0.68066406, "text": " List" }, { "id": 77, - "logprob": -0.38891602, + "logprob": -0.38745117, "text": "[" }, { "id": 1808, - "logprob": -0.92041016, + "logprob": -0.9453125, "text": "float" }, { "id": 10794, - "logprob": -2.5390625, + "logprob": -2.5371094, "text": "]):" } ], @@ -69,7 +69,7 @@ "tokens": [ { "id": 284, - "logprob": 0.0, + "logprob": -0.051635742, "special": false, "text": "\n " }, @@ -81,7 +81,7 @@ }, { "id": 11665, - "logprob": -1.6005859, + "logprob": -1.2236328, "special": false, "text": " reduce" }, @@ -159,7 +159,7 @@ }, { "id": 203, - "logprob": -0.11968994, + "logprob": -0.12695312, "special": false, "text": "\n" }, diff --git a/server/text_generation_server/utils/gptq/exllamav2.py b/server/text_generation_server/utils/gptq/exllamav2.py index a24e834b..2b897f25 100644 --- 
a/server/text_generation_server/utils/gptq/exllamav2.py +++ b/server/text_generation_server/utils/gptq/exllamav2.py @@ -185,6 +185,10 @@ class QuantLinear(nn.Module): "g_idx": self.g_idx, } temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size()) + + # We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us, + # and `Memory access fault by GPU node-2` will EAT you. + self.temp_dq = temp_dq self.q_handle = ext_make_q_matrix(self.q_tensors, temp_dq) def forward(self, x, force_cuda=False): diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index 5a0de0d7..c9393d99 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -35,12 +35,12 @@ except Exception: HAS_EXLLAMA = False CAN_EXLLAMA = major >= 8 V2 = os.getenv("EXLLAMA_VERSION", "2") == "2" -if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: - V2 = False - log_once( - logger.warning, - "Disabling exllama v2 and using v1 instead because there are issues when sharding", - ) +# if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: +# V2 = False +# log_once( +# logger.warning, +# "Disabling exllama v2 and using v1 instead because there are issues when sharding", +# ) if os.getenv("DISABLE_EXLLAMA") == "True": HAS_EXLLAMA = False From b064b33e8b041588c8187943c3194d019ba2d220 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Fri, 26 Jan 2024 14:05:02 +0100 Subject: [PATCH 048/153] Add sealion mpt support (#1477) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. 
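For reviewers: the core of this change is sharding the fused `Wqkv` bias the same way the weight is already column-sharded. A minimal standalone sketch of that layout (the helper name and the `torch.arange` example are illustrative, not part of this patch):

```python
import torch

def shard_fused_qkv_bias(bias: torch.Tensor, rank: int, world_size: int) -> torch.Tensor:
    # The fused bias is laid out as [q | k | v]; each tensor-parallel rank
    # keeps its contiguous block from each of the three segments, mirroring
    # the column sharding of the Wqkv weight itself.
    h = bias.shape[0] // 3       # hidden size of one segment
    block = h // world_size      # rows owned by this rank per segment
    q = bias[rank * block : (rank + 1) * block]
    k = bias[h + rank * block : h + (rank + 1) * block]
    v = bias[2 * h + rank * block : 2 * h + (rank + 1) * block]
    return torch.cat([q, k, v], dim=0)

# Hidden size 8 split across 2 ranks: each rank keeps 4 rows per segment.
fused = torch.arange(24, dtype=torch.float32)
print(shard_fused_qkv_bias(fused, rank=0, world_size=2))  # rows 0-3, 8-11, 16-19
```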
--------- Co-authored-by: Choon Meng Tan Co-authored-by: David Ong Tat-Wee <13075447+ongtw@users.noreply.github.com> --- .../models/custom_modeling/mpt_modeling.py | 81 ++++++++++++++----- 1 file changed, 63 insertions(+), 18 deletions(-) diff --git a/server/text_generation_server/models/custom_modeling/mpt_modeling.py b/server/text_generation_server/models/custom_modeling/mpt_modeling.py index 5ccf796d..1a9aef74 100644 --- a/server/text_generation_server/models/custom_modeling/mpt_modeling.py +++ b/server/text_generation_server/models/custom_modeling/mpt_modeling.py @@ -28,7 +28,6 @@ EPS = 1e-5 def load_col(config, prefix, weights, bias): - assert bias == False, NotImplementedError assert config.quantize != "gptq", NotImplementedError slice_ = weights._get_slice(f"{prefix}.weight") rank = weights.process_group.rank() @@ -45,7 +44,26 @@ def load_col(config, prefix, weights, bias): if weight.dtype != torch.int32: weight = weight.to(dtype=weights.dtype) weight = weight.to(device=weights.device) - bias = None + + if bias: + bias_slice_ = weights._get_slice(f"{prefix}.bias") + bias_rank = weights.process_group.rank() + bias_size = weights.process_group.size() + + bias_h = bias_slice_.get_shape() + bias_h = bias_h[0] + bias_block_size = bias_h // bias_size + + bias_q_part = bias_slice_[bias_rank * bias_block_size : (bias_rank + 1) * bias_block_size] + bias_k_part = bias_slice_[bias_h + bias_rank * bias_block_size : bias_h + (bias_rank + 1) * bias_block_size] + bias_v_part = bias_slice_[2 * bias_h + bias_rank * bias_block_size : 2 * bias_h + (bias_rank + 1) * bias_block_size] + + bias = torch.cat([bias_q_part, bias_k_part, bias_v_part], dim=0) + if bias.dtype != torch.int32: + bias = bias.to(dtype=weights.dtype) + bias = bias.to(device=weights.device) + else: + bias = None linear = get_linear(weight, bias, config.quantize) return TensorParallelColumnLinear(linear) @@ -330,7 +348,12 @@ class MultiheadAttention(nn.Module): config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias ) if self.qk_ln: - raise NotImplementedError("qk_ln is not supported") + bias = not config.no_bias + hidden_size = config.d_model + head_dim = hidden_size // self.n_heads + + self.q_ln = LPLayerNorm(d_model, bias=bias, prefix=f"{prefix}.q_ln", weights=weights) + self.k_ln = LPLayerNorm(self.n_heads * head_dim, prefix=f"{prefix}.k_ln", weights=weights) if self.attn_impl == "flash": self.attn_fn = flash_attn_fn elif self.attn_impl == "triton": @@ -581,12 +604,20 @@ class MPTBlock(nn.Module): f"""Not implemented attn {config.attn_config["attn_type"]}""" ) resid_pdrop = config.resid_pdrop - self.norm_1 = nn.LayerNorm.load_no_bias( - prefix=f"{prefix}.norm_1", weights=weights, eps=EPS - ) - self.norm_2 = nn.LayerNorm.load_no_bias( - prefix=f"{prefix}.norm_2", weights=weights, eps=EPS - ) + if config.no_bias: + self.norm_1 = nn.LayerNorm.load_no_bias( + prefix=f"{prefix}.norm_1", weights=weights, eps=EPS + ) + self.norm_2 = nn.LayerNorm.load_no_bias( + prefix=f"{prefix}.norm_2", weights=weights, eps=EPS + ) + else: + self.norm_1 = nn.LayerNorm.load( + prefix=f"{prefix}.norm_1", weights=weights, eps=EPS + ) + self.norm_2 = nn.LayerNorm.load( + prefix=f"{prefix}.norm_2", weights=weights, eps=EPS + ) self.attn = MultiheadAttention(config, prefix=f"{prefix}.attn", weights=weights) self.ffn = MPTMLP(config, prefix=f"{prefix}.ffn", weights=weights) self.resid_attn_dropout = nn.Dropout(resid_pdrop) @@ -635,6 +666,9 @@ class LPLayerNorm(torch.nn.LayerNorm): elementwise_affine=True, device=None, dtype=None, + bias: 
Optional[bool] = True, + prefix=None, + weights=None, ): super().__init__( normalized_shape=normalized_shape, @@ -642,7 +676,14 @@ class LPLayerNorm(torch.nn.LayerNorm): elementwise_affine=elementwise_affine, device=device, dtype=dtype, + bias=bias, ) + if weights is not None: + self.weight = nn.Parameter(weights.get_sharded(f"{prefix}.weight", dim=0)) + if bias: + self.bias = nn.Parameter(weights.get_sharded(f"{prefix}.bias", dim=0)) + self.normalized_shape = self.weight.shape + def forward(self, x): module_device = x.device @@ -755,20 +796,23 @@ class MPTModel(MPTPreTrainedModel): ) self.wte = TensorParallelEmbedding("transformer.wte", weights) + if not self.alibi: - # self.wpe = torch.nn.Embedding( - # config.max_seq_len, config.d_model, device=config.init_device - # ) - raise RuntimeError("no alibi no supported") + self.wpe = TensorParallelEmbedding("transformer.wpe", weights) self.blocks = nn.ModuleList( [ MPTBlock(config, prefix=f"transformer.blocks.{i}", weights=weights) for i in range(config.n_layers) ] ) - self.norm_f = nn.LayerNorm.load_no_bias( - prefix="transformer.norm_f", weights=weights, eps=EPS - ) + if config.no_bias: + self.norm_f = nn.LayerNorm.load_no_bias( + prefix="transformer.norm_f", weights=weights, eps=EPS + ) + else: + self.norm_f = nn.LayerNorm.load( + prefix="transformer.norm_f", weights=weights, eps=EPS + ) self.is_causal = not self.prefix_lm self._attn_bias_initialized = False self.attn_bias = None @@ -787,8 +831,9 @@ class MPTModel(MPTPreTrainedModel): if config.verbose: warnings.warn(f"Removing bias ({module.bias}) from {module}.") module.register_parameter("bias", None) - if config.verbose and config.verbose > 2: - print(self) + if hasattr(self.config, "verbose"): + if config.verbose and config.verbose > 2: + print(self) if "verbose" not in self.config.init_config: self.config.init_config["verbose"] = self.config.verbose if self.config.init_config["verbose"] > 1: From 9fd5f5150cf9b26b41f16a5258c60b1fe9f6167b Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Fri, 26 Jan 2024 14:06:27 +0100 Subject: [PATCH 049/153] Trying to fix that flaky test. (#1491) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. 
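For context, the Rust `hf_hub` sync API used below mirrors what `huggingface_hub` does in Python. A rough Python equivalent of the new test helper (repo id and filename as in the diff; cache-sharing behavior is an assumption):

```python
from huggingface_hub import hf_hub_download
from tokenizers import Tokenizer

# Resolve tokenizer.json through the shared HF cache instead of hand-rolling
# a download-to-temp-file-and-rename dance; concurrent callers reuse one copy.
filename = hf_hub_download(repo_id="gpt2", filename="tokenizer.json")
tokenizer = Tokenizer.from_file(filename)
```

Letting the hub library own caching and locking is what removes the rename race the old test code tried to guard against.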
---
 router/src/lib.rs | 22 ++----------------------
 1 file changed, 4 insertions(+), 18 deletions(-)

diff --git a/router/src/lib.rs b/router/src/lib.rs
index 2bfbbacd..6c16c4b3 100644
--- a/router/src/lib.rs
+++ b/router/src/lib.rs
@@ -548,26 +548,12 @@ pub(crate) struct ErrorResponse {
 
 #[cfg(test)]
 mod tests {
-    use std::io::Write;
     use tokenizers::Tokenizer;
 
     pub(crate) async fn get_tokenizer() -> Tokenizer {
-        let filename = std::path::Path::new("tokenizer.json");
-        if !filename.exists() {
-            let content = reqwest::get("https://huggingface.co/gpt2/raw/main/tokenizer.json")
-                .await
-                .unwrap()
-                .bytes()
-                .await
-                .unwrap();
-            let tmp_filename = "tokenizer.json.temp";
-            let mut file = std::fs::File::create(tmp_filename).unwrap();
-            file.write_all(&content).unwrap();
-            // Re-check if another process has written this file maybe.
-            if !filename.exists() {
-                std::fs::rename(tmp_filename, filename).unwrap()
-            }
-        }
-        Tokenizer::from_file("tokenizer.json").unwrap()
+        let api = hf_hub::api::sync::Api::new().unwrap();
+        let repo = api.model("gpt2".to_string());
+        let filename = repo.get("tokenizer.json").unwrap();
+        Tokenizer::from_file(filename).unwrap()
     }
 }

From 5134d9ccc30d83c31d458ff761a4c1b7be95474c Mon Sep 17 00:00:00 2001
From: Andrés Restrepo
Date: Fri, 26 Jan 2024 08:10:07 -0500
Subject: [PATCH 050/153] fix: launcher doc typos (#1462)

Fixes launcher doc typos

Fixes # (issue)

- [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

@OlivierDehaene OR @Narsil

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.

Co-authored-by: Nicolas Patry

From 5d663fb85d8cc2e4670765ace05ec2e976abdee5 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Fri, 26 Jan 2024 16:07:31 +0100
Subject: [PATCH 051/153] Update the docs to include newer models.
(#1492) --- docs/openapi.json | 2 +- router/src/lib.rs | 31 +++++++++++++++++++++++++------ router/src/server.rs | 22 +++++++++++++++------- 3 files changed, 41 insertions(+), 14 deletions(-) diff --git a/docs/openapi.json b/docs/openapi.json index 4454259b..9a9ed116 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -1 +1 @@ -{"openapi":"3.0.3","info":{"title":"Text Generation Inference","description":"Text Generation Webserver","contact":{"name":"Olivier Dehaene"},"license":{"name":"Apache 2.0","url":"https://www.apache.org/licenses/LICENSE-2.0"},"version":"1.3.4"},"paths":{"/":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens if `stream == false` or a stream of token if `stream == true`","description":"Generate tokens if `stream == false` or a stream of token if `stream == true`","operationId":"compat_generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CompatGenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}},"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate_stream":{"post":{"tags":["Text Generation Inference"],"summary":"Generate a stream of token using Server-Sent Events","description":"Generate a stream of token using Server-Sent Events","operationId":"generate_stream","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated 
Text","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/health":{"get":{"tags":["Text Generation Inference"],"summary":"Health check method","description":"Health check method","operationId":"health","responses":{"200":{"description":"Everything is working fine"},"503":{"description":"Text generation inference is down","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"unhealthy","error_type":"healthcheck"}}}}}}},"/info":{"get":{"tags":["Text Generation Inference"],"summary":"Text Generation Inference endpoint info","description":"Text Generation Inference endpoint info","operationId":"get_model_info","responses":{"200":{"description":"Served model info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Info"}}}}}}},"/metrics":{"get":{"tags":["Text Generation Inference"],"summary":"Prometheus metrics scrape endpoint","description":"Prometheus metrics scrape endpoint","operationId":"metrics","responses":{"200":{"description":"Prometheus Metrics","content":{"text/plain":{"schema":{"type":"string"}}}}}}},"/tokenize":{"post":{"tags":["Text Generation Inference"],"summary":"Tokenize inputs","description":"Tokenize inputs","operationId":"tokenize","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeRequest"}}},"required":true},"responses":{"200":{"description":"Tokenized ids","content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeResponse"}}}},"404":{"description":"No tokenizer found","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"No fast tokenizer available"}}}}}}},"/v1/chat/completions":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"chat_completions","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatCompletionChunk"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete 
generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}}},"components":{"schemas":{"BestOfSequence":{"type":"object","required":["generated_text","finish_reason","generated_tokens","prefill","tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_text":{"type":"string","example":"test"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"CompatGenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"},"stream":{"type":"boolean","default":"false"}}},"Details":{"type":"object","required":["finish_reason","generated_tokens","prefill","tokens"],"properties":{"best_of_sequences":{"type":"array","items":{"$ref":"#/components/schemas/BestOfSequence"},"nullable":true},"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ErrorResponse":{"type":"object","required":["error","error_type"],"properties":{"error":{"type":"string"},"error_type":{"type":"string"}}},"FinishReason":{"type":"string","enum":["length","eos_token","stop_sequence"]},"GenerateParameters":{"type":"object","properties":{"best_of":{"type":"integer","default":"null","example":1,"nullable":true,"minimum":0,"exclusiveMinimum":0},"decoder_input_details":{"type":"boolean","default":"true"},"details":{"type":"boolean","default":"true"},"do_sample":{"type":"boolean","default":"false","example":true},"max_new_tokens":{"type":"integer","format":"int32","default":"100","example":"20","nullable":true,"minimum":0},"repetition_penalty":{"type":"number","format":"float","default":"null","example":1.03,"nullable":true,"exclusiveMinimum":0},"return_full_text":{"type":"boolean","default":"null","example":false,"nullable":true},"seed":{"type":"integer","format":"int64","default":"null","example":"null","nullable":true,"minimum":0,"exclusiveMinimum":0},"stop":{"type":"array","items":{"type":"string"},"example":["photographer"],"maxItems":4},"temperature":{"type":"number","format":"float","default":"null","example":0.5,"nullable":true,"exclusiveMinimum":0},"top_k":{"type":"integer","format":"int32","default":"null","example":10,"nullable":true,"exclusiveMinimum":0},"top_n_tokens":{"type":"integer","format":"int32","default":"null","example":5,"nullable":true,"minimum":0,"exclusiveMinimum":0},"top_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"truncate":{"type":"integer","default":"null","example":"null","nullable":true,"minimum":0},"typical_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"ex
clusiveMinimum":0},"watermark":{"type":"boolean","default":"false","example":true}}},"GenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"}}},"GenerateResponse":{"type":"object","required":["generated_text"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/Details"}],"nullable":true},"generated_text":{"type":"string","example":"test"}}},"Info":{"type":"object","required":["model_id","model_dtype","model_device_type","max_concurrent_requests","max_best_of","max_stop_sequences","max_input_length","max_total_tokens","waiting_served_ratio","max_batch_total_tokens","max_waiting_tokens","validation_workers","version"],"properties":{"docker_label":{"type":"string","example":"null","nullable":true},"max_batch_total_tokens":{"type":"integer","format":"int32","example":"32000","minimum":0},"max_best_of":{"type":"integer","example":"2","minimum":0},"max_concurrent_requests":{"type":"integer","description":"Router Parameters","example":"128","minimum":0},"max_input_length":{"type":"integer","example":"1024","minimum":0},"max_stop_sequences":{"type":"integer","example":"4","minimum":0},"max_total_tokens":{"type":"integer","example":"2048","minimum":0},"max_waiting_tokens":{"type":"integer","example":"20","minimum":0},"model_device_type":{"type":"string","example":"cuda"},"model_dtype":{"type":"string","example":"torch.float16"},"model_id":{"type":"string","description":"Model info","example":"bigscience/blomm-560m"},"model_pipeline_tag":{"type":"string","example":"text-generation","nullable":true},"model_sha":{"type":"string","example":"e985a63cdc139290c5f700ff1929f0b5942cced2","nullable":true},"sha":{"type":"string","example":"null","nullable":true},"validation_workers":{"type":"integer","example":"2","minimum":0},"version":{"type":"string","description":"Router Info","example":"0.5.0"},"waiting_served_ratio":{"type":"number","format":"float","example":"1.2"}}},"PrefillToken":{"type":"object","required":["id","text","logprob"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"text":{"type":"string","example":"test"}}},"StreamDetails":{"type":"object","required":["finish_reason","generated_tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0}}},"StreamResponse":{"type":"object","required":["index","token"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/StreamDetails"}],"default":"null","nullable":true},"generated_text":{"type":"string","default":"null","example":"test","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"token":{"$ref":"#/components/schemas/Token"},"top_tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}},"Token":{"type":"object","required":["id","text","logprob","special"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"special":{"type":"boolean","example":"false"},"text":{"type":"string","example":"test"}}}}},"tags":[{"name":"Text Generation Inference","description":"Hugging Face Text Generation Inference API"}]} +{"openapi":"3.0.3","info":{"title":"Text Generation 
Inference","description":"Text Generation Webserver","contact":{"name":"Olivier Dehaene"},"license":{"name":"Apache 2.0","url":"https://www.apache.org/licenses/LICENSE-2.0"},"version":"1.3.4"},"paths":{"/":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens if `stream == false` or a stream of token if `stream == true`","description":"Generate tokens if `stream == false` or a stream of token if `stream == true`","operationId":"compat_generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CompatGenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}},"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate_stream":{"post":{"tags":["Text Generation Inference"],"summary":"Generate a stream of token using Server-Sent Events","description":"Generate a stream of token using Server-Sent Events","operationId":"generate_stream","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is 
overloaded","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/health":{"get":{"tags":["Text Generation Inference"],"summary":"Health check method","description":"Health check method","operationId":"health","responses":{"200":{"description":"Everything is working fine"},"503":{"description":"Text generation inference is down","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"unhealthy","error_type":"healthcheck"}}}}}}},"/info":{"get":{"tags":["Text Generation Inference"],"summary":"Text Generation Inference endpoint info","description":"Text Generation Inference endpoint info","operationId":"get_model_info","responses":{"200":{"description":"Served model info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Info"}}}}}}},"/metrics":{"get":{"tags":["Text Generation Inference"],"summary":"Prometheus metrics scrape endpoint","description":"Prometheus metrics scrape endpoint","operationId":"metrics","responses":{"200":{"description":"Prometheus Metrics","content":{"text/plain":{"schema":{"type":"string"}}}}}}},"/tokenize":{"post":{"tags":["Text Generation Inference"],"summary":"Tokenize inputs","description":"Tokenize inputs","operationId":"tokenize","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Tokenized ids","content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeResponse"}}}},"404":{"description":"No tokenizer found","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"No fast tokenizer available"}}}}}}},"/v1/chat/completions":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"chat_completions","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatCompletionChunk"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete 
generation"}}}}}}}},"components":{"schemas":{"BestOfSequence":{"type":"object","required":["generated_text","finish_reason","generated_tokens","prefill","tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_text":{"type":"string","example":"test"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ChatCompletion":{"type":"object","required":["id","object","created","model","system_fingerprint","choices","usage"],"properties":{"choices":{"type":"array","items":{"$ref":"#/components/schemas/ChatCompletionComplete"}},"created":{"type":"integer","format":"int64","example":"1706270835","minimum":0},"id":{"type":"string"},"model":{"type":"string","example":"mistralai/Mistral-7B-Instruct-v0.2"},"object":{"type":"string"},"system_fingerprint":{"type":"string"},"usage":{"$ref":"#/components/schemas/Usage"}}},"ChatCompletionChoice":{"type":"object","required":["index","delta"],"properties":{"delta":{"$ref":"#/components/schemas/ChatCompletionDelta"},"finish_reason":{"type":"string","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"logprobs":{"type":"number","format":"float","nullable":true}}},"ChatCompletionChunk":{"type":"object","required":["id","object","created","model","system_fingerprint","choices"],"properties":{"choices":{"type":"array","items":{"$ref":"#/components/schemas/ChatCompletionChoice"}},"created":{"type":"integer","format":"int64","example":"1706270978","minimum":0},"id":{"type":"string"},"model":{"type":"string","example":"mistralai/Mistral-7B-Instruct-v0.2"},"object":{"type":"string"},"system_fingerprint":{"type":"string"}}},"ChatCompletionDelta":{"type":"object","required":["role","content"],"properties":{"content":{"type":"string","example":"What is Deep Learning?"},"role":{"type":"string","example":"user"}}},"ChatRequest":{"type":"object","required":["model"],"properties":{"frequency_penalty":{"type":"number","format":"float","description":"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.","example":"1.0","nullable":true},"logit_bias":{"type":"array","items":{"type":"number","format":"float"},"description":"UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.","nullable":true},"logprobs":{"type":"boolean","description":"Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each\noutput token returned in the content of message.","example":"false","nullable":true},"max_tokens":{"type":"integer","format":"int32","description":"The maximum number of tokens that can be generated in the chat completion.","example":"32","nullable":true,"minimum":0},"messages":{"type":"array","items":{"$ref":"#/components/schemas/Message"},"description":"A list of messages comprising the conversation so far."},"model":{"type":"string","description":"UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.","example":"mistralai/Mistral-7B-Instruct-v0.2"},"n":{"type":"integer","format":"int32","description":"UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.","example":"2","nullable":true,"minimum":0},"presence_penalty":{"type":"number","format":"float","description":"UNUSED\nNumber between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics","example":0.1,"nullable":true},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"stream":{"type":"boolean"},"temperature":{"type":"number","format":"float","description":"What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.","example":1.0,"nullable":true},"top_logprobs":{"type":"integer","format":"int32","description":"UNUSED\nAn integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.","example":"5","nullable":true,"minimum":0},"top_p":{"type":"number","format":"float","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.","example":0.95,"nullable":true}}},"CompatGenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"},"stream":{"type":"boolean","default":"false"}}},"Details":{"type":"object","required":["finish_reason","generated_tokens","prefill","tokens"],"properties":{"best_of_sequences":{"type":"array","items":{"$ref":"#/components/schemas/BestOfSequence"},"nullable":true},"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ErrorResponse":{"type":"object","required":["error","error_type"],"properties":{"error":{"type":"string"},"error_type":{"type":"string"}}},"FinishReason":{"type":"string","enum":["length","eos_token","stop_sequence"],"example":"Length"},"GenerateParameters":{"type":"object","properties":{"best_of":{"type":"integer","default":"null","example":1,"nullable":true,"minimum":0,"exclusiveMinimum":0},"decoder_input_details":{"type":"boolean","default":"true"},"details":{"type":"boolean","default":"true"},"do_sample":{"type":"boolean","default":"false","example":true},"max_new_tokens":{"type":"integer","format":"int32","default":"100","example":"20","nullable":true,"minimum":0},"repetition_penalty":{"type":"number","format":"float","default":"null","example":1.03,"nullable":true,"exclusiveMinimum":0},"return_full_text":{"type":"boolean","default":"null","example":false,"nullable":true},"seed":{"type":"integer","format":"int64","default":"null","example":"null","nullable":true,"minimum":0,"exclusiveMinimum":0},"stop":{"type":"array","items":{"type":"string"},"example":["photographer"],"maxItems":4},"temperature":{"type":"number","format":"float","default":"null","example":0.5,"nullable":true,"exclusiveMinimum":0},"top_k":{"type":"integer","format":"int32","default":"null","example":10,"nullable":true,"exclusiveMinimum":0},"top_n_tokens":{"type":"integer","format":"int32","default":"null","example":5,"nullable":true,"minimum":0,"exclusiveMinimum":0},"top_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"truncate":{"type":"integer","default":"null","example":"null","nullable":true,"minimum":0},"typical_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"watermark":{"type":"boolean","default":"false","example":true}}},"GenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and 
I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"}}},"GenerateResponse":{"type":"object","required":["generated_text"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/Details"}],"nullable":true},"generated_text":{"type":"string","example":"test"}}},"Info":{"type":"object","required":["model_id","model_dtype","model_device_type","max_concurrent_requests","max_best_of","max_stop_sequences","max_input_length","max_total_tokens","waiting_served_ratio","max_batch_total_tokens","max_waiting_tokens","validation_workers","version"],"properties":{"docker_label":{"type":"string","example":"null","nullable":true},"max_batch_total_tokens":{"type":"integer","format":"int32","example":"32000","minimum":0},"max_best_of":{"type":"integer","example":"2","minimum":0},"max_concurrent_requests":{"type":"integer","description":"Router Parameters","example":"128","minimum":0},"max_input_length":{"type":"integer","example":"1024","minimum":0},"max_stop_sequences":{"type":"integer","example":"4","minimum":0},"max_total_tokens":{"type":"integer","example":"2048","minimum":0},"max_waiting_tokens":{"type":"integer","example":"20","minimum":0},"model_device_type":{"type":"string","example":"cuda"},"model_dtype":{"type":"string","example":"torch.float16"},"model_id":{"type":"string","description":"Model info","example":"bigscience/blomm-560m"},"model_pipeline_tag":{"type":"string","example":"text-generation","nullable":true},"model_sha":{"type":"string","example":"e985a63cdc139290c5f700ff1929f0b5942cced2","nullable":true},"sha":{"type":"string","example":"null","nullable":true},"validation_workers":{"type":"integer","example":"2","minimum":0},"version":{"type":"string","description":"Router Info","example":"0.5.0"},"waiting_served_ratio":{"type":"number","format":"float","example":"1.2"}}},"Message":{"type":"object","required":["role","content"],"properties":{"content":{"type":"string","example":"My name is David and 
I"},"role":{"type":"string","example":"user"}}},"PrefillToken":{"type":"object","required":["id","text","logprob"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"text":{"type":"string","example":"test"}}},"SimpleToken":{"type":"object","required":["id","text","start","stop"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"start":{"type":"integer","example":0,"minimum":0},"stop":{"type":"integer","example":2,"minimum":0},"text":{"type":"string","example":"test"}}},"StreamDetails":{"type":"object","required":["finish_reason","generated_tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0}}},"StreamResponse":{"type":"object","required":["index","token"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/StreamDetails"}],"default":"null","nullable":true},"generated_text":{"type":"string","default":"null","example":"test","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"token":{"$ref":"#/components/schemas/Token"},"top_tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}},"Token":{"type":"object","required":["id","text","logprob","special"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"special":{"type":"boolean","example":"false"},"text":{"type":"string","example":"test"}}},"TokenizeResponse":{"type":"array","items":{"$ref":"#/components/schemas/SimpleToken"}}}},"tags":[{"name":"Text Generation Inference","description":"Hugging Face Text Generation Inference API"}]} \ No newline at end of file diff --git a/router/src/lib.rs b/router/src/lib.rs index 6c16c4b3..fc5670a0 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -188,18 +188,20 @@ fn default_parameters() -> GenerateParameters { } } -#[derive(Clone, Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletion { pub id: String, pub object: String, + #[schema(example = "1706270835")] pub created: u64, + #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] pub model: String, pub system_fingerprint: String, pub choices: Vec, pub usage: Usage, } -#[derive(Clone, Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionComplete { pub index: u32, pub message: Message, @@ -248,17 +250,19 @@ impl ChatCompletion { } } -#[derive(Clone, Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionChunk { pub id: String, pub object: String, + #[schema(example = "1706270978")] pub created: u64, + #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] pub model: String, pub system_fingerprint: String, pub choices: Vec, } -#[derive(Clone, Deserialize, Serialize)] +#[derive(Clone, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionChoice { pub index: u32, pub delta: ChatCompletionDelta, @@ -266,9 +270,11 @@ pub(crate) struct ChatCompletionChoice { pub finish_reason: Option, } -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)] pub(crate) struct ChatCompletionDelta { + #[schema(example = "user")] pub role: String, + #[schema(example 
= "What is Deep Learning?")] pub content: String, } @@ -311,7 +317,7 @@ fn default_request_messages() -> Vec { #[derive(Clone, Deserialize, ToSchema, Serialize)] pub(crate) struct ChatRequest { /// UNUSED - #[schema(example = "bigscience/blomm-560m")] + #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. pub model: String, /* NOTE: UNUSED */ @@ -322,6 +328,7 @@ pub(crate) struct ChatRequest { /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, /// decreasing the model's likelihood to repeat the same line verbatim. #[serde(default)] + #[schema(example = "1.0")] pub frequency_penalty: Option, /// UNUSED @@ -336,28 +343,33 @@ pub(crate) struct ChatRequest { /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each /// output token returned in the content of message. #[serde(default)] + #[schema(example = "false")] pub logprobs: Option, /// UNUSED /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with /// an associated log probability. logprobs must be set to true if this parameter is used. #[serde(default)] + #[schema(example = "5")] pub top_logprobs: Option, /// The maximum number of tokens that can be generated in the chat completion. #[serde(default)] + #[schema(example = "32")] pub max_tokens: Option, /// UNUSED /// How many chat completion choices to generate for each input message. Note that you will be charged based on the /// number of generated tokens across all of the choices. Keep n as 1 to minimize costs. #[serde(default)] + #[schema(nullable = true, example = "2")] pub n: Option, /// UNUSED /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, /// increasing the model's likelihood to talk about new topics #[serde(default)] + #[schema(nullable = true, example = 0.1)] pub presence_penalty: Option, #[serde(default = "bool::default")] @@ -371,11 +383,13 @@ pub(crate) struct ChatRequest { /// /// We generally recommend altering this or `top_p` but not both. #[serde(default)] + #[schema(nullable = true, example = 1.0)] pub temperature: Option, /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the /// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. #[serde(default)] + #[schema(nullable = true, example = 0.95)] pub top_p: Option, } @@ -458,6 +472,7 @@ pub struct SimpleToken { #[derive(Serialize, ToSchema)] #[serde(rename_all(serialize = "snake_case"))] +#[schema(example = "Length")] pub(crate) enum FinishReason { #[schema(rename = "length")] Length, @@ -518,6 +533,10 @@ pub(crate) struct GenerateResponse { pub details: Option

, } +#[derive(Serialize, ToSchema)] +#[serde(transparent)] +pub(crate) struct TokenizeResponse(Vec); + #[derive(Serialize, ToSchema)] pub(crate) struct StreamDetails { #[schema(example = "length")] diff --git a/router/src/server.rs b/router/src/server.rs index a6462731..4092ba6c 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -5,10 +5,10 @@ use crate::health::Health; use crate::infer::{InferError, InferResponse, InferStreamResponse}; use crate::validation::ValidationError; use crate::{ - BestOfSequence, ChatCompletion, ChatCompletionChunk, ChatRequest, CompatGenerateRequest, - Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, - HubModelInfo, HubTokenizerConfig, Infer, Info, PrefillToken, SimpleToken, StreamDetails, - StreamResponse, Token, Validation, + BestOfSequence, ChatCompletion, ChatCompletionChoice, ChatCompletionChunk, ChatCompletionDelta, + ChatRequest, CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters, + GenerateRequest, GenerateResponse, HubModelInfo, HubTokenizerConfig, Infer, Info, Message, + PrefillToken, SimpleToken, StreamDetails, StreamResponse, Token, TokenizeResponse, Validation, }; use axum::extract::Extension; use axum::http::{HeaderMap, Method, StatusCode}; @@ -679,7 +679,7 @@ async fn chat_completions( post, tag = "Text Generation Inference", path = "/tokenize", - request_body = TokenizeRequest, + request_body = GenerateRequest, responses( (status = 200, description = "Tokenized ids", body = TokenizeResponse), (status = 404, description = "No tokenizer found", body = ErrorResponse, @@ -690,7 +690,7 @@ async fn chat_completions( async fn tokenize( Extension(infer): Extension, Json(req): Json, -) -> Result)> { +) -> Result, (StatusCode, Json)> { let input = req.inputs.clone(); let encoding = infer.tokenize(req).await?; if let Some(encoding) = encoding { @@ -708,7 +708,7 @@ async fn tokenize( } }) .collect(); - Ok(Json(tokens).into_response()) + Ok(Json(TokenizeResponse(tokens))) } else { Err(( StatusCode::NOT_FOUND, @@ -776,10 +776,18 @@ pub async fn run( Info, CompatGenerateRequest, GenerateRequest, + ChatRequest, + Message, + ChatCompletionChoice, + ChatCompletionDelta, + ChatCompletionChunk, + ChatCompletion, GenerateParameters, PrefillToken, Token, GenerateResponse, + TokenizeResponse, + SimpleToken, BestOfSequence, Details, FinishReason, From 4b376b30f1613d514c16c4d97e11f840f0c75359 Mon Sep 17 00:00:00 2001 From: fxmarty <9808326+fxmarty@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:27:44 +0100 Subject: [PATCH 052/153] GPTQ support on ROCm (#1489) Tested with ``` CUDA_VISIBLE_DEVICES=0 text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq EXLLAMA_VERSION=1 CUDA_VISIBLE_DEVICES=0 text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq CUDA_VISIBLE_DEVICES="0,1" text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq ``` all with good and identical results on MI210. 
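For reference, a roughly equivalent launch through the ROCm container might look like the sketch below. The image tag, port mapping, and device flags are assumptions for illustration and are not part of this patch; only `--quantize gptq` and the `EXLLAMA_VERSION` variable (read in `server/text_generation_server/utils/layers.py`, defaulting to `"2"`) come from the tested commands above.

```
# Sketch: serving a GPTQ checkpoint on an AMD Instinct GPU via the ROCm image.
# --device=/dev/kfd and --device=/dev/dri are the standard flags that expose
# AMD GPUs to a container; the image tag below is assumed, not taken from this patch.
# EXLLAMA_VERSION=1 forces the exllama v1 kernels; per utils/layers.py the default is "2".
docker run --rm -it \
  --device=/dev/kfd --device=/dev/dri --group-add video \
  --ipc=host --shm-size 1g -p 8080:80 \
  -e EXLLAMA_VERSION=1 \
  ghcr.io/huggingface/text-generation-inference:latest-rocm \
  --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq
```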
--------- Co-authored-by: Felix Marty Co-authored-by: OlivierDehaene Co-authored-by: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com> --- .gitignore | 10 +++++++ Dockerfile_amd | 24 +++++++++++++-- docs/source/supported_models.md | 4 +-- .../{cuda_compat.cuh => cu_compat.cuh} | 6 ++-- .../exllama_kernels/cuda_func/q4_matmul.cu | 7 +++-- .../exllama_kernels/hip_compat.cuh} | 29 ++++++++++++++----- .../exllama_kernels/exllama_kernels/util.cuh | 4 +++ server/exllamav2_kernels/setup.py | 11 +++++++ .../utils/gptq/exllamav2.py | 5 +--- server/text_generation_server/utils/layers.py | 2 +- 10 files changed, 80 insertions(+), 22 deletions(-) rename server/exllama_kernels/exllama_kernels/{cuda_compat.cuh => cu_compat.cuh} (91%) rename server/{exllamav2_kernels/exllamav2_kernels/cuda/compat_gemm.cuh => exllama_kernels/exllama_kernels/hip_compat.cuh} (68%) diff --git a/.gitignore b/.gitignore index 20c9baee..1f9ba162 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,13 @@ target router/tokenizer.json *__pycache__* + +# ROCm auto-generated files +*.hip +server/exllamav2_kernels/exllamav2_kernels/hip/ +server/exllama_kernels/exllama_kernels/hip/ +server/exllama_kernels/exllama_kernels/hip_func/ +*_hip.cuh +server/exllama_kernels/exllama_kernels/hip_buffers.cuh +server/exllama_kernels/exllama_kernels/exllama_ext_hip.cpp + diff --git a/Dockerfile_amd b/Dockerfile_amd index dd331a5d..d2b6f897 100644 --- a/Dockerfile_amd +++ b/Dockerfile_amd @@ -75,8 +75,8 @@ RUN chmod +x ~/mambaforge.sh && \ mamba init && \ rm ~/mambaforge.sh -# Install PyTorch nightly (2.2.0.dev2023) compiled against RoCm 5.7, as VLLM can not be compiled with RoCm 5.6. -RUN pip install --pre torch==2.2.0.dev20231106 --index-url https://download.pytorch.org/whl/nightly/rocm5.7 +# Install PyTorch 2.2 RC compiled against RoCm 5.7, as VLLM can not be compiled with RoCm 5.6. +RUN pip install torch --index-url https://download.pytorch.org/whl/test/rocm5.7/ FROM base AS kernel-builder @@ -104,6 +104,20 @@ WORKDIR /usr/src COPY server/custom_kernels/ . RUN PYTORCH_ROCM_ARCH=gfx90a python setup.py build +# Build exllama kernels +FROM kernel-builder as exllama-kernels-builder +WORKDIR /usr/src +COPY server/exllama_kernels/ . + +RUN PYTORCH_ROCM_ARCH="gfx90a" python setup.py build + +# Build exllama v2 kernels +FROM kernel-builder as exllamav2-kernels-builder +WORKDIR /usr/src +COPY server/exllamav2_kernels/ . 
+ +RUN PYTORCH_ROCM_ARCH="gfx90a" python setup.py build + FROM base as base-copy # Text Generation Inference base env @@ -120,6 +134,12 @@ COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86 # Copy build artifacts from custom kernels builder COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages +# Copy build artifacts from exllama kernels builder +COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages + +# Copy build artifacts from exllamav2 kernels builder +COPY --from=exllamav2-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages + # Install flash-attention dependencies RUN pip install einops --no-cache-dir diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md index 004790ab..df5102c2 100644 --- a/docs/source/supported_models.md +++ b/docs/source/supported_models.md @@ -43,8 +43,8 @@ text-generation-launcher --model-id TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 12.2+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed. -TGI also has support of ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the supported may be extended in the future: -* Quantization (GPTQ, AWQ, etc.) +TGI also has support for ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention, GPTQ quantization, and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future: +* Loading [AWQ](https://huggingface.co/docs/transformers/quantization#awq) checkpoints.
* Flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm) * Kernel for sliding window attention (Mistral) diff --git a/server/exllama_kernels/exllama_kernels/cuda_compat.cuh b/server/exllama_kernels/exllama_kernels/cu_compat.cuh similarity index 91% rename from server/exllama_kernels/exllama_kernels/cuda_compat.cuh rename to server/exllama_kernels/exllama_kernels/cu_compat.cuh index 8dfa25de..c5258813 100644 --- a/server/exllama_kernels/exllama_kernels/cuda_compat.cuh +++ b/server/exllama_kernels/exllama_kernels/cu_compat.cuh @@ -43,12 +43,12 @@ __device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val) // -#if defined(__CUDA_ARCH__) -#if __CUDA_ARCH__ < 700 +#if defined(__CUDA_ARCH__) || defined(USE_ROCM) +#if __CUDA_ARCH__ < 700 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); } -#if __CUDA_ARCH__ < 600 +#if __CUDA_ARCH__ < 600 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); } #endif diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu index 60dc4c9d..61380f42 100644 --- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu +++ b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu @@ -2,8 +2,11 @@ #include "column_remap.cuh" #include "../util.cuh" #include "../matrix.cuh" -#include "../cuda_compat.cuh" +#include "../cu_compat.cuh" #include "../cuda_buffers.cuh" +#if defined(USE_ROCM) +#include "../hip_compat.cuh" +#endif const int THREADS_X = 32; // Block size and thread count along columns in w and out const int THREADS_Y = 1; // Block size and thread count along rows in x and out @@ -128,7 +131,7 @@ __global__ void q4_matmul_kernel if constexpr (use_half2) { - half result = __hadd(acc.x, acc.y); + half result = __hadd(__low2half(acc), __high2half(acc)); atomicAdd(out_.item_ptr(x_row, w_column), result); } else diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/compat_gemm.cuh b/server/exllama_kernels/exllama_kernels/hip_compat.cuh similarity index 68% rename from server/exllamav2_kernels/exllamav2_kernels/cuda/compat_gemm.cuh rename to server/exllama_kernels/exllama_kernels/hip_compat.cuh index 19b1e4a6..4f2a7ae7 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/compat_gemm.cuh +++ b/server/exllama_kernels/exllama_kernels/hip_compat.cuh @@ -1,12 +1,23 @@ -#ifndef _compat_gemm_cuh -#define _compat_gemm_cuh +// Adapted from turboderp exllama: https://github.com/turboderp/exllama -#if defined(USE_ROCM) +#ifndef _hip_compat_cuh +#define _hip_compat_cuh -// For some reason this include is not present anywhere in exllama_v2 codebase, but it is required -// for symbols as hipblasHalf. -#include <hipblas/hipblas.h> +// Workaround for a bug in hipamd, backported from upstream, this is fixed in ROCm 5.6. +__device__ __forceinline__ __half __compat_hrcp(__half x) { + return __half_raw{ + static_cast<_Float16>(__builtin_amdgcn_rcph(static_cast<__half_raw>(x).data))}; +} +__device__ __forceinline__ __half2 __compat_h2rcp(__half2 x) { + return _Float16_2{static_cast<_Float16>(__builtin_amdgcn_rcph(x.x)), + static_cast<_Float16>(__builtin_amdgcn_rcph(x.y))}; +} + +#define hrcp __compat_hrcp +#define h2rcp __compat_h2rcp + +// Automatic conversion of hipblasHgemm doesn't convert half to hipblasHalf.
__host__ __forceinline__ hipblasStatus_t __compat_hipblasHgemm(hipblasHandle_t handle, hipblasOperation_t transA, hipblasOperation_t transB, @@ -31,8 +42,10 @@ __host__ __forceinline__ hipblasStatus_t __compat_hipblasHgemm(hipblasHandle_t #define hipblasHgemm __compat_hipblasHgemm // Previous version of PyTorch were converting to rocBLAS instead of hipBLAS. +#define rocblas_handle hipblasHandle_t #define rocblas_operation_none HIPBLAS_OP_N +#define rocblas_get_stream hipblasGetStream +#define rocblas_set_stream hipblasSetStream #define rocblas_hgemm __compat_hipblasHgemm -#endif -#endif +#endif \ No newline at end of file diff --git a/server/exllama_kernels/exllama_kernels/util.cuh b/server/exllama_kernels/exllama_kernels/util.cuh index 2839b10f..7b397573 100644 --- a/server/exllama_kernels/exllama_kernels/util.cuh +++ b/server/exllama_kernels/exllama_kernels/util.cuh @@ -8,7 +8,11 @@ #include <cstdint> #include <cstdio> +#if defined(USE_ROCM) +#define cudaUnspecified hipErrorUnknown +#else #define cudaUnspecified cudaErrorApiFailureBase +#endif // React to failure on return code != cudaSuccess diff --git a/server/exllamav2_kernels/setup.py b/server/exllamav2_kernels/setup.py index 518db1df..4a16b546 100644 --- a/server/exllamav2_kernels/setup.py +++ b/server/exllamav2_kernels/setup.py @@ -1,5 +1,15 @@ from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension +import torch + +extra_cuda_cflags = ["-lineinfo", "-O3"] + +if torch.version.hip: + extra_cuda_cflags += ["-DHIPBLAS_USE_HIP_HALF"] + +extra_compile_args = { + "nvcc": extra_cuda_cflags, +} setup( name="exllamav2_kernels", @@ -11,6 +21,7 @@ setup( "exllamav2_kernels/cuda/q_matrix.cu", "exllamav2_kernels/cuda/q_gemm.cu", ], + extra_compile_args=extra_compile_args, ) ], cmdclass={"build_ext": BuildExtension}, diff --git a/server/text_generation_server/utils/gptq/exllamav2.py b/server/text_generation_server/utils/gptq/exllamav2.py index 2b897f25..80836a95 100644 --- a/server/text_generation_server/utils/gptq/exllamav2.py +++ b/server/text_generation_server/utils/gptq/exllamav2.py @@ -1,12 +1,9 @@ # Adapted from turboderp exllama: https://github.com/turboderp/exllamav2 -from logging import getLogger - import torch import torch.nn as nn -import math -logger = getLogger(__name__) +from loguru import logger try: from exllamav2_kernels import make_q_matrix, gemm_half_q_half diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index c9393d99..6ddfd6f4 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -33,7 +33,7 @@ except Exception: major = 1 HAS_EXLLAMA = False -CAN_EXLLAMA = major >= 8 +CAN_EXLLAMA = major >= 8 or IS_ROCM_SYSTEM V2 = os.getenv("EXLLAMA_VERSION", "2") == "2" # if V2 and int(os.getenv("WORLD_SIZE", "1")) > 1: # V2 = False From ac580f515bcccf58987a03a7a537e43c0ff1e6cc Mon Sep 17 00:00:00 2001 From: drbh Date: Fri, 26 Jan 2024 12:01:33 -0500 Subject: [PATCH 053/153] feat: add tokenizer-config-path to launcher args (#1495) This PR adds the `tokenizer-config-path` to the launcher and passes it to the router. Fixes: https://github.com/huggingface/text-generation-inference/pull/1427 --- docs/source/basic_tutorials/launcher.md | 8 ++++++++ launcher/src/main.rs | 11 +++++++++++ 2 files changed, 19 insertions(+) diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md index bafe3669..712b4fc4 100644 --- a/docs/source/basic_tutorials/launcher.md +++ 
b/docs/source/basic_tutorials/launcher.md @@ -354,6 +354,14 @@ Options: [env: NGROK_EDGE=] +``` +## TOKENIZER_CONFIG_PATH +```shell + --tokenizer-config-path + The path to the tokenizer config file. This path is used to load the tokenizer configuration which may include a `chat_template`. If not provided, the default config will be used from the model hub + + [env: TOKENIZER_CONFIG_PATH=] + ``` ## ENV ```shell diff --git a/launcher/src/main.rs b/launcher/src/main.rs index 7b7b8bf0..313d0123 100644 --- a/launcher/src/main.rs +++ b/launcher/src/main.rs @@ -370,6 +370,11 @@ struct Args { #[clap(long, env)] ngrok_edge: Option<String>, + /// The path to the tokenizer config file. This path is used to load the tokenizer configuration which may + /// include a `chat_template`. If not provided, the default config will be used from the model hub. + #[clap(long, env)] + tokenizer_config_path: Option<String>, + /// Display a lot of information about your runtime environment #[clap(long, short, action)] env: bool, @@ -1026,6 +1031,12 @@ fn spawn_webserver( args.model_id, ]; + // Tokenizer config path + if let Some(ref tokenizer_config_path) = args.tokenizer_config_path { + router_args.push("--tokenizer-config-path".to_string()); + router_args.push(tokenizer_config_path.to_string()); + } + // Model optional max batch total tokens if let Some(max_batch_total_tokens) = args.max_batch_total_tokens { router_args.push("--max-batch-total-tokens".to_string()); From efd4b97d156d9593f1c4138cea5443397bbf7b61 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Fri, 26 Jan 2024 19:04:57 +0100 Subject: [PATCH 054/153] v1.4.0 (#1494) --- .github/workflows/delete_doc_comment.yml | 12 - Cargo.lock | 12 +- Cargo.toml | 2 +- docs/openapi.json | 1294 ++++++++++++++++- .../basic_tutorials/gated_model_access.md | 2 +- docs/source/quicktour.md | 6 +- integration-tests/models/test_flash_phi.py | 8 +- integration-tests/pyproject.toml | 2 +- server/poetry.lock | 82 +- server/pyproject.toml | 2 +- server/requirements_cuda.txt | 18 +- server/requirements_rocm.txt | 18 +- server/tests/utils/test_layers.py | 37 +- server/text_generation_server/cli.py | 2 +- .../custom_modeling/flash_phi_modeling.py | 30 +- .../models/custom_modeling/mpt_modeling.py | 27 +- .../models/custom_modeling/phi_modeling.py | 62 +- .../models/flash_llama.py | 12 +- .../models/flash_phi.py | 12 +- server/text_generation_server/models/phi.py | 7 +- server/text_generation_server/utils/layers.py | 4 +- 21 files changed, 1496 insertions(+), 155 deletions(-) delete mode 100644 .github/workflows/delete_doc_comment.yml diff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml deleted file mode 100644 index 1cad807b..00000000 --- a/.github/workflows/delete_doc_comment.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Delete doc comment - -on: - pull_request: - types: [ closed ] - - -jobs: - delete: - uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main - with: - pr_number: ${{ github.event.number }} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 9048105e..689dc0ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1398,9 +1398,9 @@ dependencies = [ [[package]] name = "minijinja" -version = "1.0.10" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "208758577ef2c86cf5dd3e85730d161413ec3284e2d73b2ef65d9a24d9971bcb" +checksum = "fb5c5e3d2b4c0a6832bd3d571f7c19a7c1c1f05f11a6e85ae1a29f76be5f9455" dependencies = [ "serde", ] @@ -2811,7 +2811,7 @@ dependencies = [
[[package]] name = "text-generation-benchmark" -version = "1.3.4" +version = "1.4.0" dependencies = [ "average", "clap", @@ -2832,7 +2832,7 @@ dependencies = [ [[package]] name = "text-generation-client" -version = "1.3.4" +version = "1.4.0" dependencies = [ "futures", "grpc-metadata", @@ -2849,7 +2849,7 @@ dependencies = [ [[package]] name = "text-generation-launcher" -version = "1.3.4" +version = "1.4.0" dependencies = [ "clap", "ctrlc", @@ -2865,7 +2865,7 @@ dependencies = [ [[package]] name = "text-generation-router" -version = "1.3.4" +version = "1.4.0" dependencies = [ "async-stream", "axum", diff --git a/Cargo.toml b/Cargo.toml index 80e6e145..a328a368 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ members = [ resolver = "2" [workspace.package] -version = "1.3.4" +version = "1.4.0" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" diff --git a/docs/openapi.json b/docs/openapi.json index 9a9ed116..da3969df 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -1 +1,1293 @@ -{"openapi":"3.0.3","info":{"title":"Text Generation Inference","description":"Text Generation Webserver","contact":{"name":"Olivier Dehaene"},"license":{"name":"Apache 2.0","url":"https://www.apache.org/licenses/LICENSE-2.0"},"version":"1.3.4"},"paths":{"/":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens if `stream == false` or a stream of token if `stream == true`","description":"Generate tokens if `stream == false` or a stream of token if `stream == true`","operationId":"compat_generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CompatGenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}},"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"generate","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateResponse"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is 
overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/generate_stream":{"post":{"tags":["Text Generation Inference"],"summary":"Generate a stream of token using Server-Sent Events","description":"Generate a stream of token using Server-Sent Events","operationId":"generate_stream","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Generated Text","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/StreamResponse"}}}},"422":{"description":"Input validation error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"text/event-stream":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}},"/health":{"get":{"tags":["Text Generation Inference"],"summary":"Health check method","description":"Health check method","operationId":"health","responses":{"200":{"description":"Everything is working fine"},"503":{"description":"Text generation inference is down","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"unhealthy","error_type":"healthcheck"}}}}}}},"/info":{"get":{"tags":["Text Generation Inference"],"summary":"Text Generation Inference endpoint info","description":"Text Generation Inference endpoint info","operationId":"get_model_info","responses":{"200":{"description":"Served model info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Info"}}}}}}},"/metrics":{"get":{"tags":["Text Generation Inference"],"summary":"Prometheus metrics scrape endpoint","description":"Prometheus metrics scrape endpoint","operationId":"metrics","responses":{"200":{"description":"Prometheus Metrics","content":{"text/plain":{"schema":{"type":"string"}}}}}}},"/tokenize":{"post":{"tags":["Text Generation Inference"],"summary":"Tokenize inputs","description":"Tokenize inputs","operationId":"tokenize","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GenerateRequest"}}},"required":true},"responses":{"200":{"description":"Tokenized ids","content":{"application/json":{"schema":{"$ref":"#/components/schemas/TokenizeResponse"}}}},"404":{"description":"No tokenizer found","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"No fast tokenizer available"}}}}}}},"/v1/chat/completions":{"post":{"tags":["Text Generation Inference"],"summary":"Generate tokens","description":"Generate tokens","operationId":"chat_completions","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatRequest"}}},"required":true},"responses":{"200":{"description":"Generated 
Text","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ChatCompletionChunk"}}}},"422":{"description":"Input validation error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Input validation error"}}}},"424":{"description":"Generation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Request failed during generation"}}}},"429":{"description":"Model is overloaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Model is overloaded"}}}},"500":{"description":"Incomplete generation","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ErrorResponse"},"example":{"error":"Incomplete generation"}}}}}}}},"components":{"schemas":{"BestOfSequence":{"type":"object","required":["generated_text","finish_reason","generated_tokens","prefill","tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_text":{"type":"string","example":"test"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ChatCompletion":{"type":"object","required":["id","object","created","model","system_fingerprint","choices","usage"],"properties":{"choices":{"type":"array","items":{"$ref":"#/components/schemas/ChatCompletionComplete"}},"created":{"type":"integer","format":"int64","example":"1706270835","minimum":0},"id":{"type":"string"},"model":{"type":"string","example":"mistralai/Mistral-7B-Instruct-v0.2"},"object":{"type":"string"},"system_fingerprint":{"type":"string"},"usage":{"$ref":"#/components/schemas/Usage"}}},"ChatCompletionChoice":{"type":"object","required":["index","delta"],"properties":{"delta":{"$ref":"#/components/schemas/ChatCompletionDelta"},"finish_reason":{"type":"string","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"logprobs":{"type":"number","format":"float","nullable":true}}},"ChatCompletionChunk":{"type":"object","required":["id","object","created","model","system_fingerprint","choices"],"properties":{"choices":{"type":"array","items":{"$ref":"#/components/schemas/ChatCompletionChoice"}},"created":{"type":"integer","format":"int64","example":"1706270978","minimum":0},"id":{"type":"string"},"model":{"type":"string","example":"mistralai/Mistral-7B-Instruct-v0.2"},"object":{"type":"string"},"system_fingerprint":{"type":"string"}}},"ChatCompletionDelta":{"type":"object","required":["role","content"],"properties":{"content":{"type":"string","example":"What is Deep Learning?"},"role":{"type":"string","example":"user"}}},"ChatRequest":{"type":"object","required":["model"],"properties":{"frequency_penalty":{"type":"number","format":"float","description":"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.","example":"1.0","nullable":true},"logit_bias":{"type":"array","items":{"type":"number","format":"float"},"description":"UNUSED\nModify the likelihood of specified tokens appearing in the completion. 
Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.","nullable":true},"logprobs":{"type":"boolean","description":"Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.","example":"false","nullable":true},"max_tokens":{"type":"integer","format":"int32","description":"The maximum number of tokens that can be generated in the chat completion.","example":"32","nullable":true,"minimum":0},"messages":{"type":"array","items":{"$ref":"#/components/schemas/Message"},"description":"A list of messages comprising the conversation so far."},"model":{"type":"string","description":"UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.","example":"mistralai/Mistral-7B-Instruct-v0.2"},"n":{"type":"integer","format":"int32","description":"UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.","example":"2","nullable":true,"minimum":0},"presence_penalty":{"type":"number","format":"float","description":"UNUSED\nNumber between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics","example":0.1,"nullable":true},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"stream":{"type":"boolean"},"temperature":{"type":"number","format":"float","description":"What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.","example":1.0,"nullable":true},"top_logprobs":{"type":"integer","format":"int32","description":"UNUSED\nAn integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.","example":"5","nullable":true,"minimum":0},"top_p":{"type":"number","format":"float","description":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.","example":0.95,"nullable":true}}},"CompatGenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"},"stream":{"type":"boolean","default":"false"}}},"Details":{"type":"object","required":["finish_reason","generated_tokens","prefill","tokens"],"properties":{"best_of_sequences":{"type":"array","items":{"$ref":"#/components/schemas/BestOfSequence"},"nullable":true},"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"prefill":{"type":"array","items":{"$ref":"#/components/schemas/PrefillToken"}},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0},"tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}},"top_tokens":{"type":"array","items":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}}},"ErrorResponse":{"type":"object","required":["error","error_type"],"properties":{"error":{"type":"string"},"error_type":{"type":"string"}}},"FinishReason":{"type":"string","enum":["length","eos_token","stop_sequence"],"example":"Length"},"GenerateParameters":{"type":"object","properties":{"best_of":{"type":"integer","default":"null","example":1,"nullable":true,"minimum":0,"exclusiveMinimum":0},"decoder_input_details":{"type":"boolean","default":"true"},"details":{"type":"boolean","default":"true"},"do_sample":{"type":"boolean","default":"false","example":true},"max_new_tokens":{"type":"integer","format":"int32","default":"100","example":"20","nullable":true,"minimum":0},"repetition_penalty":{"type":"number","format":"float","default":"null","example":1.03,"nullable":true,"exclusiveMinimum":0},"return_full_text":{"type":"boolean","default":"null","example":false,"nullable":true},"seed":{"type":"integer","format":"int64","default":"null","example":"null","nullable":true,"minimum":0,"exclusiveMinimum":0},"stop":{"type":"array","items":{"type":"string"},"example":["photographer"],"maxItems":4},"temperature":{"type":"number","format":"float","default":"null","example":0.5,"nullable":true,"exclusiveMinimum":0},"top_k":{"type":"integer","format":"int32","default":"null","example":10,"nullable":true,"exclusiveMinimum":0},"top_n_tokens":{"type":"integer","format":"int32","default":"null","example":5,"nullable":true,"minimum":0,"exclusiveMinimum":0},"top_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"truncate":{"type":"integer","default":"null","example":"null","nullable":true,"minimum":0},"typical_p":{"type":"number","format":"float","default":"null","example":0.95,"nullable":true,"maximum":1,"exclusiveMinimum":0},"watermark":{"type":"boolean","default":"false","example":true}}},"GenerateRequest":{"type":"object","required":["inputs"],"properties":{"inputs":{"type":"string","example":"My name is Olivier and 
I"},"parameters":{"$ref":"#/components/schemas/GenerateParameters"}}},"GenerateResponse":{"type":"object","required":["generated_text"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/Details"}],"nullable":true},"generated_text":{"type":"string","example":"test"}}},"Info":{"type":"object","required":["model_id","model_dtype","model_device_type","max_concurrent_requests","max_best_of","max_stop_sequences","max_input_length","max_total_tokens","waiting_served_ratio","max_batch_total_tokens","max_waiting_tokens","validation_workers","version"],"properties":{"docker_label":{"type":"string","example":"null","nullable":true},"max_batch_total_tokens":{"type":"integer","format":"int32","example":"32000","minimum":0},"max_best_of":{"type":"integer","example":"2","minimum":0},"max_concurrent_requests":{"type":"integer","description":"Router Parameters","example":"128","minimum":0},"max_input_length":{"type":"integer","example":"1024","minimum":0},"max_stop_sequences":{"type":"integer","example":"4","minimum":0},"max_total_tokens":{"type":"integer","example":"2048","minimum":0},"max_waiting_tokens":{"type":"integer","example":"20","minimum":0},"model_device_type":{"type":"string","example":"cuda"},"model_dtype":{"type":"string","example":"torch.float16"},"model_id":{"type":"string","description":"Model info","example":"bigscience/blomm-560m"},"model_pipeline_tag":{"type":"string","example":"text-generation","nullable":true},"model_sha":{"type":"string","example":"e985a63cdc139290c5f700ff1929f0b5942cced2","nullable":true},"sha":{"type":"string","example":"null","nullable":true},"validation_workers":{"type":"integer","example":"2","minimum":0},"version":{"type":"string","description":"Router Info","example":"0.5.0"},"waiting_served_ratio":{"type":"number","format":"float","example":"1.2"}}},"Message":{"type":"object","required":["role","content"],"properties":{"content":{"type":"string","example":"My name is David and 
I"},"role":{"type":"string","example":"user"}}},"PrefillToken":{"type":"object","required":["id","text","logprob"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"text":{"type":"string","example":"test"}}},"SimpleToken":{"type":"object","required":["id","text","start","stop"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"start":{"type":"integer","example":0,"minimum":0},"stop":{"type":"integer","example":2,"minimum":0},"text":{"type":"string","example":"test"}}},"StreamDetails":{"type":"object","required":["finish_reason","generated_tokens"],"properties":{"finish_reason":{"$ref":"#/components/schemas/FinishReason"},"generated_tokens":{"type":"integer","format":"int32","example":1,"minimum":0},"seed":{"type":"integer","format":"int64","example":42,"nullable":true,"minimum":0}}},"StreamResponse":{"type":"object","required":["index","token"],"properties":{"details":{"allOf":[{"$ref":"#/components/schemas/StreamDetails"}],"default":"null","nullable":true},"generated_text":{"type":"string","default":"null","example":"test","nullable":true},"index":{"type":"integer","format":"int32","minimum":0},"token":{"$ref":"#/components/schemas/Token"},"top_tokens":{"type":"array","items":{"$ref":"#/components/schemas/Token"}}}},"Token":{"type":"object","required":["id","text","logprob","special"],"properties":{"id":{"type":"integer","format":"int32","example":0,"minimum":0},"logprob":{"type":"number","format":"float","example":-0.34,"nullable":true},"special":{"type":"boolean","example":"false"},"text":{"type":"string","example":"test"}}},"TokenizeResponse":{"type":"array","items":{"$ref":"#/components/schemas/SimpleToken"}}}},"tags":[{"name":"Text Generation Inference","description":"Hugging Face Text Generation Inference API"}]} \ No newline at end of file +{ + "openapi": "3.0.3", + "info": { + "title": "Text Generation Inference", + "description": "Text Generation Webserver", + "contact": { + "name": "Olivier Dehaene" + }, + "license": { + "name": "Apache 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + }, + "version": "1.4.0" + }, + "paths": { + "/": { + "post": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", + "description": "Generate tokens if `stream == false` or a stream of token if `stream == true`", + "operationId": "compat_generate", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompatGenerateRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Generated Text", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateResponse" + } + }, + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/StreamResponse" + } + } + } + }, + "422": { + "description": "Input validation error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Input validation error" + } + } + } + }, + "424": { + "description": "Generation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Request failed during generation" + } + } + } + }, + "429": { + "description": "Model is overloaded", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Model is overloaded" + } + } + } + }, + "500": { + "description": "Incomplete generation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Incomplete generation" + } + } + } + } + } + } + }, + "/generate": { + "post": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Generate tokens", + "description": "Generate tokens", + "operationId": "generate", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Generated Text", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateResponse" + } + } + } + }, + "422": { + "description": "Input validation error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Input validation error" + } + } + } + }, + "424": { + "description": "Generation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Request failed during generation" + } + } + } + }, + "429": { + "description": "Model is overloaded", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Model is overloaded" + } + } + } + }, + "500": { + "description": "Incomplete generation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Incomplete generation" + } + } + } + } + } + } + }, + "/generate_stream": { + "post": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Generate a stream of token using Server-Sent Events", + "description": "Generate a stream of token using Server-Sent Events", + "operationId": "generate_stream", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Generated Text", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/StreamResponse" + } + } + } + }, + "422": { + "description": "Input validation error", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Input validation error" + } + } + } + }, + "424": { + "description": "Generation Error", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Request failed during generation" + } + } + } + }, + "429": { + "description": "Model is overloaded", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Model is overloaded" + } + } + } + }, + "500": { + "description": "Incomplete generation", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Incomplete generation" + } + } + } + } + } + } + }, + "/health": { + "get": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Health check method", + "description": "Health check method", + "operationId": "health", + "responses": { + "200": { + "description": "Everything is working fine" + }, + 
"503": { + "description": "Text generation inference is down", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "unhealthy", + "error_type": "healthcheck" + } + } + } + } + } + } + }, + "/info": { + "get": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Text Generation Inference endpoint info", + "description": "Text Generation Inference endpoint info", + "operationId": "get_model_info", + "responses": { + "200": { + "description": "Served model info", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Info" + } + } + } + } + } + } + }, + "/metrics": { + "get": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Prometheus metrics scrape endpoint", + "description": "Prometheus metrics scrape endpoint", + "operationId": "metrics", + "responses": { + "200": { + "description": "Prometheus Metrics", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + }, + "/tokenize": { + "post": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Tokenize inputs", + "description": "Tokenize inputs", + "operationId": "tokenize", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Tokenized ids", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TokenizeResponse" + } + } + } + }, + "404": { + "description": "No tokenizer found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "No fast tokenizer available" + } + } + } + } + } + } + }, + "/v1/chat/completions": { + "post": { + "tags": [ + "Text Generation Inference" + ], + "summary": "Generate tokens", + "description": "Generate tokens", + "operationId": "chat_completions", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Generated Text", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatCompletionChunk" + } + } + } + }, + "422": { + "description": "Input validation error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Input validation error" + } + } + } + }, + "424": { + "description": "Generation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Request failed during generation" + } + } + } + }, + "429": { + "description": "Model is overloaded", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Model is overloaded" + } + } + } + }, + "500": { + "description": "Incomplete generation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorResponse" + }, + "example": { + "error": "Incomplete generation" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "BestOfSequence": { + "type": "object", + "required": [ + "generated_text", + "finish_reason", + "generated_tokens", + "prefill", + "tokens" + ], + "properties": { + "finish_reason": { + "$ref": "#/components/schemas/FinishReason" + }, + "generated_text": { + "type": 
"string", + "example": "test" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "prefill": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PrefillToken" + } + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Token" + } + }, + "top_tokens": { + "type": "array", + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Token" + } + } + } + } + }, + "ChatCompletion": { + "type": "object", + "required": [ + "id", + "object", + "created", + "model", + "system_fingerprint", + "choices", + "usage" + ], + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ChatCompletionComplete" + } + }, + "created": { + "type": "integer", + "format": "int64", + "example": "1706270835", + "minimum": 0 + }, + "id": { + "type": "string" + }, + "model": { + "type": "string", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "object": { + "type": "string" + }, + "system_fingerprint": { + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/Usage" + } + } + }, + "ChatCompletionChoice": { + "type": "object", + "required": [ + "index", + "delta" + ], + "properties": { + "delta": { + "$ref": "#/components/schemas/ChatCompletionDelta" + }, + "finish_reason": { + "type": "string", + "nullable": true + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "logprobs": { + "type": "number", + "format": "float", + "nullable": true + } + } + }, + "ChatCompletionChunk": { + "type": "object", + "required": [ + "id", + "object", + "created", + "model", + "system_fingerprint", + "choices" + ], + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ChatCompletionChoice" + } + }, + "created": { + "type": "integer", + "format": "int64", + "example": "1706270978", + "minimum": 0 + }, + "id": { + "type": "string" + }, + "model": { + "type": "string", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "object": { + "type": "string" + }, + "system_fingerprint": { + "type": "string" + } + } + }, + "ChatCompletionDelta": { + "type": "object", + "required": [ + "role", + "content" + ], + "properties": { + "content": { + "type": "string", + "example": "What is Deep Learning?" + }, + "role": { + "type": "string", + "example": "user" + } + } + }, + "ChatRequest": { + "type": "object", + "required": [ + "model" + ], + "properties": { + "frequency_penalty": { + "type": "number", + "format": "float", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.", + "example": "1.0", + "nullable": true + }, + "logit_bias": { + "type": "array", + "items": { + "type": "number", + "format": "float" + }, + "description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.", + "nullable": true + }, + "logprobs": { + "type": "boolean", + "description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.", + "example": "false", + "nullable": true + }, + "max_tokens": { + "type": "integer", + "format": "int32", + "description": "The maximum number of tokens that can be generated in the chat completion.", + "example": "32", + "nullable": true, + "minimum": 0 + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Message" + }, + "description": "A list of messages comprising the conversation so far." + }, + "model": { + "type": "string", + "description": "UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", + "example": "mistralai/Mistral-7B-Instruct-v0.2" + }, + "n": { + "type": "integer", + "format": "int32", + "description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.", + "example": "2", + "nullable": true, + "minimum": 0 + }, + "presence_penalty": { + "type": "number", + "format": "float", + "description": "UNUSED\nNumber between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics", + "example": 0.1, + "nullable": true + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "stream": { + "type": "boolean" + }, + "temperature": { + "type": "number", + "format": "float", + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.", + "example": 1.0, + "nullable": true + }, + "top_logprobs": { + "type": "integer", + "format": "int32", + "description": "UNUSED\nAn integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.", + "example": "5", + "nullable": true, + "minimum": 0 + }, + "top_p": { + "type": "number", + "format": "float", + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.", + "example": 0.95, + "nullable": true + } + } + }, + "CompatGenerateRequest": { + "type": "object", + "required": [ + "inputs" + ], + "properties": { + "inputs": { + "type": "string", + "example": "My name is Olivier and I" + }, + "parameters": { + "$ref": "#/components/schemas/GenerateParameters" + }, + "stream": { + "type": "boolean", + "default": "false" + } + } + }, + "Details": { + "type": "object", + "required": [ + "finish_reason", + "generated_tokens", + "prefill", + "tokens" + ], + "properties": { + "best_of_sequences": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BestOfSequence" + }, + "nullable": true + }, + "finish_reason": { + "$ref": "#/components/schemas/FinishReason" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "prefill": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PrefillToken" + } + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Token" + } + }, + "top_tokens": { + "type": "array", + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Token" + } + } + } + } + }, + "ErrorResponse": { + "type": "object", + "required": [ + "error", + "error_type" + ], + "properties": { + "error": { + "type": "string" + }, + "error_type": { + "type": "string" + } + } + }, + "FinishReason": { + "type": "string", + "enum": [ + "length", + "eos_token", + "stop_sequence" + ], + "example": "Length" + }, + "GenerateParameters": { + "type": "object", + "properties": { + "best_of": { + "type": "integer", + "default": "null", + "example": 1, + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "decoder_input_details": { + "type": "boolean", + "default": "true" + }, + "details": { + "type": "boolean", + "default": "true" + }, + "do_sample": { + "type": "boolean", + "default": "false", + "example": true + }, + "max_new_tokens": { + "type": "integer", + "format": "int32", + "default": "100", + "example": "20", + "nullable": true, + "minimum": 0 + }, + "repetition_penalty": { + "type": "number", + "format": "float", + "default": "null", + "example": 1.03, + "nullable": true, + "exclusiveMinimum": 0 + }, + "return_full_text": { + "type": "boolean", + "default": "null", + "example": false, + "nullable": true + }, + "seed": { + "type": "integer", + "format": "int64", + "default": "null", + "example": "null", + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "stop": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "photographer" + ], + "maxItems": 4 + }, + "temperature": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.5, + "nullable": true, + "exclusiveMinimum": 0 + }, + "top_k": { + "type": "integer", + "format": "int32", + "default": "null", + "example": 10, + "nullable": true, + "exclusiveMinimum": 0 + }, + "top_n_tokens": { + "type": "integer", + "format": "int32", + "default": "null", + "example": 5, + "nullable": true, + "minimum": 0, + "exclusiveMinimum": 0 + }, + "top_p": { + "type": "number", + "format": "float", + "default": "null", + "example": 0.95, + "nullable": true, + "maximum": 1, + "exclusiveMinimum": 0 + }, + "truncate": { + "type": "integer", + "default": "null", + "example": "null", + "nullable": true, + "minimum": 0 + }, + "typical_p": { + "type": 
"number", + "format": "float", + "default": "null", + "example": 0.95, + "nullable": true, + "maximum": 1, + "exclusiveMinimum": 0 + }, + "watermark": { + "type": "boolean", + "default": "false", + "example": true + } + } + }, + "GenerateRequest": { + "type": "object", + "required": [ + "inputs" + ], + "properties": { + "inputs": { + "type": "string", + "example": "My name is Olivier and I" + }, + "parameters": { + "$ref": "#/components/schemas/GenerateParameters" + } + } + }, + "GenerateResponse": { + "type": "object", + "required": [ + "generated_text" + ], + "properties": { + "details": { + "allOf": [ + { + "$ref": "#/components/schemas/Details" + } + ], + "nullable": true + }, + "generated_text": { + "type": "string", + "example": "test" + } + } + }, + "Info": { + "type": "object", + "required": [ + "model_id", + "model_dtype", + "model_device_type", + "max_concurrent_requests", + "max_best_of", + "max_stop_sequences", + "max_input_length", + "max_total_tokens", + "waiting_served_ratio", + "max_batch_total_tokens", + "max_waiting_tokens", + "validation_workers", + "version" + ], + "properties": { + "docker_label": { + "type": "string", + "example": "null", + "nullable": true + }, + "max_batch_total_tokens": { + "type": "integer", + "format": "int32", + "example": "32000", + "minimum": 0 + }, + "max_best_of": { + "type": "integer", + "example": "2", + "minimum": 0 + }, + "max_concurrent_requests": { + "type": "integer", + "description": "Router Parameters", + "example": "128", + "minimum": 0 + }, + "max_input_length": { + "type": "integer", + "example": "1024", + "minimum": 0 + }, + "max_stop_sequences": { + "type": "integer", + "example": "4", + "minimum": 0 + }, + "max_total_tokens": { + "type": "integer", + "example": "2048", + "minimum": 0 + }, + "max_waiting_tokens": { + "type": "integer", + "example": "20", + "minimum": 0 + }, + "model_device_type": { + "type": "string", + "example": "cuda" + }, + "model_dtype": { + "type": "string", + "example": "torch.float16" + }, + "model_id": { + "type": "string", + "description": "Model info", + "example": "bigscience/blomm-560m" + }, + "model_pipeline_tag": { + "type": "string", + "example": "text-generation", + "nullable": true + }, + "model_sha": { + "type": "string", + "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", + "nullable": true + }, + "sha": { + "type": "string", + "example": "null", + "nullable": true + }, + "validation_workers": { + "type": "integer", + "example": "2", + "minimum": 0 + }, + "version": { + "type": "string", + "description": "Router Info", + "example": "0.5.0" + }, + "waiting_served_ratio": { + "type": "number", + "format": "float", + "example": "1.2" + } + } + }, + "Message": { + "type": "object", + "required": [ + "role", + "content" + ], + "properties": { + "content": { + "type": "string", + "example": "My name is David and I" + }, + "role": { + "type": "string", + "example": "user" + } + } + }, + "PrefillToken": { + "type": "object", + "required": [ + "id", + "text", + "logprob" + ], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "logprob": { + "type": "number", + "format": "float", + "example": -0.34, + "nullable": true + }, + "text": { + "type": "string", + "example": "test" + } + } + }, + "SimpleToken": { + "type": "object", + "required": [ + "id", + "text", + "start", + "stop" + ], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "start": { + "type": "integer", + "example": 0, + 
"minimum": 0 + }, + "stop": { + "type": "integer", + "example": 2, + "minimum": 0 + }, + "text": { + "type": "string", + "example": "test" + } + } + }, + "StreamDetails": { + "type": "object", + "required": [ + "finish_reason", + "generated_tokens" + ], + "properties": { + "finish_reason": { + "$ref": "#/components/schemas/FinishReason" + }, + "generated_tokens": { + "type": "integer", + "format": "int32", + "example": 1, + "minimum": 0 + }, + "seed": { + "type": "integer", + "format": "int64", + "example": 42, + "nullable": true, + "minimum": 0 + } + } + }, + "StreamResponse": { + "type": "object", + "required": [ + "index", + "token" + ], + "properties": { + "details": { + "allOf": [ + { + "$ref": "#/components/schemas/StreamDetails" + } + ], + "default": "null", + "nullable": true + }, + "generated_text": { + "type": "string", + "default": "null", + "example": "test", + "nullable": true + }, + "index": { + "type": "integer", + "format": "int32", + "minimum": 0 + }, + "token": { + "$ref": "#/components/schemas/Token" + }, + "top_tokens": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Token" + } + } + } + }, + "Token": { + "type": "object", + "required": [ + "id", + "text", + "logprob", + "special" + ], + "properties": { + "id": { + "type": "integer", + "format": "int32", + "example": 0, + "minimum": 0 + }, + "logprob": { + "type": "number", + "format": "float", + "example": -0.34, + "nullable": true + }, + "special": { + "type": "boolean", + "example": "false" + }, + "text": { + "type": "string", + "example": "test" + } + } + }, + "TokenizeResponse": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SimpleToken" + } + } + } + }, + "tags": [ + { + "name": "Text Generation Inference", + "description": "Hugging Face Text Generation Inference API" + } + ] +} \ No newline at end of file diff --git a/docs/source/basic_tutorials/gated_model_access.md b/docs/source/basic_tutorials/gated_model_access.md index 1437717f..060d177d 100644 --- a/docs/source/basic_tutorials/gated_model_access.md +++ b/docs/source/basic_tutorials/gated_model_access.md @@ -19,6 +19,6 @@ docker run --gpus all \ --shm-size 1g \ -e HUGGING_FACE_HUB_TOKEN=$token \ -p 8080:80 \ - -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 \ + -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 \ --model-id $model ``` diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index e9a33f04..78ebb8e2 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -8,7 +8,7 @@ Let's say you want to deploy [Falcon-7B Instruct](https://huggingface.co/tiiuae/ model=tiiuae/falcon-7b-instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run -docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 --model-id $model +docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id $model ``` @@ -20,7 +20,7 @@ To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://d TGI also supports ROCm-enabled AMD GPUs (only MI210 and MI250 are tested), details are available in the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). 
To launch TGI on ROCm GPUs, please use instead: ```bash -docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3-rocm --model-id $model +docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4-rocm --model-id $model ``` Once TGI is running, you can use the `generate` endpoint by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint. @@ -91,7 +91,7 @@ curl 127.0.0.1:8080/generate \ To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more. ```bash -docker run ghcr.io/huggingface/text-generation-inference:1.3 --help +docker run ghcr.io/huggingface/text-generation-inference:1.4 --help ``` diff --git a/integration-tests/models/test_flash_phi.py b/integration-tests/models/test_flash_phi.py index 6391f2a1..0987b3a1 100644 --- a/integration-tests/models/test_flash_phi.py +++ b/integration-tests/models/test_flash_phi.py @@ -21,7 +21,7 @@ async def test_flash_phi(flash_phi, response_snapshot): ) assert response.details.generated_tokens == 10 - assert response.generated_text == ": {request}\")\n response = self" + assert response.generated_text == ': {request}")\n response = self' assert response == response_snapshot @@ -52,14 +52,12 @@ async def test_flash_phi_all_params(flash_phi, response_snapshot): @pytest.mark.asyncio @pytest.mark.private async def test_flash_phi_load(flash_phi, generate_load, response_snapshot): - responses = await generate_load( - flash_phi, "Test request", max_new_tokens=10, n=4 - ) + responses = await generate_load(flash_phi, "Test request", max_new_tokens=10, n=4) assert len(responses) == 4 assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"{[r.generated_text for r in responses]}" - assert responses[0].generated_text == ": {request}\")\n response = self" + assert responses[0].generated_text == ': {request}")\n response = self' assert responses == response_snapshot diff --git a/integration-tests/pyproject.toml b/integration-tests/pyproject.toml index f6929587..f0c5add9 100644 --- a/integration-tests/pyproject.toml +++ b/integration-tests/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-integration-tests" -version = "1.3.4" +version = "1.4.0" description = "Text Generation Inference integration tests" authors = ["Nicolas Patry "] diff --git a/server/poetry.lock b/server/poetry.lock index 360eeb36..16a28a01 100644 --- a/server/poetry.lock +++ b/server/poetry.lock @@ -1812,13 +1812,13 @@ xmp = ["defusedxml"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = 
"sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -1886,51 +1886,51 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pyarrow" -version = "15.0.2" +version = "16.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-15.0.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:88b340f0a1d05b5ccc3d2d986279045655b1fe8e41aba6ca44ea28da0d1455d8"}, - {file = "pyarrow-15.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eaa8f96cecf32da508e6c7f69bb8401f03745c050c1dd42ec2596f2e98deecac"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23c6753ed4f6adb8461e7c383e418391b8d8453c5d67e17f416c3a5d5709afbd"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f639c059035011db8c0497e541a8a45d98a58dbe34dc8fadd0ef128f2cee46e5"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:290e36a59a0993e9a5224ed2fb3e53375770f07379a0ea03ee2fce2e6d30b423"}, - {file = "pyarrow-15.0.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06c2bb2a98bc792f040bef31ad3e9be6a63d0cb39189227c08a7d955db96816e"}, - {file = "pyarrow-15.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:f7a197f3670606a960ddc12adbe8075cea5f707ad7bf0dffa09637fdbb89f76c"}, - {file = "pyarrow-15.0.2-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5f8bc839ea36b1f99984c78e06e7a06054693dc2af8920f6fb416b5bca9944e4"}, - {file = "pyarrow-15.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5e81dfb4e519baa6b4c80410421528c214427e77ca0ea9461eb4097c328fa33"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4f240852b302a7af4646c8bfe9950c4691a419847001178662a98915fd7ee7"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e7d9cfb5a1e648e172428c7a42b744610956f3b70f524aa3a6c02a448ba853e"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2d4f905209de70c0eb5b2de6763104d5a9a37430f137678edfb9a675bac9cd98"}, - {file = "pyarrow-15.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90adb99e8ce5f36fbecbbc422e7dcbcbed07d985eed6062e459e23f9e71fd197"}, - {file = "pyarrow-15.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:b116e7fd7889294cbd24eb90cd9bdd3850be3738d61297855a71ac3b8124ee38"}, - {file = "pyarrow-15.0.2-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:25335e6f1f07fdaa026a61c758ee7d19ce824a866b27bba744348fa73bb5a440"}, - {file = "pyarrow-15.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90f19e976d9c3d8e73c80be84ddbe2f830b6304e4c576349d9360e335cd627fc"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22366249bf5fd40ddacc4f03cd3160f2d7c247692945afb1899bab8a140ddfb"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2a335198f886b07e4b5ea16d08ee06557e07db54a8400cc0d03c7f6a22f785f"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e6d459c0c22f0b9c810a3917a1de3ee704b021a5fb8b3bacf968eece6df098f"}, - {file = "pyarrow-15.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:033b7cad32198754d93465dcfb71d0ba7cb7cd5c9afd7052cab7214676eec38b"}, - {file = 
"pyarrow-15.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:29850d050379d6e8b5a693098f4de7fd6a2bea4365bfd073d7c57c57b95041ee"}, - {file = "pyarrow-15.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:7167107d7fb6dcadb375b4b691b7e316f4368f39f6f45405a05535d7ad5e5058"}, - {file = "pyarrow-15.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e85241b44cc3d365ef950432a1b3bd44ac54626f37b2e3a0cc89c20e45dfd8bf"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:248723e4ed3255fcd73edcecc209744d58a9ca852e4cf3d2577811b6d4b59818"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ff3bdfe6f1b81ca5b73b70a8d482d37a766433823e0c21e22d1d7dde76ca33f"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f3d77463dee7e9f284ef42d341689b459a63ff2e75cee2b9302058d0d98fe142"}, - {file = "pyarrow-15.0.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:8c1faf2482fb89766e79745670cbca04e7018497d85be9242d5350cba21357e1"}, - {file = "pyarrow-15.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:28f3016958a8e45a1069303a4a4f6a7d4910643fc08adb1e2e4a7ff056272ad3"}, - {file = "pyarrow-15.0.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:89722cb64286ab3d4daf168386f6968c126057b8c7ec3ef96302e81d8cdb8ae4"}, - {file = "pyarrow-15.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0ba387705044b3ac77b1b317165c0498299b08261d8122c96051024f953cd5"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad2459bf1f22b6a5cdcc27ebfd99307d5526b62d217b984b9f5c974651398832"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58922e4bfece8b02abf7159f1f53a8f4d9f8e08f2d988109126c17c3bb261f22"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:adccc81d3dc0478ea0b498807b39a8d41628fa9210729b2f718b78cb997c7c91"}, - {file = "pyarrow-15.0.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8bd2baa5fe531571847983f36a30ddbf65261ef23e496862ece83bdceb70420d"}, - {file = "pyarrow-15.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6669799a1d4ca9da9c7e06ef48368320f5856f36f9a4dd31a11839dda3f6cc8c"}, - {file = "pyarrow-15.0.2.tar.gz", hash = "sha256:9c9bc803cb3b7bfacc1e96ffbfd923601065d9d3f911179d81e72d99fd74a3d9"}, + {file = "pyarrow-16.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:22a1fdb1254e5095d629e29cd1ea98ed04b4bbfd8e42cc670a6b639ccc208b60"}, + {file = "pyarrow-16.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:574a00260a4ed9d118a14770edbd440b848fcae5a3024128be9d0274dbcaf858"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0815d0ddb733b8c1b53a05827a91f1b8bde6240f3b20bf9ba5d650eb9b89cdf"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df0080339387b5d30de31e0a149c0c11a827a10c82f0c67d9afae3981d1aabb7"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:edf38cce0bf0dcf726e074159c60516447e4474904c0033f018c1f33d7dac6c5"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91d28f9a40f1264eab2af7905a4d95320ac2f287891e9c8b0035f264fe3c3a4b"}, + {file = "pyarrow-16.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:99af421ee451a78884d7faea23816c429e263bd3618b22d38e7992c9ce2a7ad9"}, + {file = "pyarrow-16.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = 
"sha256:d22d0941e6c7bafddf5f4c0662e46f2075850f1c044bf1a03150dd9e189427ce"}, + {file = "pyarrow-16.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:266ddb7e823f03733c15adc8b5078db2df6980f9aa93d6bb57ece615df4e0ba7"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cc23090224b6594f5a92d26ad47465af47c1d9c079dd4a0061ae39551889efe"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56850a0afe9ef37249d5387355449c0f94d12ff7994af88f16803a26d38f2016"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:705db70d3e2293c2f6f8e84874b5b775f690465798f66e94bb2c07bab0a6bb55"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:5448564754c154997bc09e95a44b81b9e31ae918a86c0fcb35c4aa4922756f55"}, + {file = "pyarrow-16.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:729f7b262aa620c9df8b9967db96c1575e4cfc8c25d078a06968e527b8d6ec05"}, + {file = "pyarrow-16.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fb8065dbc0d051bf2ae2453af0484d99a43135cadabacf0af588a3be81fbbb9b"}, + {file = "pyarrow-16.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:20ce707d9aa390593ea93218b19d0eadab56390311cb87aad32c9a869b0e958c"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5823275c8addbbb50cd4e6a6839952682a33255b447277e37a6f518d6972f4e1"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ab8b9050752b16a8b53fcd9853bf07d8daf19093533e990085168f40c64d978"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:42e56557bc7c5c10d3e42c3b32f6cff649a29d637e8f4e8b311d334cc4326730"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a7abdee4a4a7cfa239e2e8d721224c4b34ffe69a0ca7981354fe03c1328789b"}, + {file = "pyarrow-16.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:ef2f309b68396bcc5a354106741d333494d6a0d3e1951271849787109f0229a6"}, + {file = "pyarrow-16.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ed66e5217b4526fa3585b5e39b0b82f501b88a10d36bd0d2a4d8aa7b5a48e2df"}, + {file = "pyarrow-16.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc8814310486f2a73c661ba8354540f17eef51e1b6dd090b93e3419d3a097b3a"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c2f5e239db7ed43e0ad2baf46a6465f89c824cc703f38ef0fde927d8e0955f7"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f293e92d1db251447cb028ae12f7bc47526e4649c3a9924c8376cab4ad6b98bd"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:dd9334a07b6dc21afe0857aa31842365a62eca664e415a3f9536e3a8bb832c07"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d91073d1e2fef2c121154680e2ba7e35ecf8d4969cc0af1fa6f14a8675858159"}, + {file = "pyarrow-16.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:71d52561cd7aefd22cf52538f262850b0cc9e4ec50af2aaa601da3a16ef48877"}, + {file = "pyarrow-16.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b93c9a50b965ee0bf4fef65e53b758a7e8dcc0c2d86cebcc037aaaf1b306ecc0"}, + {file = "pyarrow-16.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d831690844706e374c455fba2fb8cfcb7b797bfe53ceda4b54334316e1ac4fa4"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:35692ce8ad0b8c666aa60f83950957096d92f2a9d8d7deda93fb835e6053307e"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dd3151d098e56f16a8389c1247137f9e4c22720b01c6f3aa6dec29a99b74d80"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bd40467bdb3cbaf2044ed7a6f7f251c8f941c8b31275aaaf88e746c4f3ca4a7a"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:00a1dcb22ad4ceb8af87f7bd30cc3354788776c417f493089e0a0af981bc8d80"}, + {file = "pyarrow-16.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fda9a7cebd1b1d46c97b511f60f73a5b766a6de4c5236f144f41a5d5afec1f35"}, + {file = "pyarrow-16.0.0.tar.gz", hash = "sha256:59bb1f1edbbf4114c72415f039f1359f1a57d166a331c3229788ccbfbb31689a"}, ] [package.dependencies] -numpy = ">=1.16.6,<2" +numpy = ">=1.16.6" [[package]] name = "pyarrow-hotfix" diff --git a/server/pyproject.toml b/server/pyproject.toml index d6806848..60bd399a 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "text-generation-server" -version = "1.3.4" +version = "1.4.0" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene "] diff --git a/server/requirements_cuda.txt b/server/requirements_cuda.txt index 694242e1..e9267512 100644 --- a/server/requirements_cuda.txt +++ b/server/requirements_cuda.txt @@ -13,11 +13,11 @@ grpc-interceptor==0.15.4 ; python_version >= "3.9" and python_version < "3.13" grpcio-reflection==1.60.0 ; python_version >= "3.9" and python_version < "3.13" grpcio-status==1.60.0 ; python_version >= "3.9" and python_version < "3.13" grpcio==1.60.0 ; python_version >= "3.9" and python_version < "3.13" -hf-transfer==0.1.4 ; python_version >= "3.9" and python_version < "3.13" +hf-transfer==0.1.5 ; python_version >= "3.9" and python_version < "3.13" huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" idna==3.6 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" -numpy==1.26.2 ; python_version >= "3.9" and python_version < "3.13" +numpy==1.26.3 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-api==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-grpc==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-http==1.15.0 ; python_version >= "3.9" and python_version < "3.13" @@ -28,18 +28,18 @@ opentelemetry-proto==1.15.0 ; python_version >= "3.9" and python_version < "3.13 opentelemetry-sdk==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-semantic-conventions==0.36b0 ; python_version >= "3.9" and python_version < "3.13" packaging==23.2 ; python_version >= "3.9" and python_version < "3.13" -pillow==10.1.0 ; python_version >= "3.9" and python_version < "3.13" -protobuf==4.25.1 ; python_version >= "3.9" and python_version < "3.13" +pillow==10.2.0 ; python_version >= "3.9" and python_version < "3.13" +protobuf==4.25.2 ; python_version >= "3.9" and python_version < "3.13" pyyaml==6.0.1 ; python_version >= "3.9" and python_version < "3.13" -regex==2023.10.3 ; python_version >= "3.9" and python_version < "3.13" +regex==2023.12.25 ; python_version >= "3.9" and python_version < "3.13" requests==2.31.0 ; python_version >= "3.9" and python_version < "3.13" safetensors==0.3.3 ; python_version >= "3.9" and python_version < "3.13" -scipy==1.11.4 ; python_version >= "3.9" and 
python_version < "3.13" +scipy==1.12.0 ; python_version >= "3.9" and python_version < "3.13" sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" -setuptools==69.0.2 ; python_version >= "3.9" and python_version < "3.13" -tokenizers==0.15.0 ; python_version >= "3.9" and python_version < "3.13" +setuptools==69.0.3 ; python_version >= "3.9" and python_version < "3.13" +tokenizers==0.15.1 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.36.1 ; python_version >= "3.9" and python_version < "3.13" +transformers==4.37.1 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" typing-extensions==4.9.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.1.0 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/requirements_rocm.txt b/server/requirements_rocm.txt index e0495fde..053429c9 100644 --- a/server/requirements_rocm.txt +++ b/server/requirements_rocm.txt @@ -12,11 +12,11 @@ grpc-interceptor==0.15.4 ; python_version >= "3.9" and python_version < "3.13" grpcio-reflection==1.60.0 ; python_version >= "3.9" and python_version < "3.13" grpcio-status==1.60.0 ; python_version >= "3.9" and python_version < "3.13" grpcio==1.60.0 ; python_version >= "3.9" and python_version < "3.13" -hf-transfer==0.1.4 ; python_version >= "3.9" and python_version < "3.13" +hf-transfer==0.1.5 ; python_version >= "3.9" and python_version < "3.13" huggingface-hub==0.19.4 ; python_version >= "3.9" and python_version < "3.13" idna==3.6 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" -numpy==1.26.2 ; python_version >= "3.9" and python_version < "3.13" +numpy==1.26.3 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-api==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-grpc==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-http==1.15.0 ; python_version >= "3.9" and python_version < "3.13" @@ -27,18 +27,18 @@ opentelemetry-proto==1.15.0 ; python_version >= "3.9" and python_version < "3.13 opentelemetry-sdk==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-semantic-conventions==0.36b0 ; python_version >= "3.9" and python_version < "3.13" packaging==23.2 ; python_version >= "3.9" and python_version < "3.13" -pillow==10.1.0 ; python_version >= "3.9" and python_version < "3.13" -protobuf==4.25.1 ; python_version >= "3.9" and python_version < "3.13" +pillow==10.2.0 ; python_version >= "3.9" and python_version < "3.13" +protobuf==4.25.2 ; python_version >= "3.9" and python_version < "3.13" pyyaml==6.0.1 ; python_version >= "3.9" and python_version < "3.13" -regex==2023.10.3 ; python_version >= "3.9" and python_version < "3.13" +regex==2023.12.25 ; python_version >= "3.9" and python_version < "3.13" requests==2.31.0 ; python_version >= "3.9" and python_version < "3.13" safetensors==0.3.3 ; python_version >= "3.9" and python_version < "3.13" -scipy==1.11.4 ; python_version >= "3.9" and python_version < "3.13" +scipy==1.12.0 ; python_version >= "3.9" and python_version < "3.13" sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" -setuptools==69.0.2 ; python_version >= "3.9" and python_version < "3.13" -tokenizers==0.15.0 ; python_version >= "3.9" and python_version < "3.13" +setuptools==69.0.3 ; 
python_version >= "3.9" and python_version < "3.13" +tokenizers==0.15.1 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.36.1 ; python_version >= "3.9" and python_version < "3.13" +transformers==4.37.1 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" typing-extensions==4.9.0 ; python_version >= "3.9" and python_version < "3.13" urllib3==2.1.0 ; python_version >= "3.9" and python_version < "3.13" diff --git a/server/tests/utils/test_layers.py b/server/tests/utils/test_layers.py index 0a9fecd1..93a0e982 100644 --- a/server/tests/utils/test_layers.py +++ b/server/tests/utils/test_layers.py @@ -3,24 +3,27 @@ from text_generation_server.utils.layers import ( TensorParallelEmbedding, ) + class ProcessGroup: def __init__(self, rank: int, world_size: int): self._rank = rank self.world_size = world_size - def size(self)->int: + def size(self) -> int: return self.world_size - def rank(self)->int: + def rank(self) -> int: return self._rank + class Weights: def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int): - self.weight = torch.arange(vocab_size*hidden_dim).float().view(vocab_size, hidden_dim) + self.weight = ( + torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim) + ) self.process_group = ProcessGroup(rank, world_size) - - def get_partial_sharded(self, name:str, dim: int): + def get_partial_sharded(self, name: str, dim: int): assert dim == 0 rank = self.process_group.rank() @@ -35,10 +38,11 @@ class Weights: def get_shape(self, name: str): return self.weight.shape + def test_weight_hub_files_offline_error(): - vocab_size= 17 - weights = Weights(rank=0, world_size=1, vocab_size = vocab_size,hidden_dim = 256) + vocab_size = 17 + weights = Weights(rank=0, world_size=1, vocab_size=vocab_size, hidden_dim=256) embeddings = TensorParallelEmbedding("", weights) input_ids = torch.arange(vocab_size) @@ -47,18 +51,27 @@ def test_weight_hub_files_offline_error(): assert embeddings.max_id == 17 torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256)) - weights_0_2 = Weights(rank=0, world_size=2, vocab_size = vocab_size,hidden_dim = 256) - weights_1_2 = Weights(rank=1, world_size=2, vocab_size = vocab_size,hidden_dim = 256) + weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256) + weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256) embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False) assert embeddings_0_2.min_id == 0 assert embeddings_0_2.max_id == 9 - torch.testing.assert_close(embeddings_0_2.weight , torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0).view(10, 256).float()) + torch.testing.assert_close( + embeddings_0_2.weight, + torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0) + .view(10, 256) + .float(), + ) embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False) assert embeddings_1_2.min_id == 9 assert embeddings_1_2.max_id == 17 - torch.testing.assert_close(embeddings_1_2.weight , torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0).view(9, 256).float()) + torch.testing.assert_close( + embeddings_1_2.weight, + torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0) + .view(9, 256) + .float(), + ) output_tp_0 = embeddings_0_2.forward(input_ids) output_tp_1 = embeddings_1_2.forward(input_ids) torch.testing.assert_close(output, 
output_tp_0 + output_tp_1) - diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py index 9ec3ce20..905b7e69 100644 --- a/server/text_generation_server/cli.py +++ b/server/text_generation_server/cli.py @@ -270,7 +270,7 @@ def download_weights( pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass - + elif (Path(model_id) / "adapter_config.json").exists(): # Try to load as a local PEFT model try: diff --git a/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py b/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py index d103973f..96701794 100644 --- a/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py @@ -17,6 +17,7 @@ from text_generation_server.utils.layers import ( FastLayerNorm, ) + class PhiConfig(PretrainedConfig): def __init__( self, @@ -25,15 +26,15 @@ class PhiConfig(PretrainedConfig): num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, - hidden_act="gelu_fast", # llama uses silu - layer_norm_eps=1e-05, # rms in llama, + hidden_act="gelu_fast", # llama uses silu + layer_norm_eps=1e-05, # rms in llama, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, - resid_pdrop=0.1, # llama doesn't have this - partial_rotary_factor=0.5, # important difference between llama and phi + resid_pdrop=0.1, # llama doesn't have this + partial_rotary_factor=0.5, # important difference between llama and phi **kwargs, ): self.vocab_size = vocab_size @@ -55,6 +56,7 @@ class PhiConfig(PretrainedConfig): **kwargs, ) + # this is the same as llama except for Phi uses bias=True def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: @@ -68,6 +70,7 @@ def load_attention(config, prefix, weights): bias=True, ) + def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 @@ -94,6 +97,7 @@ def _load_gqa(config, prefix: str, weights): get_linear(weight, bias=True, quantize=config.quantize) ) + class FlashPhiAttention(torch.nn.Module): def __init__( self, @@ -173,8 +177,7 @@ class FlashPhiAttention(torch.nn.Module): # # Apply partial positional embeddings in place self.rotary_emb( - query[:, :, :self.rotary_dim], kv[:, 0, :, :self.rotary_dim], - cos, sin + query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin ) # Reshape key and value and cache @@ -210,7 +213,8 @@ class FlashPhiAttention(torch.nn.Module): max_s, ) - return self.dense(attn_output.view(-1, self.num_heads*self.head_size)) + return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) + class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): @@ -256,7 +260,9 @@ class FlashPhiLayer(nn.Module): ) self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.input_layernorm = FastLayerNorm.load( - prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.layer_norm_eps + prefix=f"{prefix}.input_layernorm", + weights=weights, + eps=config.layer_norm_eps, ) self.resid_dropout = torch.nn.Dropout(config.resid_pdrop) @@ -287,10 +293,13 @@ class FlashPhiLayer(nn.Module): max_s, ) - hidden_states = self.resid_dropout(attn_output).add(self.resid_dropout(self.mlp(hidden_states))) + hidden_states = self.resid_dropout(attn_output).add( + 
self.resid_dropout(self.mlp(hidden_states)) + ) return hidden_states, res + class FlashPhiModel(torch.nn.Module): def __init__(self, config, weights): super().__init__() @@ -361,6 +370,7 @@ class FlashPhiModel(torch.nn.Module): return hidden_states + class FlashPhiForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() @@ -380,7 +390,7 @@ class FlashPhiForCausalLM(torch.nn.Module): kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, - input_lengths: torch.Tensor, + input_lengths: torch.Tensor, max_s: int, lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: diff --git a/server/text_generation_server/models/custom_modeling/mpt_modeling.py b/server/text_generation_server/models/custom_modeling/mpt_modeling.py index 1a9aef74..2c2fec48 100644 --- a/server/text_generation_server/models/custom_modeling/mpt_modeling.py +++ b/server/text_generation_server/models/custom_modeling/mpt_modeling.py @@ -54,9 +54,19 @@ def load_col(config, prefix, weights, bias): bias_h = bias_h[0] bias_block_size = bias_h // bias_size - bias_q_part = bias_slice_[bias_rank * bias_block_size : (bias_rank + 1) * bias_block_size] - bias_k_part = bias_slice_[bias_h + bias_rank * bias_block_size : bias_h + (bias_rank + 1) * bias_block_size] - bias_v_part = bias_slice_[2 * bias_h + bias_rank * bias_block_size : 2 * bias_h + (bias_rank + 1) * bias_block_size] + bias_q_part = bias_slice_[ + bias_rank * bias_block_size : (bias_rank + 1) * bias_block_size + ] + bias_k_part = bias_slice_[ + bias_h + + bias_rank * bias_block_size : bias_h + + (bias_rank + 1) * bias_block_size + ] + bias_v_part = bias_slice_[ + 2 * bias_h + + bias_rank * bias_block_size : 2 * bias_h + + (bias_rank + 1) * bias_block_size + ] bias = torch.cat([bias_q_part, bias_k_part, bias_v_part], dim=0) if bias.dtype != torch.int32: @@ -352,8 +362,12 @@ class MultiheadAttention(nn.Module): hidden_size = config.d_model head_dim = hidden_size // self.n_heads - self.q_ln = LPLayerNorm(d_model, bias=bias, prefix=f"{prefix}.q_ln", weights=weights) - self.k_ln = LPLayerNorm(self.n_heads * head_dim, prefix=f"{prefix}.k_ln", weights=weights) + self.q_ln = LPLayerNorm( + d_model, bias=bias, prefix=f"{prefix}.q_ln", weights=weights + ) + self.k_ln = LPLayerNorm( + self.n_heads * head_dim, prefix=f"{prefix}.k_ln", weights=weights + ) if self.attn_impl == "flash": self.attn_fn = flash_attn_fn elif self.attn_impl == "triton": @@ -684,7 +698,6 @@ class LPLayerNorm(torch.nn.LayerNorm): self.bias = nn.Parameter(weights.get_sharded(f"{prefix}.bias", dim=0)) self.normalized_shape = self.weight.shape - def forward(self, x): module_device = x.device downcast_x = _cast_if_autocast_enabled(x) @@ -798,7 +811,7 @@ class MPTModel(MPTPreTrainedModel): self.wte = TensorParallelEmbedding("transformer.wte", weights) if not self.alibi: - self.wpe = TensorParallelEmbedding("transformer.wpe", weights) + self.wpe = TensorParallelEmbedding("transformer.wpe", weights) self.blocks = nn.ModuleList( [ MPTBlock(config, prefix=f"transformer.blocks.{i}", weights=weights) diff --git a/server/text_generation_server/models/custom_modeling/phi_modeling.py b/server/text_generation_server/models/custom_modeling/phi_modeling.py index f9999537..e5c09728 100644 --- a/server/text_generation_server/models/custom_modeling/phi_modeling.py +++ b/server/text_generation_server/models/custom_modeling/phi_modeling.py @@ -62,14 +62,12 @@ class PhiConfig(PretrainedConfig): **kwargs, ) + # RotaryEmbedding is a class that implements the 
rotary embedding. class RotaryEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() - inv_freq = [ - 1.0 / 10000.0 ** (i / dim) - for i in range(0, dim, 2) - ] + inv_freq = [1.0 / 10000.0 ** (i / dim) for i in range(0, dim, 2)] inv_freq_len = len(inv_freq) inv_freq = torch.tensor(inv_freq).view(1, inv_freq_len) t = torch.arange(0, max_seq_len, dtype=torch.float).view(max_seq_len, 1) @@ -131,6 +129,7 @@ class PhiCausalLMHead(nn.Module): hidden_states = self.linear(hidden_states) return hidden_states + # PhiMHA is a multi-head attention layer. This layer uses an attention mask to prevent tokens from attending to subsequent tokens. class PhiMHA(nn.Module): def __init__(self, prefix, config, weights): @@ -172,19 +171,27 @@ class PhiMHA(nn.Module): v = torch.cat([prev_v, v], dim=1) past_kv_cache = [k, v] - attn_weights = torch.einsum('bthd,bshd->bhts', q, k * self.softmax_scale) + attn_weights = torch.einsum("bthd,bshd->bhts", q, k * self.softmax_scale) if attention_mask is not None: seqlen_k = k.shape[1] seqlen_q = q.shape[1] - causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device), 1) + causal_mask = torch.triu( + torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device), + 1, + ) attn_weights = attn_weights + causal_mask.to(dtype=attn_weights.dtype) - + attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) attn_output = attn_weights.matmul(v.transpose(1, 2)).squeeze(0) - attn_output = attn_output.view((b_size, self.num_heads, seq_len, self.head_dim)).transpose(1, 2).flatten(-2) + attn_output = ( + attn_output.view((b_size, self.num_heads, seq_len, self.head_dim)) + .transpose(1, 2) + .flatten(-2) + ) return self.out_proj(attn_output), past_kv_cache + # PhiMLP is a multi-layer perceptron. It contains two linear layers with a gelu activation function. class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): @@ -204,19 +211,22 @@ class PhiMLP(nn.Module): bias=False, ) self.activation = torch.nn.functional.gelu - + def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states + # PhiBlock is a single transformer block. It contains a layer norm, a multi-head attention layer and a multi-layer perceptron. class PhiBlock(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() self.layer_id = layer_id - self.layer_norm = nn.LayerNorm.load(prefix=f"{layer_id}.ln", weights=weights, eps=config.layer_norm_epsilon) + self.layer_norm = nn.LayerNorm.load( + prefix=f"{layer_id}.ln", weights=weights, eps=config.layer_norm_epsilon + ) self.mixer = PhiMHA(prefix=f"{layer_id}.mixer", config=config, weights=weights) self.mlp = PhiMLP(prefix=f"{layer_id}.mlp", config=config, weights=weights) @@ -228,11 +238,14 @@ class PhiBlock(nn.Module): ): residual = hidden_states hidden_states = self.layer_norm(hidden_states) - attn_outputs, past_kv_cache = self.mixer(hidden_states, kv_cache, attention_mask) + attn_outputs, past_kv_cache = self.mixer( + hidden_states, kv_cache, attention_mask + ) feed_forward_hidden_states = self.mlp(hidden_states) out = attn_outputs + feed_forward_hidden_states + residual return out, past_kv_cache + # PhiModel implements the embedding layer and the transformer blocks.
class PhiModel(nn.Module): def __init__(self, config, weights): @@ -241,9 +254,12 @@ class PhiModel(nn.Module): self.tp_world_size = weights.process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix="transformer.embd.wte", weights=weights - ) + ) self.blocks = nn.ModuleList( - [PhiBlock(f"transformer.h.{layer_id}", config, weights) for layer_id in range(config.n_layer)] + [ + PhiBlock(f"transformer.h.{layer_id}", config, weights) + for layer_id in range(config.n_layer) + ] ) def forward( @@ -258,14 +274,19 @@ class PhiModel(nn.Module): seq_len = hidden_states.shape[1] mask = None if seq_len <= 1 else attention_mask - past_key_values = [None] * len(self.blocks) if past_key_values is None else past_key_values + past_key_values = ( + [None] * len(self.blocks) if past_key_values is None else past_key_values + ) for index, block in enumerate(self.blocks): - hidden_states, new_key_values = block(hidden_states, past_key_values[index], mask) + hidden_states, new_key_values = block( + hidden_states, past_key_values[index], mask + ) past_key_values[index] = new_key_values return hidden_states, past_key_values + # PhiForCausalLM wraps the PhiModel and PhiCausalLMHead together and returns a CausalLMOutputWithPast object. class PhiForCausalLM(torch.nn.Module): def __init__(self, config, weights): @@ -290,12 +311,15 @@ class PhiForCausalLM(torch.nn.Module): loss = None if labels is not None: loss = nn.CrossEntropyLoss()( - logits[:, :-1].view(-1, logits.size(-1)), - labels[:, 1:].view(-1) + logits[:, :-1].view(-1, logits.size(-1)), labels[:, 1:].view(-1) ) if not return_dict: - return ((loss,) + (logits,) + model_output[1:]) if loss is not None else (logits,) + model_output[1:] + return ( + ((loss,) + (logits,) + model_output[1:]) + if loss is not None + else (logits,) + model_output[1:] + ) return CausalLMOutputWithPast( loss=loss, @@ -304,5 +328,3 @@ class PhiForCausalLM(torch.nn.Module): hidden_states=None, attentions=None, ) - - diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py index 7be61906..94bd58f4 100644 --- a/server/text_generation_server/models/flash_llama.py +++ b/server/text_generation_server/models/flash_llama.py @@ -73,11 +73,11 @@ class FlashLlama(FlashCausalLM): import json import os from pathlib import Path - - is_local_model = (Path(use_medusa).exists() and Path(use_medusa).is_dir()) or os.getenv( - "WEIGHTS_CACHE_OVERRIDE", None - ) is not None - + + is_local_model = ( + Path(use_medusa).exists() and Path(use_medusa).is_dir() + ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None + if not is_local_model: medusa_config = hf_hub_download( use_medusa, revision=revision, filename="config.json" @@ -88,7 +88,7 @@ class FlashLlama(FlashCausalLM): else: medusa_config = str(Path(use_medusa) / "config.json") medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt") - + with open(medusa_config, "r") as f: config = json.load(f) medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" diff --git a/server/text_generation_server/models/flash_phi.py b/server/text_generation_server/models/flash_phi.py index 1c49f2a9..061b9740 100644 --- a/server/text_generation_server/models/flash_phi.py +++ b/server/text_generation_server/models/flash_phi.py @@ -63,11 +63,11 @@ class FlashPhi(FlashCausalLM): import json import os from pathlib import Path - - is_local_model = (Path(use_medusa).exists() and Path(use_medusa).is_dir()) or os.getenv( - "WEIGHTS_CACHE_OVERRIDE", None - ) is not None - + + is_local_model = ( + 
Path(use_medusa).exists() and Path(use_medusa).is_dir() + ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None + if not is_local_model: medusa_config = hf_hub_download( use_medusa, revision=revision, filename="config.json" ) @@ -78,7 +78,7 @@ class FlashPhi(FlashCausalLM): else: medusa_config = str(Path(use_medusa) / "config.json") medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt") - + with open(medusa_config, "r") as f: config = json.load(f) medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" diff --git a/server/text_generation_server/models/phi.py b/server/text_generation_server/models/phi.py index d477478a..79aa3fb9 100644 --- a/server/text_generation_server/models/phi.py +++ b/server/text_generation_server/models/phi.py @@ -5,13 +5,17 @@ from transformers import AutoConfig, AutoTokenizer from typing import Optional, List, Tuple from text_generation_server.models import CausalLM -from text_generation_server.models.custom_modeling.phi_modeling import PhiConfig, PhiForCausalLM +from text_generation_server.models.custom_modeling.phi_modeling import ( + PhiConfig, + PhiForCausalLM, +) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) + class Phi(CausalLM): def __init__( self, @@ -60,4 +64,3 @@ class Phi(CausalLM): dtype=dtype, device=device, ) - diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py index 6ddfd6f4..010d6143 100644 --- a/server/text_generation_server/utils/layers.py +++ b/server/text_generation_server/utils/layers.py @@ -510,7 +510,9 @@ class TensorParallelEmbedding(nn.Module): block_size = (num_embeddings + world_size - 1) // world_size self.min_id = rank * block_size self.max_id = min(num_embeddings, (rank + 1) * block_size) - self.null_idx = weight.shape[0] # Usually block_size, might be less in non even vocab_size. + self.null_idx = weight.shape[ + 0 + ] # Usually block_size, might be less in non even vocab_size. self.process_group = weights.process_group self.reduce = reduce

From 433934519c4fb38b84072e293c06c76c1f7acf8f Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Fri, 26 Jan 2024 20:13:47 +0100
Subject: [PATCH 055/153] Fixing top_n_tokens. (#1497)

Supersedes #1459

The fix works as follows. We updated `next_token_chooser` to return all logprobs, and `batch_top_tokens` now also gets `accepted_ids` plus the speculated length (so it knows how to interpret the flat logprobs). We then updated the code to return the lists of `Tokens` that it expects.

Fixes # (issue)

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a GitHub issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
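To make the new contract concrete, here is a minimal sketch of the grouping step (the helper name is hypothetical; it ignores the tie handling of the real `batch_top_tokens` and assumes the flat logprob rows belonging to one request are contiguous):

```python
import torch

def batch_top_tokens_sketch(top_n_tokens, flat_logprobs, accepted_ids):
    # flat_logprobs has one row per accepted (speculated) token;
    # accepted_ids says how many consecutive rows belong to each request.
    batch_ids, batch_logprobs = [], []
    row = 0
    for n, accepted in zip(top_n_tokens, accepted_ids.tolist()):
        req_ids, req_logprobs = [], []
        for _ in range(accepted):
            if n == 0:
                # Requests that asked for no top tokens still get one empty entry.
                req_ids.append([])
                req_logprobs.append([])
            else:
                values, indices = flat_logprobs[row].topk(n)
                req_ids.append(indices.tolist())
                req_logprobs.append(values.tolist())
            row += 1
        batch_ids.append(req_ids)
        batch_logprobs.append(req_logprobs)
    return batch_ids, batch_logprobs

# One request without top tokens, one with two accepted speculated tokens:
top_n = [0, 2]
accepted = torch.tensor([1, 2])
flat = torch.log_softmax(torch.randn(3, 5), dim=-1)  # 1 + 2 = 3 flat rows
ids, logprobs = batch_top_tokens_sketch(top_n, flat, accepted)
assert ids[0] == [[]]    # one (empty) list per accepted token
assert len(ids[1]) == 2  # one top-token list per accepted token
```

This is why the return type changes from one `Tokens` per request to a list of `Tokens` per request, as exercised by the updated tests below.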
--- server/tests/utils/test_tokens.py | 52 +++++++++----- .../models/causal_lm.py | 35 ++++++---- .../models/flash_causal_lm.py | 39 ++++++----- .../models/seq2seq_lm.py | 35 ++++++---- server/text_generation_server/models/types.py | 2 +- server/text_generation_server/utils/tokens.py | 69 +++++++++++++------ 6 files changed, 150 insertions(+), 82 deletions(-) diff --git a/server/tests/utils/test_tokens.py b/server/tests/utils/test_tokens.py index 39446fd5..343716b9 100644 --- a/server/tests/utils/test_tokens.py +++ b/server/tests/utils/test_tokens.py @@ -64,30 +64,50 @@ def test_batch_top_tokens(): top_n_tokens = [0, 2, 3, 4, 5] top_n_tokens_tensor = torch.tensor(top_n_tokens) inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5) + accepted_ids = torch.ones_like(top_n_tokens_tensor) topn_tok_ids, topn_tok_logprobs = batch_top_tokens( - top_n_tokens, top_n_tokens_tensor, inp_logprobs + top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids ) - assert topn_tok_ids[0] == [] - assert topn_tok_ids[1] == [0, 3] - assert topn_tok_ids[2] == [0, 3, 1, 4] - assert topn_tok_ids[3] == [0, 3, 1, 4] - assert topn_tok_ids[4] == [0, 3, 1, 4, 2] + assert topn_tok_ids[0] == [[]] + assert topn_tok_ids[1] == [[0, 3]] + assert topn_tok_ids[2] == [[0, 3, 1, 4]] + assert topn_tok_ids[3] == [[0, 3, 1, 4]] + assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]] - assert topn_tok_logprobs[0] == [] - assert topn_tok_logprobs[1] == [-1, -2] - assert topn_tok_logprobs[2] == [-1, -2, -3, -3] - assert topn_tok_logprobs[3] == [-1, -2, -3, -3] - assert topn_tok_logprobs[4] == [-1, -2, -3, -3, -4] + assert topn_tok_logprobs[0] == [[]] + assert topn_tok_logprobs[1] == [[-1, -2]] + assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]] + assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]] + assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]] + + # Now let's make second member of the batch be speculated + inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5 * 2) + accepted_ids[1] = 2 + topn_tok_ids, topn_tok_logprobs = batch_top_tokens( + top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids + ) + + assert topn_tok_ids[0] == [[]] + assert topn_tok_ids[1] == [[0, 3], [0, 3]] + assert topn_tok_ids[2] == [[0, 3, 1, 4]] + assert topn_tok_ids[3] == [[0, 3, 1, 4]] + assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]] + + assert topn_tok_logprobs[0] == [[]] + assert topn_tok_logprobs[1] == [[-1, -2], [-1, -2]] + assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]] + assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]] + assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]] def test_pass_through_tokenizer(skip_tokenizer_env_var): tokenizer = AutoTokenizer.from_pretrained( - 'meta-llama/Llama-2-7b-chat-hf', - revision=None, - padding_side="left", - truncation_side="left", + 'meta-llama/Llama-2-7b-chat-hf', + revision=None, + padding_side="left", + truncation_side="left", ) tokenizer.pad_token_id = 2 make_tokenizer_optional(tokenizer) @@ -109,4 +129,4 @@ def test_pass_through_tokenizer(skip_tokenizer_env_var): if __name__ == "__main__": - test_pass_through_tokenizer() \ No newline at end of file + test_pass_through_tokenizer() diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index 387a5618..30e39b4e 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -856,10 +856,13 @@ class CausalLM(Model): next_token_ids, next_token_logprobs, logprobs, _, _ = batch.next_token_chooser( batch.input_ids, 
logits.squeeze(-2), self.speculate ) + # Speculation is not active for causal + accepted_ids = torch.ones_like(batch.input_ids)[:, 0] batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, + accepted_ids, ) prev_batches.append({ @@ -1036,20 +1039,24 @@ class CausalLM(Model): prefill_tokens = None if top_n_tokens > 0: - toptoken_texts = self.tokenizer.batch_decode( - top_token_ids, - clean_up_tokenization_spaces=False, - skip_special_tokens=False, - ) - special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids - ] - top_tokens = Tokens( - top_token_ids, - top_token_logprobs, - toptoken_texts, - special_toptokens, - ) + all_top_tokens = [] + for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + toptoken_texts = self.tokenizer.batch_decode( + top_token_ids, + clean_up_tokenization_spaces=False, + skip_special_tokens=False, + ) + special_toptokens = [ + token_id in self.all_special_ids for token_id in top_token_ids + ] + top_tokens = Tokens( + top_token_ids, + top_token_logprobs, + toptoken_texts, + special_toptokens, + ) + all_top_tokens.append(top_tokens) + top_tokens = all_top_tokens else: top_tokens = None diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 930082cd..53a3d582 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -842,6 +842,8 @@ class FlashCausalLM(Model): else: next_token_logits = out + + speculate = get_speculate() ( next_input_ids, next_token_logprobs, @@ -851,16 +853,15 @@ class FlashCausalLM(Model): ) = batch.next_token_chooser( batch.all_input_ids_tensor[:, : batch.max_seqlen], next_token_logits, - get_speculate(), + speculate, batch.speculative_ids, speculative_logits, ) batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( - batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs + batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids ) - speculative_length = 0 if speculative_ids is None else speculative_ids.shape[1] if prefill: if len(batch) > 1 and prefill_logprobs: # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs @@ -1062,20 +1063,24 @@ class FlashCausalLM(Model): prefill_tokens = None if top_n_tokens > 0: - toptoken_texts = self.tokenizer.batch_decode( - top_token_ids, - clean_up_tokenization_spaces=False, - skip_special_tokens=False, - ) - special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids - ] - top_tokens = Tokens( - top_token_ids, - top_token_logprobs, - toptoken_texts, - special_toptokens, - ) + all_top_tokens = [] + for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + toptoken_texts = self.tokenizer.batch_decode( + top_token_ids, + clean_up_tokenization_spaces=False, + skip_special_tokens=False, + ) + special_toptokens = [ + token_id in self.all_special_ids for token_id in top_token_ids + ] + top_tokens = Tokens( + top_token_ids, + top_token_logprobs, + toptoken_texts, + special_toptokens, + ) + all_top_tokens.append(top_tokens) + top_tokens = all_top_tokens else: top_tokens = None diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py index f2e4cec6..8b93aecd 100644 --- a/server/text_generation_server/models/seq2seq_lm.py +++ 
b/server/text_generation_server/models/seq2seq_lm.py @@ -640,10 +640,13 @@ class Seq2SeqLM(Model): batch.past_key_values, ) + # Speculation is not active for seq2seq + accepted_ids = torch.ones_like(batch.decoder_input_ids)[:, 0] batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), + accepted_ids, ) start_decode = time.time_ns() @@ -746,20 +749,24 @@ class Seq2SeqLM(Model): prefill_tokens = None if top_n_tokens > 0: - toptoken_texts = self.tokenizer.batch_decode( - top_token_ids, - clean_up_tokenization_spaces=False, - skip_special_tokens=False, - ) - special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids - ] - top_tokens = Tokens( - top_token_ids, - top_token_logprobs, - toptoken_texts, - special_toptokens, - ) + all_top_tokens = [] + for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + toptoken_texts = self.tokenizer.batch_decode( + top_token_ids, + clean_up_tokenization_spaces=False, + skip_special_tokens=False, + ) + special_toptokens = [ + token_id in self.all_special_ids for token_id in top_token_ids + ] + top_tokens = Tokens( + top_token_ids, + top_token_logprobs, + toptoken_texts, + special_toptokens, + ) + all_top_tokens.append(top_tokens) + top_tokens = all_top_tokens else: top_tokens = None diff --git a/server/text_generation_server/models/types.py b/server/text_generation_server/models/types.py index f85f27e5..bc68812e 100644 --- a/server/text_generation_server/models/types.py +++ b/server/text_generation_server/models/types.py @@ -95,5 +95,5 @@ class Generation: generated_text=self.generated_text.to_pb() if self.generated_text is not None else None, - top_tokens=self.top_tokens.to_pb() if self.top_tokens is not None else None, + top_tokens=[top_tokens.to_pb() for top_tokens in self.top_tokens] if self.top_tokens is not None else None, ) diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py index 08828835..9e40fa43 100644 --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -269,7 +269,8 @@ class HeterogeneousNextTokenChooser: scores[:, j] = _scores next_ids[:, j] = _next_ids next_ids = next_ids.view(B * S) - scores = scores.view(B * S, -1) + allscores = scores.view(B * S, -1) + alllogprobs = torch.log_softmax(allscores, -1) if speculated_ids is not None: accepted_ids = [] @@ -297,20 +298,15 @@ class HeterogeneousNextTokenChooser: accepted_ids, device=input_ids.device, dtype=input_ids.dtype ) next_ids = next_ids[indices] - scores = scores[indices] + logprobs = alllogprobs[indices] indices = torch.arange(B, device=input_ids.device) * S if speculative_scores is not None: speculative_scores = speculative_scores[indices + accepted_ids - 1] else: accepted_ids = torch.ones_like(next_ids) + logprobs = alllogprobs - # ignore logprobs if we use greedy search - if type(self.choice) == Greedy: - logprobs = torch.empty_like(scores, device="cpu") - next_logprobs = torch.empty_like(next_ids.view(-1), device="cpu") - else: - logprobs = torch.log_softmax(scores, -1) - next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) + next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) if speculate > 0: if speculative_scores is not None: @@ -324,7 +320,7 @@ class HeterogeneousNextTokenChooser: else: speculative_ids = None - return next_ids, next_logprobs, logprobs, accepted_ids, speculative_ids + 
return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids def filter(self, indices): if self.watermark_processor is not None: @@ -435,8 +431,8 @@ class HeterogeneousSampling: def batch_top_tokens( - top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor -) -> Tuple[List[List[int]], List[List[float]]]: + top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor +) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the @@ -445,15 +441,22 @@ def batch_top_tokens( max_top_n = max(top_n_tokens) # Early exit when top_n_tokens is not used if max_top_n == 0: - return [[]] * len(top_n_tokens), [[]] * len(top_n_tokens) + return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) + + batch_size = accepted_ids.shape[0] + speculate_size = logprobs.shape[0] // batch_size + top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size - top_n_tokens = [min(tok, logprobs.size(-1)) for tok in top_n_tokens] + top_n_tokens = [min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size)] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset - sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=1, sorted=True).values - nth_highest = torch.gather(sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1)) + sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values + + nth_highest = torch.gather( + sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1) + ) nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min # Find the new "fuzzy" top n values @@ -467,10 +470,36 @@ def batch_top_tokens( top_indices = top_k.indices.tolist() top_values = top_k.values.tolist() - return ( - [idxs[:n] if req_n > 0 else [] for idxs, n, req_n in zip(top_indices, top_n_ishes, top_n_tokens)], - [vals[:n] if req_n > 0 else [] for vals, n, req_n in zip(top_values, top_n_ishes, top_n_tokens)], - ) + batch_top_token_ids = [] + batch_top_token_logprobs = [] + accepted_ids_list = accepted_ids.tolist() + for i, n_accepted_ids in enumerate(accepted_ids_list): + start = speculate_size * i + stop = speculate_size * (i + 1) + _top_indices = top_indices[start: stop] + _top_values = top_values[start: stop] + _top_n_ishes = top_n_ishes[start: stop] + _top_n_tokens = top_n_tokens[start: stop] + + _top_indices = _top_indices[:n_accepted_ids] + _top_values = _top_values[:n_accepted_ids] + _top_n_ishes = _top_n_ishes[:n_accepted_ids] + _top_n_tokens = _top_n_tokens[:n_accepted_ids] + + row_top_token_ids = [] + row_top_token_logprobs = [] + + for idxs, vals, n, req_n in zip(_top_indices, _top_values, _top_n_ishes, _top_n_tokens): + indices = idxs[:n] if req_n > 0 else [] + values = vals[:n] if req_n > 0 else [] + + row_top_token_ids.append(indices) + row_top_token_logprobs.append(values) + + batch_top_token_ids.append(row_top_token_ids) + batch_top_token_logprobs.append(row_top_token_logprobs) + + return batch_top_token_ids, batch_top_token_logprobs def make_tokenizer_optional(tokenizer): From 050c5840b5b9df78b9773647dc6a3ca8e7bfcf3d Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Mon, 29 Jan 2024 11:20:08 +0100 Subject: 
[PATCH 056/153] Sending compute type from the environment instead of hardcoded string (#1504) # What does this PR do? Sending the compute type from the environment instead of a hardcoded string. Reading the environment variable on every request is slow, so it is read once and kept in global state instead. Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- router/src/server.rs | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/router/src/server.rs b/router/src/server.rs index 4092ba6c..6c70a675 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -59,6 +59,7 @@ example = json ! ({"error": "Incomplete generation"})), async fn compat_generate( Extension(default_return_full_text): Extension<bool>, infer: Extension<Infer>, + compute_type: Extension<ComputeType>, Json(mut req): Json<CompatGenerateRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { // default return_full_text given the pipeline_tag @@ -68,11 +69,11 @@ async fn compat_generate( // switch on stream if req.stream { - Ok(generate_stream(infer, Json(req.into())) + Ok(generate_stream(infer,compute_type, Json(req.into())) .await .into_response()) } else { - let (headers, Json(generation)) = generate(infer, Json(req.into())).await?; + let (headers, Json(generation)) = generate(infer, compute_type, Json(req.into())).await?; // wrap generation inside a Vec to match api-inference Ok((headers, Json(vec![generation])).into_response()) } @@ -147,6 +148,7 @@ seed, )] async fn generate( infer: Extension<Infer>, + Extension(ComputeType(compute_type)): Extension<ComputeType>, Json(req): Json<GenerateRequest>, ) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> { let span = tracing::Span::current(); @@ -232,7 +234,7 @@ async fn generate( // Headers let mut headers = HeaderMap::new(); - headers.insert("x-compute-type", "gpu+optimized".parse().unwrap()); + headers.insert("x-compute-type", compute_type.parse().unwrap()); headers.insert( "x-compute-time", total_time.as_millis().to_string().parse().unwrap(), @@ -341,6 +343,7 @@ seed, )] async fn generate_stream( Extension(infer): Extension<Infer>, + Extension(compute_type): Extension<ComputeType>, Json(req): Json<GenerateRequest>, ) -> ( HeaderMap, @@ -351,13 +354,14 @@ async fn generate_stream( event.json_data(stream_token).unwrap() }; let (headers, response_stream) = - generate_stream_internal(infer, Json(req), on_message_callback).await; + generate_stream_internal(infer, compute_type, Json(req), on_message_callback).await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); (headers, sse) } async fn generate_stream_internal( infer: Infer, + ComputeType(compute_type): ComputeType, Json(req): Json<GenerateRequest>, on_message_callback: impl Fn(StreamResponse) -> Event, ) -> (HeaderMap, impl Stream<Item = Result<Event, Infallible>>) { @@ -370,7 +374,7 @@ async fn
generate_stream_internal( let compute_characters = req.inputs.chars().count(); let mut headers = HeaderMap::new(); - headers.insert("x-compute-type", "gpu+optimized".parse().unwrap()); + headers.insert("x-compute-type",compute_type.parse().unwrap()); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), @@ -559,6 +563,7 @@ async fn generate_stream_internal( )] async fn chat_completions( Extension(infer): Extension<Infer>, + Extension(compute_type): Extension<ComputeType>, Extension(info): Extension<Info>, Json(req): Json<ChatRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { @@ -647,12 +652,12 @@ async fn chat_completions( }; let (headers, response_stream) = - generate_stream_internal(infer, Json(generate_request), on_message_callback).await; + generate_stream_internal(infer, compute_type, Json(generate_request), on_message_callback).await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); Ok((headers, sse).into_response()) } else { let (headers, Json(generation)) = - generate(Extension(infer), Json(generate_request)).await?; + generate(Extension(infer), Extension(compute_type), Json(generate_request)).await?; let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) @@ -731,6 +736,9 @@ async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String { prom_handle.render() } +#[derive(Clone, Debug)] +pub(crate) struct ComputeType(String); + /// Serving method #[allow(clippy::too_many_arguments)] pub async fn run( @@ -939,6 +947,8 @@ pub async fn run( Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise }; + let compute_type = ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string())); + // Combine routes and layers let app = Router::new() .merge(swagger_ui) @@ -948,6 +958,7 @@ pub async fn run( .layer(Extension(health_ext.clone())) .layer(Extension(compat_return_full_text)) .layer(Extension(infer)) + .layer(Extension(compute_type)) .layer(Extension(prom_handle.clone())) .layer(OtelAxumLayer::default()) .layer(cors_layer); From 89fa4fddb0ae288321a422efa52a1a892e00cf56 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Mon, 29 Jan 2024 12:30:50 +0100 Subject: [PATCH 057/153] Create the compute type at launch time (if not provided in the env). (#1505) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
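As a quick sanity check of these two PRs, the chosen compute type is observable from any client, since the router echoes it back in the `x-compute-type` response header. A sketch, assuming a router listening on the default local port:

```python
import requests

# Hypothetical local endpoint. The header carries either the value of
# COMPUTE_TYPE from the environment or, after PATCH 057, a value derived
# from `nvidia-smi` such as "1-nvidia-a10g" (falling back to "gpu+optimized").
resp = requests.post(
    "http://localhost:3000/generate",
    json={"inputs": "Hello", "parameters": {"max_new_tokens": 5}},
)
print(resp.headers.get("x-compute-type"))
```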
--- launcher/src/main.rs | 24 ++++++++++++++++++++++-- router/src/server.rs | 24 +++++++++++++++++------- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/launcher/src/main.rs b/launcher/src/main.rs index 313d0123..53a40ea8 100644 --- a/launcher/src/main.rs +++ b/launcher/src/main.rs @@ -992,7 +992,20 @@ fn spawn_shards( Ok(()) } +fn compute_type(num_shard: usize) -> Option<String> { + let output = Command::new("nvidia-smi") + .args(["--query-gpu=gpu_name", "--format=csv"]) + .output() + .ok()?; + let output = String::from_utf8(output.stdout).ok()?; + let fullname = output.split('\n').nth(1)?; + let cardname = fullname.replace(' ', "-").to_lowercase(); + let compute_type = format!("{num_shard}-{cardname}"); + Some(compute_type) +} + fn spawn_webserver( + num_shard: usize, args: Args, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, @@ -1082,6 +1095,13 @@ fn spawn_webserver( envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into())) }; + // Parse Compute type + if let Ok(compute_type) = env::var("COMPUTE_TYPE") { + envs.push(("COMPUTE_TYPE".into(), compute_type.into())) + } else if let Some(compute_type) = compute_type(num_shard) { + envs.push(("COMPUTE_TYPE".into(), compute_type.into())) + } + let mut webserver = match Command::new("text-generation-router") .args(router_args) .envs(envs) @@ -1275,8 +1295,8 @@ fn main() -> Result<(), LauncherError> { return Ok(()); } - let mut webserver = - spawn_webserver(args, shutdown.clone(), &shutdown_receiver).map_err(|err| { + let mut webserver = spawn_webserver(num_shard, args, shutdown.clone(), &shutdown_receiver) + .map_err(|err| { shutdown_shards(shutdown.clone(), &shutdown_receiver); err })?; diff --git a/router/src/server.rs b/router/src/server.rs index 6c70a675..129db6a2 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -69,7 +69,7 @@ async fn compat_generate( // switch on stream if req.stream { - Ok(generate_stream(infer,compute_type, Json(req.into())) + Ok(generate_stream(infer, compute_type, Json(req.into())) .await .into_response()) } else { @@ -374,7 +374,7 @@ async fn generate_stream_internal( let compute_characters = req.inputs.chars().count(); let mut headers = HeaderMap::new(); - headers.insert("x-compute-type",compute_type.parse().unwrap()); + headers.insert("x-compute-type", compute_type.parse().unwrap()); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), @@ -651,13 +651,22 @@ async fn chat_completions( ) }; - let (headers, response_stream) = - generate_stream_internal(infer, compute_type, Json(generate_request), on_message_callback).await; + let (headers, response_stream) = generate_stream_internal( + infer, + compute_type, + Json(generate_request), + on_message_callback, + ) + .await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); Ok((headers, sse).into_response()) } else { - let (headers, Json(generation)) = - generate(Extension(infer), Extension(compute_type), Json(generate_request)).await?; + let (headers, Json(generation)) = generate( + Extension(infer), + Extension(compute_type), + Json(generate_request), + ) + .await?; let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) @@ -947,7 +956,8 @@ pub async fn run( Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise }; - let compute_type = ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string())); + let compute_type =
ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string())); // Combine routes and layers let app = Router::new() From 86796bc78c5b4a54df9e05afed98a6d3c84ae0b3 Mon Sep 17 00:00:00 2001 From: freitng <153592523+freitng@users.noreply.github.com> Date: Mon, 29 Jan 2024 17:02:57 +0100 Subject: [PATCH 058/153] Modify default for max_new_tokens in python client (#1336) # What does this PR do? Since ([#1097](https://github.com/huggingface/text-generation-inference/pull/1097)) the clients do not need to specify a max_length anymore. However, the python client in this repo had not yet been adapted to these changes. This PR makes it possible to use the python client and not provide max_new_tokens. ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [x] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- clients/python/tests/test_client.py | 16 ++++++++++++++++ clients/python/text_generation/client.py | 8 ++++---- clients/python/text_generation/types.py | 2 +- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/clients/python/tests/test_client.py b/clients/python/tests/test_client.py index 1e25e1b1..775e7a6c 100644 --- a/clients/python/tests/test_client.py +++ b/clients/python/tests/test_client.py @@ -21,6 +21,22 @@ def test_generate(flan_t5_xxl_url, hf_headers): assert not response.details.tokens[0].special +def test_generate_max_new_tokens_not_set(flan_t5_xxl_url, hf_headers): + client = Client(flan_t5_xxl_url, hf_headers) + response = client.generate("test", decoder_input_details=True) + + assert response.generated_text != "" + assert response.details.finish_reason == FinishReason.EndOfSequenceToken + assert response.details.generated_tokens > 1 + assert response.details.seed is None + assert len(response.details.prefill) == 1 + assert response.details.prefill[0] == InputToken(id=0, text="", logprob=None) + assert len(response.details.tokens) > 1 + assert response.details.tokens[0].id == 3 + assert response.details.tokens[0].text == " " + assert not response.details.tokens[0].special + + def test_generate_best_of(flan_t5_xxl_url, hf_headers): client = Client(flan_t5_xxl_url, hf_headers) response = client.generate( diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py index 0bf80f8c..63b5258d 100644 --- a/clients/python/text_generation/client.py +++ b/clients/python/text_generation/client.py @@ -62,7 +62,7 @@ class Client: self, prompt: str, do_sample: bool = False, - max_new_tokens: int = 20, + max_new_tokens: Optional[int] = None, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, @@ -157,7 +157,7 @@ class Client: self, 
prompt: str, do_sample: bool = False, - max_new_tokens: int = 20, + max_new_tokens: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, @@ -312,7 +312,7 @@ class AsyncClient: self, prompt: str, do_sample: bool = False, - max_new_tokens: int = 20, + max_new_tokens: Optional[int] = None, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, @@ -405,7 +405,7 @@ class AsyncClient: self, prompt: str, do_sample: bool = False, - max_new_tokens: int = 20, + max_new_tokens: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, diff --git a/clients/python/text_generation/types.py b/clients/python/text_generation/types.py index aa02d8d8..7fa8033e 100644 --- a/clients/python/text_generation/types.py +++ b/clients/python/text_generation/types.py @@ -9,7 +9,7 @@ class Parameters(BaseModel): # Activate logits sampling do_sample: bool = False # Maximum number of generated tokens - max_new_tokens: int = 20 + max_new_tokens: Optional[int] = None # The parameter for repetition penalty. 1.0 means no penalty. # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. repetition_penalty: Optional[float] = None From bf72c03d0e7d6f8e5e053f63b448d658c18afadb Mon Sep 17 00:00:00 2001 From: dtlzhuangz <139844877+dtlzhuangz@users.noreply.github.com> Date: Wed, 31 Jan 2024 19:05:49 +0800 Subject: [PATCH 059/153] feat: eetq gemv optimization when batch_size <= 4 (#1502) # What does this PR do? Add TensorRT-LLM weight-only GEMV kernel support. We extract the GEMV kernel from [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM/tree/main/cpp/tensorrt_llm/kernels/weightOnlyBatchedGemv) to accelerate the decode speed of EETQ when batch_size is less than or equal to 4. - Features 1. There is almost no loss of quantization accuracy. 2. The speed of decoding is 13%-27% faster than the original EETQ, which uses a GEMM kernel. - Test Below are our test results on a 3090. Environment: torch=2.0.1, cuda=11.8, nvidia driver: 525.78.01 prompt=1024, max_new_tokens=50 ![image](https://github.com/huggingface/text-generation-inference/assets/139844877/98e63b23-23cd-452f-91bd-55ccdc9b7021) ![image](https://github.com/huggingface/text-generation-inference/assets/139844877/5c3132ff-fc1c-4b20-a83f-59b3d5f586b7) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
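The dispatch idea behind the speedup is simple to state in pseudo-PyTorch. This is only an illustration of why a GEMV-style path wins at tiny batch sizes, not the actual EETQ CUDA kernels (which operate on packed int8 weights on GPU); `weight_only_matmul` and the shapes below are hypothetical:

```python
import torch

def weight_only_matmul(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    # x: [batch, in_features], w: [out_features, in_features]
    if x.shape[0] <= 4:
        # Small batch: treat each row as a matrix-vector product (GEMV).
        # A dedicated GEMV kernel avoids the tiling overhead a GEMM pays
        # when one matrix dimension is this small.
        return torch.stack([w @ row for row in x])
    # Larger batch: a single GEMM amortizes better.
    return x @ w.t()

x = torch.randn(3, 8)   # batch_size = 3 <= 4 -> GEMV path
w = torch.randn(16, 8)
print(weight_only_matmul(x, w).shape)  # torch.Size([3, 16])
```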
--- server/Makefile-eetq | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/Makefile-eetq b/server/Makefile-eetq index 5e8e9830..8c060987 100644 --- a/server/Makefile-eetq +++ b/server/Makefile-eetq @@ -1,4 +1,4 @@ -eetq_commit := 323827dd471458a84e9c840f614e4592b157a4b1 +eetq_commit := 71adb5e191bb8290069a580abff0355d7b2dd5c9 eetq: # Clone eetq @@ -6,7 +6,7 @@ eetq: git clone https://github.com/NetEase-FuXi/EETQ.git eetq build-eetq: eetq - cd eetq && git fetch && git checkout $(eetq_commit) + cd eetq && git fetch && git checkout $(eetq_commit) && git submodule update --init --recursive cd eetq && python setup.py build install-eetq: build-eetq From 11d8e7132faf75cca10d6e85e196b2cc1153b772 Mon Sep 17 00:00:00 2001 From: drbh Date: Wed, 31 Jan 2024 11:26:22 -0500 Subject: [PATCH 060/153] fix: improve messages api docs content and formatting (#1506) This PR simply updates the messages api docs to address content changes and make format consistent --- docs/source/messages_api.md | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/source/messages_api.md b/docs/source/messages_api.md index 899de865..1e342686 100644 --- a/docs/source/messages_api.md +++ b/docs/source/messages_api.md @@ -1,8 +1,8 @@ # Messages API -_Messages API is compatible to OpenAI Chat Completion API_ +Text Generation Inference (TGI) now supports the Messages API, which is fully compatible with the OpenAI Chat Completion API. This feature is available starting from version 1.4.0. You can use OpenAI's client libraries or third-party libraries expecting OpenAI schema to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility. -Text Generation Inference (TGI) now supports the Message API which is fully compatible with the OpenAI Chat Completion API. This means you can use OpenAI's client libraries to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility. +> **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature. ## Making a Request @@ -87,7 +87,7 @@ TGI can be deployed on various cloud providers for scalable and robust text gene ## Amazon SageMaker -To enable the Messages API in Amazon SageMaker you need to set the environment variable `MESSAGES_API_ENABLED=true`. +To enable the Messages API in Amazon SageMaker you need to set the environment variable `MESSAGES_API_ENABLED=true`. This will modify the `/invocations` route to accept Messages dictonaries consisting out of role and content. See the example below on how to deploy Llama with the new Messages API. @@ -98,30 +98,30 @@ import boto3 from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri try: - role = sagemaker.get_execution_role() + role = sagemaker.get_execution_role() except ValueError: - iam = boto3.client('iam') - role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] + iam = boto3.client('iam') + role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] # Hub Model configuration. 
https://huggingface.co/models hub = { - 'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta', - 'SM_NUM_GPUS': json.dumps(1), - 'MESSAGES_API_ENABLED': True + 'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta', + 'SM_NUM_GPUS': json.dumps(1), + 'MESSAGES_API_ENABLED': True } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( - image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"), - env=hub, - role=role, + image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"), + env=hub, + role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( - initial_instance_count=1, - instance_type="ml.g5.2xlarge", - container_startup_health_check_timeout=300, + initial_instance_count=1, + instance_type="ml.g5.2xlarge", + container_startup_health_check_timeout=300, ) # send request @@ -131,4 +131,4 @@ predictor.predict({ {"role": "user", "content": "What is deep learning?"} ] }) -``` \ No newline at end of file +``` From 27daa511ecf41986ffbce8ed23316b2646cc0bee Mon Sep 17 00:00:00 2001 From: Dean Wyatte <2512762+dwyatte@users.noreply.github.com> Date: Thu, 1 Feb 2024 01:34:11 -0700 Subject: [PATCH 061/153] GPTNeoX: Use static rotary embedding (#1498) # What does this PR do? `transformers` 4.35 removed rotary embeddings from GPTNeoX's weights ([link to line diff](https://github.com/huggingface/transformers/commit/253f9a3f9716d08a81fb305fe71f983122eb608b#diff-0e2a05d86c82e96f516db8c14070ceb36f53ca44c6bc21a9cd92ad2e777b9cf1R298)). This applies the same fix as https://github.com/huggingface/text-generation-inference/pull/793 which generates them on-the-fly using the appropriate value from the config file Fixes https://github.com/huggingface/text-generation-inference/issues/1460 ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [x] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? 
@OlivierDehaene OR @Narsil --- .../models/custom_modeling/flash_neox_modeling.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py b/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py index eea5f787..3ee344e4 100644 --- a/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py @@ -91,6 +91,8 @@ class FlashNeoxAttention(torch.nn.Module): self.hidden_size = hidden_size self.head_size = hidden_size // num_heads + self.rotary_dim = int(config.rotary_pct * self.head_size) + if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " @@ -98,8 +100,11 @@ class FlashNeoxAttention(torch.nn.Module): ) self.num_heads = self.num_heads // weights.process_group.size() - self.rotary_emb = PositionRotaryEmbedding.load( - config=config, prefix=f"{prefix}.rotary_emb", weights=weights + self.rotary_emb = PositionRotaryEmbedding.static( + config=config, + dim=self.rotary_dim, + base=config.rotary_emb_base, + device=weights.device, ) self.softmax_scale = self.head_size ** (-0.5) From 1a0bfe3f7fae612d707a82ad3da17ccc570a1997 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 1 Feb 2024 10:23:37 +0100 Subject: [PATCH 062/153] Freshen up the README. From 2bf39314ba47dc1a18c0522ba9ede7a3ccfc70ea Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 1 Feb 2024 13:29:04 +0100 Subject: [PATCH 063/153] Hotfix the `/health` route. (#1515) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- router/src/server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/router/src/server.rs b/router/src/server.rs index 129db6a2..808d9f6b 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -940,6 +940,7 @@ pub async fn run( // Define base and health routes let base_routes = Router::new() .route("/", post(compat_generate)) + .route("/", get(health)) .route("/info", get(get_model_info)) .route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) From 6c0b21bd14d1980eb06e195d366beb2f45b425da Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 1 Feb 2024 14:36:10 +0000 Subject: [PATCH 064/153] Revert "Modify default for max_new_tokens in python client (#1336)" This reverts commit 2d56f106a60c7b698705494e7539f8a7e4c85dd9. It causes a breakage in our integration tests.
--- clients/python/tests/test_client.py | 16 ---------------- clients/python/text_generation/client.py | 8 ++++---- clients/python/text_generation/types.py | 2 +- 3 files changed, 5 insertions(+), 21 deletions(-) diff --git a/clients/python/tests/test_client.py b/clients/python/tests/test_client.py index 775e7a6c..1e25e1b1 100644 --- a/clients/python/tests/test_client.py +++ b/clients/python/tests/test_client.py @@ -21,22 +21,6 @@ def test_generate(flan_t5_xxl_url, hf_headers): assert not response.details.tokens[0].special -def test_generate_max_new_tokens_not_set(flan_t5_xxl_url, hf_headers): - client = Client(flan_t5_xxl_url, hf_headers) - response = client.generate("test", decoder_input_details=True) - - assert response.generated_text != "" - assert response.details.finish_reason == FinishReason.EndOfSequenceToken - assert response.details.generated_tokens > 1 - assert response.details.seed is None - assert len(response.details.prefill) == 1 - assert response.details.prefill[0] == InputToken(id=0, text="", logprob=None) - assert len(response.details.tokens) > 1 - assert response.details.tokens[0].id == 3 - assert response.details.tokens[0].text == " " - assert not response.details.tokens[0].special - - def test_generate_best_of(flan_t5_xxl_url, hf_headers): client = Client(flan_t5_xxl_url, hf_headers) response = client.generate( diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py index 63b5258d..0bf80f8c 100644 --- a/clients/python/text_generation/client.py +++ b/clients/python/text_generation/client.py @@ -62,7 +62,7 @@ class Client: self, prompt: str, do_sample: bool = False, - max_new_tokens: Optional[int] = None, + max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, @@ -157,7 +157,7 @@ class Client: self, prompt: str, do_sample: bool = False, - max_new_tokens: Optional[int] = None, + max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, @@ -312,7 +312,7 @@ class AsyncClient: self, prompt: str, do_sample: bool = False, - max_new_tokens: Optional[int] = None, + max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, @@ -405,7 +405,7 @@ class AsyncClient: self, prompt: str, do_sample: bool = False, - max_new_tokens: Optional[int] = None, + max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, diff --git a/clients/python/text_generation/types.py b/clients/python/text_generation/types.py index 7fa8033e..aa02d8d8 100644 --- a/clients/python/text_generation/types.py +++ b/clients/python/text_generation/types.py @@ -9,7 +9,7 @@ class Parameters(BaseModel): # Activate logits sampling do_sample: bool = False # Maximum number of generated tokens - max_new_tokens: Optional[int] = None + max_new_tokens: int = 20 # The parameter for repetition penalty. 1.0 means no penalty. # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. repetition_penalty: Optional[float] = None From 14b40bffbafa04301c696b6175a8cf8bb40c0f01 Mon Sep 17 00:00:00 2001 From: drbh Date: Thu, 1 Feb 2024 09:39:32 -0500 Subject: [PATCH 065/153] fix: tokenizer config should use local model path when possible (#1518) This PR fixes the issue with loading a local tokenizer config. Previously the default functionality would look in the current working directory. 
Now if a local model path is specified we will check that directory for the tokenizer_config. ## Examples of valid commands uses tokenizer_config from hub ``` text-generation-launcher --model-id HuggingFaceH4/zephyr-7b-beta ``` use tokenizer_config from local model path ``` text-generation-launcher \ --model-id ~/.cache/huggingface/hub/models--HuggingFaceH4--zephyr-7b-beta/snapshots/dc24cabd13eacd3ae3a5fe574bd645483a335a4a/ ``` use specific tokenizer_config file ``` text-generation-launcher \ --model-id ~/.cache/huggingface/hub/models--HuggingFaceH4--zephyr-7b-beta/snapshots/dc24cabd13eacd3ae3a5fe574bd645483a335a4a/ \ --tokenizer-config-path ~/.cache/huggingface/hub/models--HuggingFaceH4--zephyr-7b-beta/snapshots/dc24cabd13eacd3ae3a5fe574bd645483a335a4a/tokenizer_config.json ``` --------- Co-authored-by: Nicolas Patry --- router/src/lib.rs | 2 +- router/src/main.rs | 49 +++++++++++++++++++++++++--------------------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/router/src/lib.rs b/router/src/lib.rs index fc5670a0..07360e78 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -37,7 +37,7 @@ pub struct HubTokenizerConfig { } impl HubTokenizerConfig { - pub fn from_file(filename: &str) -> Self { + pub fn from_file(filename: &std::path::Path) -> Self { let content = std::fs::read_to_string(filename).unwrap(); serde_json::from_str(&content).unwrap_or_default() } diff --git a/router/src/main.rs b/router/src/main.rs index 6526b859..702393aa 100644 --- a/router/src/main.rs +++ b/router/src/main.rs @@ -157,12 +157,6 @@ async fn main() -> Result<(), RouterError> { let local_path = Path::new(&tokenizer_name); let local_model = local_path.exists() && local_path.is_dir(); - // Load tokenizer config - // This will be used to format the chat template - let local_tokenizer_config_path = - tokenizer_config_path.unwrap_or("tokenizer_config.json".to_string()); - let local_tokenizer_config = Path::new(&local_tokenizer_config_path).exists(); - // Shared API builder initialization let api_builder = || { let mut builder = ApiBuilder::new() @@ -240,24 +234,35 @@ async fn main() -> Result<(), RouterError> { }; // Load tokenizer config if found locally, or check if we can get it from the API if needed - let tokenizer_config = if local_tokenizer_config { + let tokenizer_config = if let Some(path) = tokenizer_config_path { + tracing::info!("Using local tokenizer config from user specified path"); + HubTokenizerConfig::from_file(&std::path::PathBuf::from(path)) + } else if local_model { tracing::info!("Using local tokenizer config"); - HubTokenizerConfig::from_file(&local_tokenizer_config_path) - } else if let Some(api) = api { - tracing::info!("Using the Hugging Face API to retrieve tokenizer config"); - get_tokenizer_config(&api.repo(Repo::with_revision( - tokenizer_name.to_string(), - RepoType::Model, - revision.unwrap_or_else(|| "main".to_string()), - ))) - .await - .unwrap_or_else(|| { - tracing::warn!("Could not retrieve tokenizer config from the Hugging Face hub."); - HubTokenizerConfig::default() - }) + HubTokenizerConfig::from_file(&local_path.join("tokenizer_config.json")) } else { - tracing::warn!("Could not find tokenizer config locally and no revision specified"); - HubTokenizerConfig::default() + match api { + Some(api) => { + tracing::info!("Using the Hugging Face API to retrieve tokenizer config"); + let repo = Repo::with_revision( + tokenizer_name.to_string(), + RepoType::Model, + revision.unwrap_or("main".to_string()), + ); + get_tokenizer_config(&api.repo(repo)) + .await + 
.unwrap_or_else(|| { + tracing::warn!( + "Could not retrieve tokenizer config from the Hugging Face hub." + ); + HubTokenizerConfig::default() + }) + } + None => { + tracing::warn!("Could not find tokenizer config locally and no API specified"); + HubTokenizerConfig::default() + } + } }; if tokenizer.is_none() { From 369ae2dcc1015a8deb8c3319a400974ec26da2f8 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 1 Feb 2024 16:26:48 +0100 Subject: [PATCH 066/153] Updating tokenizers. (#1517) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- router/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/router/Cargo.toml b/router/Cargo.toml index f6f16dae..1a7ceb70 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -32,7 +32,7 @@ reqwest = { version = "0.11.20", features = [] } serde = "1.0.188" serde_json = "1.0.107" thiserror = "1.0.48" -tokenizers = { version = "0.14.0", features = ["http"] } +tokenizers = { version = "0.15.1", features = ["http"] } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] } tokio-stream = "0.1.14" tower-http = { version = "0.4.4", features = ["cors"] } From e39ba494b8acfa1648ef912e17b5629f04ee3ae3 Mon Sep 17 00:00:00 2001 From: Pedro Cuenca Date: Fri, 2 Feb 2024 14:05:30 +0100 Subject: [PATCH 067/153] [docs] Fix link to Install CLI (#1526) # What does this PR do? Attempts to fix a link from Using TGI CLI to Installation. ## Before submitting - [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? 
--- docs/source/basic_tutorials/using_cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/basic_tutorials/using_cli.md b/docs/source/basic_tutorials/using_cli.md index 82c10e6b..a3a65f60 100644 --- a/docs/source/basic_tutorials/using_cli.md +++ b/docs/source/basic_tutorials/using_cli.md @@ -1,6 +1,6 @@ # Using TGI CLI -You can use TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, please refer to [the installation section](./installation#install-cli). +You can use TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, please refer to [the installation section](../installation#install-cli). `text-generation-server` lets you download the model with `download-weights` command like below 👇 From 62a40b8aa69709f28ac72bd8d03fb4370cc082bc Mon Sep 17 00:00:00 2001 From: drbh Date: Fri, 2 Feb 2024 10:31:11 -0500 Subject: [PATCH 068/153] feat: add ie update to message docs (#1523) update messages api docs and add Hugging Face Inference Endpoints integrations section/instructions --------- Co-authored-by: Philipp Schmid <32632186+philschmid@users.noreply.github.com> --- docs/source/messages_api.md | 45 +++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/docs/source/messages_api.md b/docs/source/messages_api.md index 1e342686..939850aa 100644 --- a/docs/source/messages_api.md +++ b/docs/source/messages_api.md @@ -4,6 +4,15 @@ Text Generation Inference (TGI) now supports the Messages API, which is fully co > **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature. +#### Table of Contents + +- [Making a Request](#making-a-request) +- [Streaming](#streaming) +- [Synchronous](#synchronous) +- [Hugging Face Inference Endpoints](#hugging-face-inference-endpoints) +- [Cloud Providers](#cloud-providers) + - [Amazon SageMaker](#amazon-sagemaker) + ## Making a Request You can make a request to TGI's Messages API using `curl`. Here's an example: @@ -81,6 +90,38 @@ chat_completion = client.chat.completions.create( print(chat_completion) ``` +## Hugging Face Inference Endpoints + +The Messages API is integrated with [Inference Endpoints](https://huggingface.co/inference-endpoints/dedicated). +Every endpoint that uses "Text Generation Inference" with an LLM, which has a chat template can now be used. Below is an example of how to use IE with TGI using OpenAI's Python client library: + +> **Note:** Make sure to replace `base_url` with your endpoint URL and to include `v1/` at the end of the URL. The `api_key` should be replaced with your Hugging Face API key. + +```python +from openai import OpenAI + +# init the client but point it to TGI +client = OpenAI( + # replace with your endpoint url, make sure to include "v1/" at the end + base_url="https://vlzz10eq3fol3429.us-east-1.aws.endpoints.huggingface.cloud/v1/", + # replace with your API key + api_key="hf_XXX" +) + +chat_completion = client.chat.completions.create( + model="tgi", + messages=[ + {"role": "system", "content": "You are a helpful assistant." 
}, + {"role": "user", "content": "What is deep learning?"} + ], + stream=True +) + +# iterate and print stream +for message in chat_completion: + print(message.choices[0].delta.content, end="") +``` + ## Cloud Providers TGI can be deployed on various cloud providers for scalable and robust text generation. One such provider is Amazon SageMaker, which has recently added support for TGI. Here's how you can deploy TGI on Amazon SageMaker: @@ -114,7 +155,7 @@ hub = { huggingface_model = HuggingFaceModel( image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"), env=hub, - role=role, + role=role, ) # deploy model to SageMaker Inference @@ -123,7 +164,7 @@ predictor = huggingface_model.deploy( instance_type="ml.g5.2xlarge", container_startup_health_check_timeout=300, ) - + # send request predictor.predict({ "messages": [ From 99cb270f910e8daead53822684f308b2e6c42699 Mon Sep 17 00:00:00 2001 From: drbh Date: Wed, 7 Feb 2024 03:35:53 -0500 Subject: [PATCH 069/153] =?UTF-8?q?feat:=20use=20existing=20add=5Fgenerati?= =?UTF-8?q?on=5Fprompt=20variable=20from=20config=20in=20temp=E2=80=A6=20(?= =?UTF-8?q?#1533)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds support to read the `add_generation_prompt` from the config and use it in the chat template. If `add_generation_prompt` does not exist we default to false --- router/src/infer.rs | 64 ++++++++++++++++++++++++++++++++++++++------- router/src/lib.rs | 1 + 2 files changed, 56 insertions(+), 9 deletions(-) diff --git a/router/src/infer.rs b/router/src/infer.rs index 74f32e85..7b6b9910 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -209,6 +209,7 @@ impl Infer { messages, eos_token: eos_token.as_deref(), bos_token: bos_token.as_deref(), + add_generation_prompt: true, }) .map_err(|e| { metrics::increment_counter!("tgi_request_failure", "err" => "template"); @@ -817,21 +818,14 @@ mod tests { ], bos_token: Some("[BOS]"), eos_token: Some("[EOS]"), + add_generation_prompt: true, }; let result = tmpl.unwrap().render(chat_template_inputs).unwrap(); assert_eq!( result, - r#"### User: -Hi! - -### Assistant: -Hello how can I help?### User: -What is Deep Learning? - -### Assistant: -magic!"# + "### User:\nHi!\n\n### Assistant:\nHello how can I help?### User:\nWhat is Deep Learning?\n\n### Assistant:\nmagic!### Assistant:\n" ); } @@ -889,6 +883,7 @@ magic!"# ], bos_token: Some("[BOS]"), eos_token: Some("[EOS]"), + add_generation_prompt: true, }; let result = tmpl.unwrap().render(chat_template_inputs); //.err().unwrap(); @@ -954,9 +949,60 @@ magic!"# ], bos_token: Some("[BOS]"), eos_token: Some("[EOS]"), + add_generation_prompt: true, }; let result = tmpl.unwrap().render(chat_template_inputs).unwrap(); assert_eq!(result, "[BOS][INST] Hi! [/INST]Hello how can I help?[EOS][INST] What is Deep Learning? 
[/INST]magic![EOS]"); } + + #[test] + fn test_chat_template_valid_with_add_generation_prompt() { + let mut env = Environment::new(); + env.add_function("raise_exception", raise_exception); + + let source = r#" + {% for message in messages %} + {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}} + {% endfor %} + {% if add_generation_prompt %} + {{ '<|im_start|>assistant\n' }} + {% endif %}"#; + + // trim all the whitespace + let source = source + .lines() + .map(|line| line.trim()) + .collect::>() + .join(""); + + let tmpl = env.template_from_str(&source); + + let chat_template_inputs = ChatTemplateInputs { + messages: vec![ + Message { + role: "user".to_string(), + content: "Hi!".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "Hello how can I help?".to_string(), + }, + Message { + role: "user".to_string(), + content: "What is Deep Learning?".to_string(), + }, + Message { + role: "assistant".to_string(), + content: "magic!".to_string(), + }, + ], + bos_token: Some("[BOS]"), + eos_token: Some("[EOS]"), + add_generation_prompt: true, + }; + + let result = tmpl.unwrap().render(chat_template_inputs).unwrap(); + assert_eq!(result, "<|im_start|>user\nHi!<|im_end|>\n<|im_start|>assistant\nHello how can I help?<|im_end|>\n<|im_start|>user\nWhat is Deep Learning?<|im_end|>\n<|im_start|>assistant\nmagic!<|im_end|>\n<|im_start|>assistant\n"); + } } diff --git a/router/src/lib.rs b/router/src/lib.rs index 07360e78..e85519cc 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -398,6 +398,7 @@ pub(crate) struct ChatTemplateInputs<'a> { messages: Vec, bos_token: Option<&'a str>, eos_token: Option<&'a str>, + add_generation_prompt: bool, } #[derive(Clone, Deserialize, ToSchema, Serialize)] From 51a4e62ed4a5e05f74afb06c09fdb20cd65d8d1e Mon Sep 17 00:00:00 2001 From: drbh Date: Thu, 8 Feb 2024 04:19:45 -0500 Subject: [PATCH 070/153] Impl simple mamba model (#1480) This draft PR is a work in progress implementation of the mamba model. This PR currently loads weights, and produces correct logits after a single pass. This PR still needs to correctly integrate this model so it produces tokens as expected, and apply optimization to avoid all copies during runtime/unnecessary operations. [Mamba: Linear-Time Sequence Modeling with Selective State Spaces (Albert Gu and Tri Dao)](https://arxiv.org/abs/2312.00752) https://github.com/johnma2006/mamba-minimal https://github.com/huggingface/candle/blob/main/candle-examples/examples/mamba-minimal/model.rs https://github.com/huggingface/transformers/pull/28094 Notes: this dev work is currently targeting `state-spaces/mamba-130m`, so if you want to test please use that model. Additionally when starting the router the prefill needs to be limited: `cargo run -- --max-batch-prefill-tokens 768 --max-input-length 768` Integration tests have been added and basic functionality such as model loading is supported. 
```bash cd integration-tests pytest -vv models/test_fused_kernel_mamba.py ``` - [x] add tests - [x] load model - [x] make simple request - [ ] resolve warmup issue - [ ] resolve output issues fetching models tested during dev ```bash text-generation-server download-weights state-spaces/mamba-130m text-generation-server download-weights state-spaces/mamba-1.4b text-generation-server download-weights state-spaces/mamba-2.8b ``` The server can be run ```bash cd server MASTER_ADDR=127.0.0.1 MASTER_PORT=5555 python text_generation_server/cli.py serve state-spaces/mamba-2.8b ``` router ```bash cargo run ``` make a request ```bash curl -s localhost:3000/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' | jq ``` response ```json { "generated_text": "\n\nDeep learning is a machine learning technique that uses a deep neural network to learn from data." } ``` --------- Co-authored-by: Nicolas Patry --- .../__snapshots__/test_mamba/test_mamba.json | 73 ++ .../test_mamba/test_mamba_all_params.json | 99 +++ .../test_mamba/test_mamba_load.json | 398 +++++++++++ integration-tests/models/test_mamba.py | 59 ++ server/.gitignore | 1 + server/Makefile | 1 + server/Makefile-selective-scan | 28 + .../models/custom_modeling/mamba_modeling.py | 194 ++++++ server/text_generation_server/models/mamba.py | 656 ++++++++++++++++++ 9 files changed, 1509 insertions(+) create mode 100644 integration-tests/models/__snapshots__/test_mamba/test_mamba.json create mode 100644 integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json create mode 100644 integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json create mode 100644 integration-tests/models/test_mamba.py create mode 100644 server/Makefile-selective-scan create mode 100644 server/text_generation_server/models/custom_modeling/mamba_modeling.py create mode 100644 server/text_generation_server/models/mamba.py diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba.json new file mode 100644 index 00000000..4435f215 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba.json @@ -0,0 +1,73 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [], + "seed": null, + "tokens": [ + { + "id": 187, + "logprob": -0.3552246, + "special": false, + "text": "\n" + }, + { + "id": 187, + "logprob": -0.38378906, + "special": false, + "text": "\n" + }, + { + "id": 30763, + "logprob": -1.140625, + "special": false, + "text": "Deep" + }, + { + "id": 4715, + "logprob": -0.5551758, + "special": false, + "text": " learning" + }, + { + "id": 310, + "logprob": -0.59033203, + "special": false, + "text": " is" + }, + { + "id": 247, + "logprob": -0.70654297, + "special": false, + "text": " a" + }, + { + "id": 747, + "logprob": -2.0410156, + "special": false, + "text": " new" + }, + { + "id": 1511, + "logprob": -2.3789062, + "special": false, + "text": " type" + }, + { + "id": 273, + "logprob": -0.0026435852, + "special": false, + "text": " of" + }, + { + "id": 5145, + "logprob": -1.2841797, + "special": false, + "text": " machine" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nDeep learning is a new type of machine" +} diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json new file 
mode 100644 index 00000000..052c1c69 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json @@ -0,0 +1,99 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 2502, + "logprob": null, + "text": " red" + }, + { + "id": 13, + "logprob": -2.5234375, + "text": "," + }, + { + "id": 8862, + "logprob": -3.4433594, + "text": " yellow" + }, + { + "id": 13, + "logprob": -0.43017578, + "text": "," + }, + { + "id": 209, + "logprob": -8.21875, + "text": " " + } + ], + "seed": 0, + "tokens": [ + { + "id": 187, + "logprob": 0.0, + "special": false, + "text": "\n" + }, + { + "id": 395, + "logprob": -0.46411133, + "special": false, + "text": "and" + }, + { + "id": 13735, + "logprob": -2.1132812, + "special": false, + "text": " orange" + }, + { + "id": 313, + "logprob": -1.2128906, + "special": false, + "text": " (" + }, + { + "id": 249, + "logprob": -2.3671875, + "special": false, + "text": "in" + }, + { + "id": 253, + "logprob": 0.0, + "special": false, + "text": " the" + }, + { + "id": 1340, + "logprob": -1.640625, + "special": false, + "text": " order" + }, + { + "id": 597, + "logprob": -0.5488281, + "special": false, + "text": " they" + }, + { + "id": 3176, + "logprob": -0.48608398, + "special": false, + "text": " appear" + }, + { + "id": 275, + "logprob": 0.0, + "special": false, + "text": " in" + } + ], + "top_tokens": null + }, + "generated_text": "blue, red, yellow, \nand orange (in the order they appear in" +} diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json new file mode 100644 index 00000000..014210b2 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json @@ -0,0 +1,398 @@ +[ + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1276, + "logprob": null, + "text": "What" + }, + { + "id": 310, + "logprob": -0.8125, + "text": " is" + }, + { + "id": 18147, + "logprob": -12.828125, + "text": " Deep" + }, + { + "id": 20727, + "logprob": -3.0, + "text": " Learning" + }, + { + "id": 32, + "logprob": -1.1484375, + "text": "?" 
+ } + ], + "seed": null, + "tokens": [ + { + "id": 187, + "logprob": -0.3552246, + "special": false, + "text": "\n" + }, + { + "id": 187, + "logprob": -0.38378906, + "special": false, + "text": "\n" + }, + { + "id": 30763, + "logprob": -1.1279297, + "special": false, + "text": "Deep" + }, + { + "id": 4715, + "logprob": -0.5595703, + "special": false, + "text": " learning" + }, + { + "id": 310, + "logprob": -0.60253906, + "special": false, + "text": " is" + }, + { + "id": 247, + "logprob": -0.7050781, + "special": false, + "text": " a" + }, + { + "id": 747, + "logprob": -2.0488281, + "special": false, + "text": " new" + }, + { + "id": 1511, + "logprob": -2.3808594, + "special": false, + "text": " type" + }, + { + "id": 273, + "logprob": -0.0026416779, + "special": false, + "text": " of" + }, + { + "id": 5145, + "logprob": -1.2851562, + "special": false, + "text": " machine" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nDeep learning is a new type of machine" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1276, + "logprob": null, + "text": "What" + }, + { + "id": 310, + "logprob": -0.78027344, + "text": " is" + }, + { + "id": 18147, + "logprob": -12.8203125, + "text": " Deep" + }, + { + "id": 20727, + "logprob": -2.9902344, + "text": " Learning" + }, + { + "id": 32, + "logprob": -1.1523438, + "text": "?" + } + ], + "seed": null, + "tokens": [ + { + "id": 187, + "logprob": -0.35351562, + "special": false, + "text": "\n" + }, + { + "id": 187, + "logprob": -0.38256836, + "special": false, + "text": "\n" + }, + { + "id": 30763, + "logprob": -1.1269531, + "special": false, + "text": "Deep" + }, + { + "id": 4715, + "logprob": -0.54541016, + "special": false, + "text": " learning" + }, + { + "id": 310, + "logprob": -0.59765625, + "special": false, + "text": " is" + }, + { + "id": 247, + "logprob": -0.7001953, + "special": false, + "text": " a" + }, + { + "id": 747, + "logprob": -2.0585938, + "special": false, + "text": " new" + }, + { + "id": 1511, + "logprob": -2.3789062, + "special": false, + "text": " type" + }, + { + "id": 273, + "logprob": -0.0027446747, + "special": false, + "text": " of" + }, + { + "id": 5145, + "logprob": -1.2851562, + "special": false, + "text": " machine" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nDeep learning is a new type of machine" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1276, + "logprob": null, + "text": "What" + }, + { + "id": 310, + "logprob": -0.78027344, + "text": " is" + }, + { + "id": 18147, + "logprob": -12.8203125, + "text": " Deep" + }, + { + "id": 20727, + "logprob": -2.9902344, + "text": " Learning" + }, + { + "id": 32, + "logprob": -1.1523438, + "text": "?" 
+ } + ], + "seed": null, + "tokens": [ + { + "id": 187, + "logprob": -0.35351562, + "special": false, + "text": "\n" + }, + { + "id": 187, + "logprob": -0.38256836, + "special": false, + "text": "\n" + }, + { + "id": 30763, + "logprob": -1.1269531, + "special": false, + "text": "Deep" + }, + { + "id": 4715, + "logprob": -0.54541016, + "special": false, + "text": " learning" + }, + { + "id": 310, + "logprob": -0.59765625, + "special": false, + "text": " is" + }, + { + "id": 247, + "logprob": -0.7001953, + "special": false, + "text": " a" + }, + { + "id": 747, + "logprob": -2.0585938, + "special": false, + "text": " new" + }, + { + "id": 1511, + "logprob": -2.3789062, + "special": false, + "text": " type" + }, + { + "id": 273, + "logprob": -0.0027446747, + "special": false, + "text": " of" + }, + { + "id": 5145, + "logprob": -1.2851562, + "special": false, + "text": " machine" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nDeep learning is a new type of machine" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1276, + "logprob": null, + "text": "What" + }, + { + "id": 310, + "logprob": -0.78027344, + "text": " is" + }, + { + "id": 18147, + "logprob": -12.8203125, + "text": " Deep" + }, + { + "id": 20727, + "logprob": -2.9902344, + "text": " Learning" + }, + { + "id": 32, + "logprob": -1.1523438, + "text": "?" + } + ], + "seed": null, + "tokens": [ + { + "id": 187, + "logprob": -0.35351562, + "special": false, + "text": "\n" + }, + { + "id": 187, + "logprob": -0.38256836, + "special": false, + "text": "\n" + }, + { + "id": 30763, + "logprob": -1.1269531, + "special": false, + "text": "Deep" + }, + { + "id": 4715, + "logprob": -0.54541016, + "special": false, + "text": " learning" + }, + { + "id": 310, + "logprob": -0.59765625, + "special": false, + "text": " is" + }, + { + "id": 247, + "logprob": -0.7001953, + "special": false, + "text": " a" + }, + { + "id": 747, + "logprob": -2.0585938, + "special": false, + "text": " new" + }, + { + "id": 1511, + "logprob": -2.3789062, + "special": false, + "text": " type" + }, + { + "id": 273, + "logprob": -0.0027446747, + "special": false, + "text": " of" + }, + { + "id": 5145, + "logprob": -1.2851562, + "special": false, + "text": " machine" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nDeep learning is a new type of machine" + } +] diff --git a/integration-tests/models/test_mamba.py b/integration-tests/models/test_mamba.py new file mode 100644 index 00000000..d86faeff --- /dev/null +++ b/integration-tests/models/test_mamba.py @@ -0,0 +1,59 @@ +import pytest + + +@pytest.fixture(scope="module") +def fused_kernel_mamba_handle(launcher): + with launcher("state-spaces/mamba-130m", num_shard=1) as handle: + yield handle + + +@pytest.fixture(scope="module") +async def fused_kernel_mamba(fused_kernel_mamba_handle): + await fused_kernel_mamba_handle.health(300) + return fused_kernel_mamba_handle.client + + +@pytest.mark.asyncio +@pytest.mark.private +async def test_mamba(fused_kernel_mamba, response_snapshot): + response = await fused_kernel_mamba.generate( + "What is Deep Learning?", max_new_tokens=10 + ) + + assert response.details.generated_tokens == 10 + assert response.generated_text == "\n\nDeep learning is a new type of machine" + assert response == response_snapshot + +@pytest.mark.asyncio +@pytest.mark.private +async def test_mamba_all_params(fused_kernel_mamba, response_snapshot): + response = await fused_kernel_mamba.generate( + "blue, 
red, yellow, ", + max_new_tokens=10, + repetition_penalty=1.2, + return_full_text=True, + stop_sequences=["test"], + temperature=0.5, + top_p=0.9, + top_k=10, + truncate=5, + typical_p=0.9, + watermark=True, + decoder_input_details=True, + seed=0, + ) + + assert response.details.generated_tokens == 10 + assert response.generated_text == "blue, red, yellow, \nand orange (in the order they appear in" + assert response == response_snapshot + +@pytest.mark.asyncio +@pytest.mark.private +async def test_mamba_load(fused_kernel_mamba, generate_load, response_snapshot): + responses = await generate_load(fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4) + + assert len(responses) == 4 + assert all([r.generated_text == responses[0].generated_text for r in responses]) + assert responses[0].generated_text == "\n\nDeep learning is a new type of machine" + + assert responses == response_snapshot diff --git a/server/.gitignore b/server/.gitignore index dcb8fe67..576746ee 100644 --- a/server/.gitignore +++ b/server/.gitignore @@ -161,3 +161,4 @@ flash-attention-v2/ vllm/ llm-awq/ eetq/ +mamba/ diff --git a/server/Makefile b/server/Makefile index 23dc6959..ae2248e3 100644 --- a/server/Makefile +++ b/server/Makefile @@ -3,6 +3,7 @@ include Makefile-flash-att-v2 include Makefile-vllm include Makefile-awq include Makefile-eetq +include Makefile-selective-scan unit-tests: pytest -s -vv -m "not private" tests diff --git a/server/Makefile-selective-scan b/server/Makefile-selective-scan new file mode 100644 index 00000000..f4dec868 --- /dev/null +++ b/server/Makefile-selective-scan @@ -0,0 +1,28 @@ +selective_scan_commit := 2a3704fd47ba817b415627b06fd796b971fdc137 + +causal-conv1d: + rm -rf causal-conv1d + git clone https://github.com/Dao-AILab/causal-conv1d.git + +build-causal-conv1d: causal-conv1d + cd causal-conv1d/ && git checkout v1.1.1 # known latest working version tag + cd causal-conv1d/ && CAUSAL_CONV1D_FORCE_BUILD=TRUE python setup.py build + +install-causal-conv1d: build-causal-conv1d + pip uninstall causal-conv1d -y || true + cd causal-conv1d/ && pip install . + +# selective-scan dependends on causal-conv1d +selective-scan: + rm -rf mamba + git clone https://github.com/state-spaces/mamba.git mamba + +build-selective-scan: selective-scan + cd mamba/ && git fetch && git checkout $(selective_scan_commit) + cd mamba && python setup.py build + +install-selective-scan: install-causal-conv1d build-selective-scan + pip uninstall selective-scan-cuda -y || true + cd mamba && pip install . 
+ +build-all: build-causal-conv1d build-selective-scan \ No newline at end of file diff --git a/server/text_generation_server/models/custom_modeling/mamba_modeling.py b/server/text_generation_server/models/custom_modeling/mamba_modeling.py new file mode 100644 index 00000000..1773f04d --- /dev/null +++ b/server/text_generation_server/models/custom_modeling/mamba_modeling.py @@ -0,0 +1,194 @@ +import torch +import torch.distributed + +from mamba_ssm.ops.triton.selective_state_update import selective_state_update +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn +from mamba_ssm.utils.generation import InferenceParams +from torch import nn +from typing import Optional, Tuple, Any +from transformers.configuration_utils import PretrainedConfig +import torch.nn.functional as F + +from text_generation_server.utils.layers import ( + TensorParallelEmbedding, + FastRMSNorm, + FastLinear, +) + +from einops import rearrange +from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +import math + +class MambaConfig(PretrainedConfig): + def __init__( + self, + vocab_size=50280, + d_model=768, + d_state=16, + n_layer=32, + layer_norm_epsilon=1e-5, + tie_word_embeddings=False, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + expand=2, + dt_rank="auto", + **kwargs, + ): + self.vocab_size = vocab_size + self.n_layer = n_layer + self.layer_norm_epsilon = layer_norm_epsilon + self.d_model = d_model + self.d_inner = d_model * 2 + self.d_conv = 4 + self.d_state = d_state + self.expand = expand + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + +class MambaBlock(nn.Module): + def __init__(self, prefix, config, weights): + super().__init__() + self.layer_idx = int(prefix.split(".")[2]) + self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False) + self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False) + self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True) + self.dt_proj_no_bias = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=False) + self.out_proj = FastLinear.load(config, f"{prefix}.out_proj", weights, bias=False) + self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True) + self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float()) + self.D = weights.get_tensor(f"{prefix}.D") + self.activation = "silu" + self.dt_rank = config.dt_rank + self.d_state = config.d_state + self.d_conv = config.d_conv + self.act = nn.SiLU() + + # inference_params + def forward(self, hidden_states: torch.Tensor, inference_params=None): + _, seqlen, _ = hidden_states.shape + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + + if inference_params.seqlen_offset > 0: + out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state) + return out, conv_state, ssm_state + + projected_states = self.in_proj(hidden_states).transpose(1,2) + x, z = projected_states.chunk(2, dim=1) + conv_state = F.pad(x, (self.d_conv - seqlen, 0)) + x = causal_conv1d_fn( + x=x, + weight=self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)), + bias=self.conv1d.bias, + activation=self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. 
+ # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. + x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + y, last_state = selective_scan_fn( + x, + dt, + self.negA, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=True, + ) + y = rearrange(y, "b d l -> b l d") + attn_outputs = self.out_proj(y) + return attn_outputs, conv_state, last_state + + def step(self, hidden_states, conv_state, ssm_state): + _xz = self.in_proj(hidden_states) + _x, _z = _xz.chunk(2, dim=-1) # (B D) + conv_state_new = torch.cat([conv_state, _x.transpose(1,2)], dim=-1) + conv_out = causal_conv1d_fn( + x=conv_state_new, + weight=self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)), + bias=self.conv1d.bias, + activation=self.activation + ) + conv_state = conv_state_new[:, :, 1:] + bsz, seqlen, dim = hidden_states.shape + output_tensor = torch.zeros( + (bsz, seqlen, dim), + device=hidden_states.device, + dtype=hidden_states.dtype + ) + for i in range(0, bsz): + x = conv_out[i:i+1,:,-1] + z = _z[i:i+1, -1, :] + x_db = self.x_proj(x) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = F.linear(dt, self.dt_proj.weight) + y = selective_state_update( + ssm_state[i:i+1,:,:], x, dt, self.negA, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + out = self.out_proj(y) + output_tensor[i] = out + + return output_tensor, conv_state, ssm_state + + + +class ResidualBlock(nn.Module): + def __init__(self, layer_id, config, weights): + super().__init__() + self.mamba_block = MambaBlock(prefix=f"{layer_id}.mixer", config=config, weights=weights) + self.layer_norm = FastRMSNorm.load(prefix=f"{layer_id}.norm", weights=weights, eps=config.layer_norm_epsilon) + + def forward( + self, + hidden_states: torch.Tensor, + residual: Optional[torch.Tensor] = None, + inference_params: Optional[Any] = None, + ): + residual = (hidden_states + residual) if residual is not None else hidden_states + shape = residual.shape + hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1])) + hidden_states, conv_state, last_ssm_state = self.mamba_block(hidden_states.view(*shape), inference_params) + return hidden_states, residual, conv_state, last_ssm_state + +class MambaModel(nn.Module): + def __init__(self, config, weights): + super().__init__() + prefix = "backbone" + self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights) + self.blocks = nn.ModuleList( + [ResidualBlock(f"{prefix}.layers.{i}", config, weights) for i in range(config.n_layer)] + ) + self.norm_f = FastRMSNorm.load(f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon) + self.lm_head = FastLinear.load(config, f"{prefix}.embedding", weights, bias=False) + self.config = config + + def forward(self, input_ids: torch.Tensor, inference_params=None, residual=None) -> Tuple[torch.Tensor, torch.Tensor, InferenceParams]: + hidden_states = self.embed_tokens(input_ids) + for block in self.blocks: + hidden_states, residual, conv_state, ssm_state = block(hidden_states, residual, inference_params) + 
inference_params.key_value_memory_dict[block.mamba_block.layer_idx] = (conv_state, ssm_state) + + hidden_states = hidden_states + residual if residual is not None else hidden_states + hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1))) + hidden_states = hidden_states.view(residual.shape) + logits = self.lm_head(hidden_states) + + # update the offset for the next inference using these params + inference_params.seqlen_offset += input_ids.size(1) + return logits, input_ids, inference_params \ No newline at end of file diff --git a/server/text_generation_server/models/mamba.py b/server/text_generation_server/models/mamba.py new file mode 100644 index 00000000..c10910aa --- /dev/null +++ b/server/text_generation_server/models/mamba.py @@ -0,0 +1,656 @@ +import torch +import torch.distributed +from transformers import AutoTokenizer, PreTrainedTokenizerBase +from typing import Optional +from text_generation_server.models.custom_modeling.mamba_modeling import ( + MambaConfig, +) +from text_generation_server.pb import generate_pb2 +from text_generation_server.utils import ( + initialize_torch_distributed, + weight_files, + Weights, +) +import time +from text_generation_server.models.custom_modeling.mamba_modeling import MambaModel +from text_generation_server.models import Model +from typing import Any, List, Optional, Tuple, Type, Dict +from text_generation_server.models.types import ( + Batch, + Tokens, + Generation, + GeneratedText, +) +from text_generation_server.utils.tokens import batch_top_tokens, Sampling +from dataclasses import dataclass +from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling +from mamba_ssm.utils.generation import InferenceParams + +@dataclass +class MambaBatch(Batch): + batch_id: int + requests: List[generate_pb2.Request] + requests_idx_mapping: Dict[int, int] + + # Decoder values + input_ids: torch.Tensor + + # All tokens + all_input_ids: List[torch.Tensor] + + # Lengths of all generations present in the batch + input_lengths: List[int] + prefix_offsets: List[int] + read_offsets: List[int] + + # Generation helpers + next_token_choosers: List[NextTokenChooser] + stopping_criterias: List[StoppingCriteria] + top_n_tokens: List[int] + top_n_tokens_tensor: torch.Tensor + + # Metadata used for padding + max_input_length: int + padding_right_offset: int + + # Maximum number of tokens this batch will grow to + max_tokens: int + + # Past metadata + keys_head_dim_last: bool = True + + # Inference params + inference_params: Optional[Dict[str, Any]] = None + + def to_pb(self) -> generate_pb2.CachedBatch: + return generate_pb2.CachedBatch( + id=self.batch_id, + request_ids=[r.id for r in self.requests], + size=len(self), + max_tokens=self.max_tokens, + ) + + @classmethod + def from_pb( + cls, + pb: generate_pb2.Batch, + tokenizer: PreTrainedTokenizerBase, + dtype: torch.dtype, + device: torch.device, + ) -> "MambaBatch": + inputs = [] + next_token_choosers = [] + stopping_criterias = [] + top_n_tokens = [] + prefix_offsets = [] + read_offsets = [] + requests_idx_mapping = {} + + # Parse batch + max_truncation = 0 + padding_right_offset = 0 + max_decode_tokens = 0 + for i, r in enumerate(pb.requests): + requests_idx_mapping[r.id] = i + inputs.append(r.inputs) + next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device)) + stopping_criteria = StoppingCriteria.from_pb( + r.stopping_parameters, tokenizer + ) + stopping_criterias.append(stopping_criteria) + top_n_tokens.append(r.top_n_tokens) + max_truncation = 
max(max_truncation, r.truncate) + max_decode_tokens += stopping_criteria.max_new_tokens + padding_right_offset = max( + padding_right_offset, stopping_criteria.max_new_tokens + ) + + tokenized_inputs = tokenizer( + inputs, + return_tensors="pt", + padding=True, + return_token_type_ids=False, + truncation=True, + max_length=max_truncation, + ).to(device) + for _ in pb.requests: + input_len = tokenized_inputs["input_ids"].shape[1] + prefix_offsets.append(input_len - 5) + read_offsets.append(input_len) + + input_lengths = tokenized_inputs["attention_mask"].sum(1) + max_input_length = input_lengths.max() + input_ids = tokenized_inputs["input_ids"] + all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1) + top_n_tokens_tensor = torch.tensor( + top_n_tokens, device=device, dtype=torch.int64 + ) + max_tokens = len(inputs) * (max_input_length + max_decode_tokens) + return cls( + batch_id=pb.id, + requests=pb.requests, + requests_idx_mapping=requests_idx_mapping, + input_ids=input_ids, + # past_input_ids=None, + all_input_ids=list(all_input_ids), + input_lengths=input_lengths.tolist(), + prefix_offsets=prefix_offsets, + read_offsets=read_offsets, + next_token_choosers=next_token_choosers, + stopping_criterias=stopping_criterias, + top_n_tokens=top_n_tokens, + top_n_tokens_tensor=top_n_tokens_tensor, + max_input_length=max_input_length.item(), + padding_right_offset=padding_right_offset, + max_tokens=max_tokens, + ) + + def filter(self, request_ids: List[int]) -> Optional["MambaBatch"]: + if len(request_ids) == 0: + raise ValueError("Batch must have at least one request") + if len(request_ids) == len(self): + return self + + keep_indices = [] + + # New values after filtering + requests_idx_mapping = {} + requests = [] + input_lengths = [] + prefix_offsets = [] + read_offsets = [] + all_input_ids = [] + max_input_length = 0 + + next_token_choosers = [] + stopping_criterias = [] + top_n_tokens = [] + + total_remaining_decode_tokens = 0 + new_padding_right_offset = 0 + + indices = [] + for i, request_id in enumerate(request_ids): + idx = self.requests_idx_mapping[request_id] + requests_idx_mapping[request_id] = i + keep_indices.append(idx) + + requests.append(self.requests[idx]) + prefix_offsets.append(self.prefix_offsets[idx]) + read_offsets.append(self.read_offsets[idx]) + all_input_ids.append(self.all_input_ids[idx]) + + request_input_length = self.input_lengths[idx] + input_lengths.append(request_input_length) + max_input_length = max(max_input_length, request_input_length) + indices.append(idx) + + next_token_choosers.append(self.next_token_choosers[idx]) + stopping_criteria = self.stopping_criterias[idx] + stopping_criterias.append(stopping_criteria) + top_n_tokens.append(self.top_n_tokens[idx]) + remaining_decode_tokens = ( + stopping_criteria.max_new_tokens - stopping_criteria.current_tokens + ) + total_remaining_decode_tokens += remaining_decode_tokens + new_padding_right_offset = max( + new_padding_right_offset, remaining_decode_tokens + ) + + # Apply indices to input_ids, attention mask, past key values and other items that need to be cached + input_ids = self.input_ids[keep_indices] + + top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] + max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens + + self.requests = requests + self.requests_idx_mapping = requests_idx_mapping + self.input_ids = input_ids + self.all_input_ids = all_input_ids + self.input_lengths = input_lengths + self.prefix_offsets = prefix_offsets + self.read_offsets = read_offsets + 
self.next_token_choosers = next_token_choosers + self.stopping_criterias = stopping_criterias + self.top_n_tokens = top_n_tokens + self.top_n_tokens_tensor = top_n_tokens_tensor + self.max_input_length = max_input_length + self.padding_right_offset = new_padding_right_offset + self.max_tokens = max_tokens + + # TODO + # Kept it simple by just updating the state, maybe updating the other CPU values is necessary. + key_value_memory_dict = {} + for i, (conv_state, ssm_state) in self.inference_params.key_value_memory_dict.items(): + key_value_memory_dict[i] = (conv_state[indices], ssm_state[indices]) + self.inference_params.key_value_memory_dict = key_value_memory_dict + + return self + + @classmethod + def concatenate(cls, batches: List["MambaBatch"]) -> "MambaBatch": + # Used for padding + total_batch_size = 0 + max_input_length = 0 + padding_right_offset = 0 + for batch in batches: + total_batch_size += len(batch) + max_input_length = max(max_input_length, batch.max_input_length) + padding_right_offset = max(padding_right_offset, batch.padding_right_offset) + + # Batch attributes + requests = [] + requests_idx_mapping = {} + input_lengths = [] + prefix_offsets = [] + read_offsets = [] + all_input_ids = [] + next_token_choosers = [] + stopping_criterias = [] + top_n_tokens = [] + max_tokens = 0 + max_seqlen = 0 + batch_size = 0 + seqlen_offset = 0 + + # Batch tensors + input_ids = None + top_n_tokens_tensor = None + + # Used for slicing correctly inside the tensors + # Equivalent to a cumsum on batch sizes + start_index = 0 + for i, batch in enumerate(batches): + requests.extend(batch.requests) + input_lengths.extend(batch.input_lengths) + prefix_offsets.extend(batch.prefix_offsets) + read_offsets.extend(batch.read_offsets) + all_input_ids.extend(batch.all_input_ids) + next_token_choosers.extend(batch.next_token_choosers) + stopping_criterias.extend(batch.stopping_criterias) + top_n_tokens.extend(batch.top_n_tokens) + + if i == 0: + requests_idx_mapping = batch.requests_idx_mapping + else: + # We need to offset the mapping for each batch by the cumulative batch size + for k, v in batch.requests_idx_mapping.items(): + requests_idx_mapping[k] = v + start_index + + # Slicing end index for this batch + end_index = start_index + len(batch) + + # Create empty tensor + # input_ids is always of shape [batch_size, 1] + # We do not need to pad it + if input_ids is None: + input_ids = batch.input_ids.new_empty((total_batch_size, 1)) + # Copy to correct indices + input_ids[start_index:end_index] = batch.input_ids + + if top_n_tokens_tensor is None: + top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros( + total_batch_size, + ) + top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor + + # Add eventual padding tokens that were added while concatenating + max_tokens += batch.max_tokens + ( + max_input_length - batch.max_input_length + ) * len(batch) + + max_seqlen = max(max_seqlen, batch.inference_params.max_seqlen) + seqlen_offset = max(seqlen_offset, batch.inference_params.seqlen_offset) + batch_size += batch.inference_params.max_batch_size + + start_index = end_index + + + (_, d_model, d_conv) = batches[0].inference_params.key_value_memory_dict[0][0].shape + (_, _, d_state) = batches[0].inference_params.key_value_memory_dict[0][1].shape + n_blocks = len(batches[0].inference_params.key_value_memory_dict) + dtype = batches[0].inference_params.key_value_memory_dict[0][0].dtype + device = batches[0].inference_params.key_value_memory_dict[0][0].device + + key_value_memory_dict = {} 
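+        # Allocate zeroed conv/ssm state buffers sized for the merged batch;
+        # each sub-batch's states are copied into its slice of rows below.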
+ for i in range(n_blocks): + conv_state = torch.zeros( + batch_size, + d_model, + d_conv, + device=device, + dtype=dtype, + ) + ssm_state = torch.zeros( + batch_size, + d_model, + d_state, + device=device, + dtype=dtype, + ) + key_value_memory_dict[i] = (conv_state, ssm_state) + lengths_per_sample = torch.zeros(batch_size, dtype=torch.int32, device=device) + + inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_offset, + key_value_memory_dict=key_value_memory_dict, + lengths_per_sample=lengths_per_sample, + ) + + current_batch = 0 + for batch in batches: + for i in range(n_blocks): + conv_state, ssm_state = batch.inference_params.key_value_memory_dict[i] + batch_size = batch.inference_params.max_batch_size + inference_params.key_value_memory_dict[i][0][current_batch:current_batch + batch_size] = conv_state + inference_params.key_value_memory_dict[i][1][current_batch:current_batch + batch_size] = ssm_state + inference_params.lengths_per_sample[current_batch: current_batch + batch_size] = batch.inference_params.lengths_per_sample + current_batch += batch_size + + return cls( + batch_id=batches[0].batch_id, + requests=requests, + requests_idx_mapping=requests_idx_mapping, + input_ids=input_ids, + all_input_ids=all_input_ids, + input_lengths=input_lengths, + prefix_offsets=prefix_offsets, + read_offsets=read_offsets, + next_token_choosers=next_token_choosers, + stopping_criterias=stopping_criterias, + top_n_tokens=top_n_tokens, + top_n_tokens_tensor=top_n_tokens_tensor, + max_input_length=max_input_length, + padding_right_offset=padding_right_offset, + keys_head_dim_last=batches[0].keys_head_dim_last, + max_tokens=max_tokens, + inference_params=inference_params + ) + + def __len__(self): + return len(self.requests) + +class Mamba(Model): + def __init__( + self, + model_id: str, + revision: Optional[str] = None, + quantize: Optional[str] = None, + dtype: Optional[torch.dtype] = None, + trust_remote_code: bool = False, + ): + self.process_group, _rank, _world_size = initialize_torch_distributed() + if torch.cuda.is_available(): + device = torch.device("cuda") + dtype = torch.float16 if dtype is None else dtype + else: + if quantize: + raise ValueError("quantization is not available on CPU") + + device = torch.device("cpu") + dtype = torch.float32 if dtype is None else dtype + + tokenizer = AutoTokenizer.from_pretrained( + "EleutherAI/gpt-neox-20b", + revision=revision, + padding_side="left", + truncation_side="left", + trust_remote_code=trust_remote_code, + ) + config = MambaConfig.from_pretrained( + model_id, revision=revision, trust_remote_code=trust_remote_code + ) + + tokenizer.bos_token_id = config.bos_token_id + tokenizer.eos_token_id = config.eos_token_id + tokenizer.pad_token = tokenizer.eos_token + + config.quantize = quantize + torch.distributed.barrier(group=self.process_group) + filenames = weight_files(model_id, revision=revision, extension=".safetensors") + weights = Weights(filenames, device, dtype, process_group=self.process_group) + model = MambaModel(config, weights) + torch.distributed.barrier(group=self.process_group) + super(Mamba, self).__init__( + model=model, + tokenizer=tokenizer, + requires_padding=True, + dtype=dtype, + device=device, + ) + + @property + def batch_type(self) -> Type[MambaBatch]: + return MambaBatch + + def warmup(self, batch) -> Optional[int]: + # TODO: implement warmup for Mamba if needed + return None + + def forward( + self, + input_ids: torch.Tensor, + past: Optional[List[torch.Tensor]] 
= None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + return self.model( + input_ids, + past=past, + ) + + def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: + start = time.time_ns() + input_ids = batch.input_ids # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids + + batch_size = input_ids.shape[0] + max_seqlen = input_ids.shape[1] + dtype = input_ids.dtype + + # Inference params + seqlen_og = 0 + inf_cache = {} + lengths_per_sample = torch.ones(batch_size, dtype=torch.int32, device=input_ids.device) * max_seqlen + + if batch.inference_params is None: + inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + + # Allocate inference cache + for res_block in self.model.blocks: + block = res_block.mamba_block + conv_state = torch.zeros( + batch_size, + self.model.config.d_model * self.model.config.expand, + self.model.config.d_conv, + device=block.conv1d.weight.device, + dtype=block.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.model.config.d_model * self.model.config.expand, + self.model.config.d_state, + device=block.dt_proj.weight.device, + dtype=block.dt_proj.weight.dtype, + ) + inference_params.key_value_memory_dict[block.layer_idx] = (conv_state, ssm_state) + batch.inference_params = inference_params + + # Forward pass + logits, past_input_ids, new_inference_params = self.model(input_ids, batch.inference_params) + + batch.inference_params = new_inference_params + # Results + generations: List[Generation] = [] + stopped = True + + # Speculation is not active for causal + accepted_ids = torch.ones_like(batch.input_ids)[:, 0] + batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( + batch.top_n_tokens, + batch.top_n_tokens_tensor, + torch.log_softmax(logits[:, -1], -1), + accepted_ids, + ) + + start_decode = time.time_ns() + + # Zipped iterator + iterator = zip( + batch.requests, + batch.input_lengths, + batch.prefix_offsets, + batch.read_offsets, + logits, + batch.next_token_choosers, + batch.stopping_criterias, + batch.all_input_ids, + batch.top_n_tokens, + batch_top_token_ids, + batch_top_token_logprobs, + ) + + # For each member of the batch + for i, ( + request, + input_length, + prefix_offset, + read_offset, + logits, + next_token_chooser, + stopping_criteria, + all_input_ids, + top_n_tokens, + top_token_ids, + top_token_logprobs, + ) in enumerate(iterator): + # Select next token + next_token_id, logprobs = next_token_chooser( + all_input_ids.view(1, -1), logits[-1:, :] + ) + + # Append next token to all tokens + all_input_ids = torch.cat([all_input_ids, next_token_id]) + new_input_length = input_length + 1 + + # Generated token + next_token_logprob = logprobs[-1, next_token_id] + next_token_id_squeezed = next_token_id.squeeze() + next_token_text, prefix_offset, read_offset = self.decode_token( + all_input_ids[:, 0], prefix_offset, read_offset + ) + + # Evaluate stopping criteria + stop, reason = stopping_criteria( + next_token_id_squeezed, + next_token_text, + ) + + if not stop: + stopped = False + + # Shard generations + # All generations will be appended in the rust sharded client + if i % self.world_size == self.rank: + if stop: + # Decode generated tokens + output_text, _, _ = self.decode_token( + all_input_ids[:, 0], + prefix_offset=len(all_input_ids) + - stopping_criteria.current_tokens + - 1, + read_offset=len(all_input_ids) - 
stopping_criteria.current_tokens, + skip_special_tokens=True, + ) + # Get seed + if isinstance(next_token_chooser.choice, Sampling): + seed = next_token_chooser.choice.seed + else: + seed = None + + generated_text = GeneratedText( + output_text, stopping_criteria.current_tokens, reason, seed + ) + else: + generated_text = None + + if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: + # Remove generated token to only have prefill and add nan for first prompt token + prefill_logprobs = [float("nan")] + torch.log_softmax( + logits, -1 + ).gather(1, all_input_ids[1:]).squeeze(1)[ + -new_input_length:-1 + ].tolist() + prefill_token_ids = all_input_ids[-new_input_length:-1] + prefill_texts = self.tokenizer.batch_decode( + prefill_token_ids, + clean_up_tokenization_spaces=False, + skip_special_tokens=False, + ) + prefill_tokens = Tokens( + prefill_token_ids, + prefill_logprobs, + prefill_texts, + is_special=[], + ) + else: + prefill_tokens = None + + if top_n_tokens > 0: + toptoken_texts = self.tokenizer.batch_decode( + top_token_ids, + clean_up_tokenization_spaces=False, + skip_special_tokens=False, + ) + special_toptokens = [ + token_id in self.all_special_ids for token_id in top_token_ids + ] + top_tokens = Tokens( + top_token_ids, + top_token_logprobs, + toptoken_texts, + special_toptokens, + ) + else: + top_tokens = None + + generation = Generation( + request.id, + prefill_tokens, + Tokens( + [next_token_id_squeezed], + [next_token_logprob], + [next_token_text], + [next_token_id_squeezed.item() in self.all_special_ids], + ), + generated_text, + top_tokens, + ) + + generations.append(generation) + + # Update values + batch.input_ids[i, 0] = next_token_id + batch.all_input_ids[i] = all_input_ids + batch.input_lengths[i] = new_input_length + batch.prefix_offsets[i] = prefix_offset + batch.read_offsets[i] = read_offset + batch.max_input_length = max(batch.max_input_length, new_input_length) + + # We finished all generations in the batch; there is no next batch + if stopped: + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, None, (forward_ns, decode_ns) + + # Slice unused values from prefill + batch.input_ids = batch.input_ids[:, :1] + + forward_ns = start_decode - start + decode_ns = time.time_ns() - start_decode + return generations, batch, (forward_ns, decode_ns) From cec954e391b602ffb01e8027732106e282f59f54 Mon Sep 17 00:00:00 2001 From: Jason Stillerman Date: Thu, 8 Feb 2024 06:44:04 -0500 Subject: [PATCH 071/153] Update to peft 0.8.2 (#1537) - [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [x] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [x] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [x] Did you write any new necessary tests? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. 
@OlivierDehaene OR @Narsil --- server/poetry.lock | 14 ++++++++------ server/pyproject.toml | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/server/poetry.lock b/server/poetry.lock index 16a28a01..f1445b1d 100644 --- a/server/poetry.lock +++ b/server/poetry.lock @@ -1699,30 +1699,32 @@ xml = ["lxml (>=4.9.2)"] [[package]] name = "peft" -version = "0.4.0" +version = "0.8.2" description = "Parameter-Efficient Fine-Tuning (PEFT)" optional = false python-versions = ">=3.8.0" files = [ - {file = "peft-0.4.0-py3-none-any.whl", hash = "sha256:2cf992772a6d703814477e0bdcdadd68cb8ea388111ce2d793dd2ff0e438f357"}, - {file = "peft-0.4.0.tar.gz", hash = "sha256:e768fa22d6e9f32aa7e891f0d06f355960278ca4dc0cdd96bff71f6f06269207"}, + {file = "peft-0.8.2-py3-none-any.whl", hash = "sha256:4a9c81c38e689fd4043b2757cd0e2b526a9b8b8fd04f8442df2c4824b32c2505"}, + {file = "peft-0.8.2.tar.gz", hash = "sha256:bbdf61db2d8ca503e894edc64016038e6f34b7b522374bad09a22af41882e7ac"}, ] [package.dependencies] -accelerate = "*" +accelerate = ">=0.21.0" +huggingface-hub = ">=0.17.0" numpy = ">=1.17" packaging = ">=20.0" psutil = "*" pyyaml = "*" safetensors = "*" torch = ">=1.13.0" +tqdm = "*" transformers = "*" [package.extras] dev = ["black (>=22.0,<23.0)", "hf-doc-builder", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"] docs-specific = ["hf-doc-builder"] quality = ["black (>=22.0,<23.0)", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"] -test = ["black (>=22.0,<23.0)", "datasets", "diffusers", "hf-doc-builder", "parameterized", "pytest", "pytest-cov", "pytest-xdist", "ruff (>=0.0.241)", "urllib3 (<=2.0.0)"] +test = ["black (>=22.0,<23.0)", "datasets", "diffusers (<0.21.0)", "hf-doc-builder", "parameterized", "pytest", "pytest-cov", "pytest-xdist", "ruff (>=0.0.241)", "scipy", "urllib3 (<=2.0.0)"] [[package]] name = "pillow" @@ -3100,4 +3102,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "8f84984b4381688a2f9461e968c730626f1da5bdb7a32a5d5f366febc6bdad01" +content-hash = "5f39b7146fc36cc846070a3e12db863f2748b49a24ab55878129be3480c89685" diff --git a/server/pyproject.toml b/server/pyproject.toml index 60bd399a..517ef42f 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -21,7 +21,7 @@ opentelemetry-exporter-otlp = "^1.15.0" opentelemetry-instrumentation-grpc = "^0.36b0" hf-transfer = "^0.1.2" sentencepiece = "^0.1.97" -peft = "^0.4.0" +peft = "^0.8.2" optimum-habana = "1.10.4" transformers = "4.37.2" accelerate = "0.27.2" From f1d8da3ba64be505ad98f6822c9ffb1d166de485 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Thu, 8 Feb 2024 18:41:25 +0100 Subject: [PATCH 072/153] feat(server): add frequency penalty (#1541) --- Cargo.lock | 38 ++++++- benchmark/src/lib.rs | 3 + benchmark/src/main.rs | 7 ++ benchmark/src/table.rs | 2 + integration-tests/models/test_mamba.py | 11 +- proto/generate.proto | 2 + router/client/src/client.rs | 2 + router/src/health.rs | 1 + router/src/lib.rs | 86 ++++++++++++-- router/src/queue.rs | 1 + router/src/server.rs | 18 ++- router/src/validation.rs | 11 +- server/tests/utils/test_tokens.py | 2 +- .../models/causal_lm.py | 11 +- .../models/custom_modeling/mamba_modeling.py | 107 ++++++++++++------ .../models/flash_causal_lm.py | 8 +- server/text_generation_server/models/mamba.py | 59 +++++++--- .../models/seq2seq_lm.py | 7 +- server/text_generation_server/models/types.py | 4 +- .../utils/logits_process.py | 56 +++++++++ server/text_generation_server/utils/tokens.py 
| 73 +++++++++--- 21 files changed, 412 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 689dc0ae..a91768c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2824,7 +2824,7 @@ dependencies = [ "tabled", "text-generation-client", "thiserror", - "tokenizers", + "tokenizers 0.14.1", "tokio", "tracing", "tracing-subscriber", @@ -2888,7 +2888,7 @@ dependencies = [ "serde_json", "text-generation-client", "thiserror", - "tokenizers", + "tokenizers 0.15.1", "tokio", "tokio-stream", "tower-http", @@ -3012,6 +3012,40 @@ dependencies = [ "unicode_categories", ] +[[package]] +name = "tokenizers" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db445cceba5dfeb0f9702be7d6bfd91801ddcbe8fe8722defe7f2e96da75812" +dependencies = [ + "aho-corasick", + "clap", + "derive_builder", + "esaxx-rs", + "getrandom", + "hf-hub", + "indicatif", + "itertools 0.11.0", + "lazy_static", + "log", + "macro_rules_attribute", + "monostate", + "onig", + "paste", + "rand", + "rayon", + "rayon-cond", + "regex", + "regex-syntax 0.7.5", + "serde", + "serde_json", + "spm_precompiled", + "thiserror", + "unicode-normalization-alignments", + "unicode-segmentation", + "unicode_categories", +] + [[package]] name = "tokio" version = "1.37.0" diff --git a/benchmark/src/lib.rs b/benchmark/src/lib.rs index 1875652c..6deae48d 100644 --- a/benchmark/src/lib.rs +++ b/benchmark/src/lib.rs @@ -30,6 +30,7 @@ pub async fn run( top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, + frequency_penalty: Option<f32>, watermark: bool, do_sample: bool, client: ShardedClient, @@ -42,6 +43,7 @@ pub async fn run( do_sample, seed: 0, repetition_penalty: repetition_penalty.unwrap_or(1.0), + frequency_penalty: frequency_penalty.unwrap_or(0.0), watermark, }; @@ -140,6 +142,7 @@ pub async fn run( top_p, typical_p, repetition_penalty, + frequency_penalty, watermark, do_sample, ); diff --git a/benchmark/src/main.rs b/benchmark/src/main.rs index 97c8af1c..2d89e045 100644 --- a/benchmark/src/main.rs +++ b/benchmark/src/main.rs @@ -84,6 +84,11 @@ struct Args { #[clap(long, env)] repetition_penalty: Option<f32>, + /// Generation parameter in case you want to specifically test/debug particular + /// decoding strategies, for full doc refer to the `text-generation-server` + #[clap(long, env)] + frequency_penalty: Option<f32>, + /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] @@ -119,6 +124,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { top_p, typical_p, repetition_penalty, + frequency_penalty, watermark, do_sample, master_shard_uds_path, @@ -187,6 +193,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { top_p, typical_p, repetition_penalty, + frequency_penalty, watermark, do_sample, sharded_client, diff --git a/benchmark/src/table.rs b/benchmark/src/table.rs index 9e36717b..c4819ff3 100644 --- a/benchmark/src/table.rs +++ b/benchmark/src/table.rs @@ -15,6 +15,7 @@ pub(crate) fn parameters_table( top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, + frequency_penalty: Option<f32>, watermark: bool, do_sample: bool, ) -> Table { @@ -33,6 +34,7 @@ builder.push_record(["Top P", &format!("{top_p:?}")]); builder.push_record(["Typical P", &format!("{typical_p:?}")]); builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]); + builder.push_record(["Frequency Penalty", &format!("{frequency_penalty:?}")]); builder.push_record(["Watermark",
&watermark.to_string()]); builder.push_record(["Do Sample", &do_sample.to_string()]); diff --git a/integration-tests/models/test_mamba.py b/integration-tests/models/test_mamba.py index d86faeff..bf398999 100644 --- a/integration-tests/models/test_mamba.py +++ b/integration-tests/models/test_mamba.py @@ -24,6 +24,7 @@ async def test_mamba(fused_kernel_mamba, response_snapshot): assert response.generated_text == "\n\nDeep learning is a new type of machine" assert response == response_snapshot + @pytest.mark.asyncio @pytest.mark.private async def test_mamba_all_params(fused_kernel_mamba, response_snapshot): @@ -44,13 +45,19 @@ async def test_mamba_all_params(fused_kernel_mamba, response_snapshot): ) assert response.details.generated_tokens == 10 - assert response.generated_text == "blue, red, yellow, \nand orange (in the order they appear in" + assert ( + response.generated_text + == "blue, red, yellow, \nand orange (in the order they appear in" + ) assert response == response_snapshot + @pytest.mark.asyncio @pytest.mark.private async def test_mamba_load(fused_kernel_mamba, generate_load, response_snapshot): - responses = await generate_load(fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4) + responses = await generate_load( + fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4 + ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) diff --git a/proto/generate.proto b/proto/generate.proto index ceb421c4..dde344f6 100644 --- a/proto/generate.proto +++ b/proto/generate.proto @@ -66,6 +66,8 @@ message NextTokenChooserParameters { uint64 seed = 6; /// repetition penalty float repetition_penalty = 7; + /// frequency penalty + float frequency_penalty = 9; /// token watermarking using "A Watermark for Large Language Models" bool watermark = 8; } diff --git a/router/client/src/client.rs b/router/client/src/client.rs index bd592c28..c61a4003 100644 --- a/router/client/src/client.rs +++ b/router/client/src/client.rs @@ -227,6 +227,7 @@ impl Client { do_sample: false, seed: 0, repetition_penalty: 1.0, + frequency_penalty: 0.0, watermark: false, }) } else { @@ -238,6 +239,7 @@ impl Client { do_sample: true, seed: 0, repetition_penalty: 1.2, + frequency_penalty: 0.1, watermark: false, }) }; diff --git a/router/src/health.rs b/router/src/health.rs index ab290fc1..e830a3c3 100644 --- a/router/src/health.rs +++ b/router/src/health.rs @@ -43,6 +43,7 @@ impl Health { do_sample: false, seed: 0, repetition_penalty: 1.0, + frequency_penalty: 0.0, watermark: false, }), stopping_parameters: Some(StoppingCriteriaParameters { diff --git a/router/src/lib.rs b/router/src/lib.rs index e85519cc..7c44d642 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -106,6 +106,14 @@ pub(crate) struct GenerateParameters { )] pub repetition_penalty: Option<f32>, #[serde(default)] + #[schema( + exclusive_minimum = -2.0, + nullable = true, + default = "null", + example = 0.1 + )] + pub frequency_penalty: Option<f32>, + #[serde(default)] #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)] pub top_k: Option<i32>, #[serde(default)] @@ -172,6 +180,7 @@ fn default_parameters() -> GenerateParameters { best_of: None, temperature: None, repetition_penalty: None, + frequency_penalty: None, top_k: None, top_p: None, typical_p: None, @@ -205,10 +214,71 @@ pub(crate) struct ChatCompletion { pub(crate) struct ChatCompletionComplete { pub index: u32, pub message: Message, - pub logprobs: Option<Vec<f32>>, + pub logprobs: Option<ChatCompletionLogprobs>, pub
finish_reason: String, } +#[derive(Clone, Deserialize, Serialize, ToSchema)] +pub(crate) struct ChatCompletionLogprobs { + content: Vec<ChatCompletionLogprob>, +} + +impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs { + fn from(value: (Token, Vec<Token>)) -> Self { + let (token, top_tokens) = value; + + Self { + content: vec![ChatCompletionLogprob { + token: token.text, + logprob: token.logprob, + top_logprobs: top_tokens + .into_iter() + .map(|t| ChatCompletionTopLogprob { + token: t.text, + logprob: t.logprob, + }) + .collect(), + }], + } + } +} + +impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs { + fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self { + let (tokens, top_tokens) = value; + Self { + content: tokens + .into_iter() + .zip(top_tokens) + .map(|(t, top_t)| ChatCompletionLogprob { + token: t.text, + logprob: t.logprob, + top_logprobs: top_t + .into_iter() + .map(|t| ChatCompletionTopLogprob { + token: t.text, + logprob: t.logprob, + }) + .collect(), + }) + .collect(), + } + } +} + +#[derive(Clone, Deserialize, Serialize, ToSchema)] +pub(crate) struct ChatCompletionLogprob { + token: String, + logprob: f32, + top_logprobs: Vec<ChatCompletionTopLogprob>, +} + +#[derive(Clone, Deserialize, Serialize, ToSchema)] +pub(crate) struct ChatCompletionTopLogprob { + token: String, + logprob: f32, +} + #[derive(Clone, Deserialize, Serialize)] pub(crate) struct Usage { pub prompt_tokens: u32, @@ -238,7 +308,7 @@ impl ChatCompletion { content: output, }, logprobs: return_logprobs - .then(|| details.tokens.iter().map(|t| t.logprob).collect()), + .then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))), finish_reason: details.finish_reason.to_string(), }], usage: Usage { @@ -266,7 +336,7 @@ pub(crate) struct ChatCompletionChunk { pub(crate) struct ChatCompletionChoice { pub index: u32, pub delta: ChatCompletionDelta, - pub logprobs: Option<f32>, + pub logprobs: Option<ChatCompletionLogprobs>, pub finish_reason: Option<String>, } @@ -285,7 +355,7 @@ impl ChatCompletionChunk { delta: String, created: u64, index: u32, - logprobs: Option<f32>, + logprobs: Option<ChatCompletionLogprobs>, finish_reason: Option<String>, ) -> Self { Self { @@ -319,8 +389,8 @@ pub(crate) struct ChatRequest { /// UNUSED #[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")] /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. - pub model: String, /* NOTE: UNUSED */ - + pub model: String, + /* NOTE: UNUSED */ /// A list of messages comprising the conversation so far. #[serde(default = "default_request_messages")] pub messages: Vec<Message>, @@ -346,7 +416,7 @@ pub(crate) struct ChatRequest { #[schema(example = "false")] pub logprobs: Option<bool>, - /// UNUSED /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with /// an associated log probability. logprobs must be set to true if this parameter is used. #[serde(default)] #[schema(nullable = true, example = "2")] pub n: Option<u32>, - /// UNUSED /// Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the text so far, /// increasing the model's likelihood to talk about new topics #[serde(default)] @@ -447,7 +515,7 @@ pub struct PrefillToken { logprob: f32, } -#[derive(Debug, Serialize, ToSchema)] +#[derive(Debug, Serialize, ToSchema, Clone)] pub struct Token { #[schema(example = 0)] id: u32, diff --git a/router/src/queue.rs b/router/src/queue.rs index 6227d70c..9e3494f7 100644 --- a/router/src/queue.rs +++ b/router/src/queue.rs @@ -496,6 +496,7 @@ mod tests { do_sample: false, seed: 0, repetition_penalty: 0.0, + frequency_penalty: 0.0, watermark: false, }, stopping_parameters: StoppingCriteriaParameters { diff --git a/router/src/server.rs b/router/src/server.rs index 808d9f6b..15ad6b33 100644 --- a/router/src/server.rs +++ b/router/src/server.rs @@ -6,9 +6,10 @@ use crate::infer::{InferError, InferResponse, InferStreamResponse}; use crate::validation::ValidationError; use crate::{ BestOfSequence, ChatCompletion, ChatCompletionChoice, ChatCompletionChunk, ChatCompletionDelta, - ChatRequest, CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters, - GenerateRequest, GenerateResponse, HubModelInfo, HubTokenizerConfig, Infer, Info, Message, - PrefillToken, SimpleToken, StreamDetails, StreamResponse, Token, TokenizeResponse, Validation, + ChatCompletionLogprobs, ChatRequest, CompatGenerateRequest, Details, ErrorResponse, + FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, + HubTokenizerConfig, Infer, Info, Message, PrefillToken, SimpleToken, StreamDetails, + StreamResponse, Token, TokenizeResponse, Validation, }; use axum::extract::Extension; use axum::http::{HeaderMap, Method, StatusCode}; @@ -572,8 +573,8 @@ async fn chat_completions( let stream = req.stream; let max_new_tokens = req.max_tokens.or(Some(100)); let repetition_penalty = req - .frequency_penalty - // rescale frequency_penalty from (-2.0, 2.0) to (0.0, 4.0) + .presence_penalty + // rescale repetition_penalty from (-2.0, 2.0) to (0.0, 4.0) .map(|x| x + 2.0); let logprobs = req.logprobs.unwrap_or(false); let seed = req.seed; @@ -601,6 +602,7 @@ async fn chat_completions( best_of: None, temperature: req.temperature, repetition_penalty, + frequency_penalty: req.frequency_penalty, top_k: None, top_p: req.top_p, typical_p: None, @@ -632,6 +634,10 @@ async fn chat_completions( .unwrap_or_else(|_| std::time::Duration::from_secs(0)) .as_secs(); + let logprobs = logprobs.then(|| { + ChatCompletionLogprobs::from((stream_token.token.clone(), stream_token.top_tokens)) + }); + event .json_data(ChatCompletionChunk::new( model_id.clone(), @@ -639,7 +645,7 @@ async fn chat_completions( stream_token.token.text, current_time, stream_token.index, - logprobs.then_some(stream_token.token.logprob), + logprobs, stream_token.details.map(|d| d.finish_reason.to_string()), )) .map_or_else( diff --git a/router/src/validation.rs b/router/src/validation.rs index 62ab299f..5802391d 100644 --- a/router/src/validation.rs +++ b/router/src/validation.rs @@ -187,6 +187,7 @@ impl Validation { best_of, temperature, repetition_penalty, + frequency_penalty, top_k, top_p, typical_p, @@ -223,12 +224,17 @@ impl Validation { return Err(ValidationError::RepetitionPenalty); } + let frequency_penalty = frequency_penalty.unwrap_or(0.0); + if !(-2.0..=2.0).contains(&frequency_penalty) { + return Err(ValidationError::FrequencyPenalty); + } + // TODO: enable watermark with fp8 quantization let quantization_enabled = env::var("QUANT_CONFIG") .ok() 
.map_or(false, |value| !value.is_empty()); if watermark && quantization_enabled { - return Err(ValidationError::WatermarkWithQuantization) + return Err(ValidationError::WatermarkWithQuantization); } // Different because the proto default value is not a valid value @@ -314,6 +320,7 @@ impl Validation { let parameters = NextTokenChooserParameters { temperature, repetition_penalty, + frequency_penalty, top_k, top_p, typical_p, @@ -445,6 +452,8 @@ pub enum ValidationError { Temperature, #[error("`repetition_penalty` must be strictly positive")] RepetitionPenalty, + #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")] + FrequencyPenalty, #[error("`top_p` must be > 0.0 and < 1.0")] TopP, #[error("`top_k` must be strictly positive")] diff --git a/server/tests/utils/test_tokens.py b/server/tests/utils/test_tokens.py index 343716b9..94d2a8f2 100644 --- a/server/tests/utils/test_tokens.py +++ b/server/tests/utils/test_tokens.py @@ -84,7 +84,7 @@ def test_batch_top_tokens(): # Now let's make second member of the batch be speculated inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5 * 2) - accepted_ids[1] = 2 + accepted_ids[1] = 2 topn_tok_ids, topn_tok_logprobs = batch_top_tokens( top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids ) diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index 30e39b4e..771c7694 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -798,8 +798,8 @@ class CausalLM(Model): attention_mask, position_ids, token_idx, - past_key_values: Optional = None, - bypass_hpu_graph: Optional = None, + past_key_values: Optional[List[Tuple]] = None, + bypass_hpu_graph: Optional[bool] = None, ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: # Model Forward kwargs = { @@ -1040,14 +1040,17 @@ class CausalLM(Model): if top_n_tokens > 0: all_top_tokens = [] - for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + for (top_token_ids, top_token_logprobs) in zip( + top_token_ids, top_token_logprobs + ): toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids + token_id in self.all_special_ids + for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, diff --git a/server/text_generation_server/models/custom_modeling/mamba_modeling.py b/server/text_generation_server/models/custom_modeling/mamba_modeling.py index 1773f04d..017c0341 100644 --- a/server/text_generation_server/models/custom_modeling/mamba_modeling.py +++ b/server/text_generation_server/models/custom_modeling/mamba_modeling.py @@ -19,6 +19,7 @@ from einops import rearrange from causal_conv1d import causal_conv1d_fn, causal_conv1d_update import math + class MambaConfig(PretrainedConfig): def __init__( self, @@ -53,6 +54,7 @@ class MambaConfig(PretrainedConfig): **kwargs, ) + class MambaBlock(nn.Module): def __init__(self, prefix, config, weights): super().__init__() @@ -60,10 +62,14 @@ class MambaBlock(nn.Module): self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False) self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False) self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True) - self.dt_proj_no_bias = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=False) - self.out_proj = 
FastLinear.load(config, f"{prefix}.out_proj", weights, bias=False) + self.dt_proj_no_bias = FastLinear.load( + config, f"{prefix}.dt_proj", weights, bias=False + ) + self.out_proj = FastLinear.load( + config, f"{prefix}.out_proj", weights, bias=False + ) self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True) - self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float()) + self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float()) self.D = weights.get_tensor(f"{prefix}.D") self.activation = "silu" self.dt_rank = config.dt_rank @@ -80,12 +86,14 @@ class MambaBlock(nn.Module): out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state) return out, conv_state, ssm_state - projected_states = self.in_proj(hidden_states).transpose(1,2) + projected_states = self.in_proj(hidden_states).transpose(1, 2) x, z = projected_states.chunk(2, dim=1) conv_state = F.pad(x, (self.d_conv - seqlen, 0)) x = causal_conv1d_fn( x=x, - weight=self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)), + weight=self.conv1d.weight.view( + self.conv1d.weight.size(0), self.conv1d.weight.size(2) + ), bias=self.conv1d.bias, activation=self.activation, ) @@ -94,7 +102,9 @@ class MambaBlock(nn.Module): # We want dt to have d as the slowest moving dimension # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) - dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt, B, C = torch.split( + x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1 + ) dt = self.dt_proj.weight @ dt.t() dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() @@ -118,28 +128,39 @@ class MambaBlock(nn.Module): def step(self, hidden_states, conv_state, ssm_state): _xz = self.in_proj(hidden_states) _x, _z = _xz.chunk(2, dim=-1) # (B D) - conv_state_new = torch.cat([conv_state, _x.transpose(1,2)], dim=-1) - conv_out = causal_conv1d_fn( - x=conv_state_new, - weight=self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)), - bias=self.conv1d.bias, - activation=self.activation + conv_state_new = torch.cat([conv_state, _x.transpose(1, 2)], dim=-1) + conv_out = causal_conv1d_fn( + x=conv_state_new, + weight=self.conv1d.weight.view( + self.conv1d.weight.size(0), self.conv1d.weight.size(2) + ), + bias=self.conv1d.bias, + activation=self.activation, ) conv_state = conv_state_new[:, :, 1:] bsz, seqlen, dim = hidden_states.shape output_tensor = torch.zeros( - (bsz, seqlen, dim), - device=hidden_states.device, - dtype=hidden_states.dtype + (bsz, seqlen, dim), device=hidden_states.device, dtype=hidden_states.dtype ) for i in range(0, bsz): - x = conv_out[i:i+1,:,-1] - z = _z[i:i+1, -1, :] + x = conv_out[i : i + 1, :, -1] + z = _z[i : i + 1, -1, :] x_db = self.x_proj(x) - dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt, B, C = torch.split( + x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1 + ) dt = F.linear(dt, self.dt_proj.weight) y = selective_state_update( - ssm_state[i:i+1,:,:], x, dt, self.negA, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ssm_state[i : i + 1, :, :], + x, + dt, + self.negA, + B, + C, + self.D, + z=z, + dt_bias=self.dt_proj.bias, + dt_softplus=True, ) out = self.out_proj(y) output_tensor[i] = out @@ -147,48 +168,70 @@ class MambaBlock(nn.Module): return output_tensor, conv_state, 
ssm_state - class ResidualBlock(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() - self.mamba_block = MambaBlock(prefix=f"{layer_id}.mixer", config=config, weights=weights) - self.layer_norm = FastRMSNorm.load(prefix=f"{layer_id}.norm", weights=weights, eps=config.layer_norm_epsilon) + self.mamba_block = MambaBlock( + prefix=f"{layer_id}.mixer", config=config, weights=weights + ) + self.layer_norm = FastRMSNorm.load( + prefix=f"{layer_id}.norm", weights=weights, eps=config.layer_norm_epsilon + ) def forward( self, hidden_states: torch.Tensor, residual: Optional[torch.Tensor] = None, inference_params: Optional[Any] = None, - ): + ): residual = (hidden_states + residual) if residual is not None else hidden_states shape = residual.shape hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1])) - hidden_states, conv_state, last_ssm_state = self.mamba_block(hidden_states.view(*shape), inference_params) + hidden_states, conv_state, last_ssm_state = self.mamba_block( + hidden_states.view(*shape), inference_params + ) return hidden_states, residual, conv_state, last_ssm_state + class MambaModel(nn.Module): def __init__(self, config, weights): super().__init__() prefix = "backbone" self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights) self.blocks = nn.ModuleList( - [ResidualBlock(f"{prefix}.layers.{i}", config, weights) for i in range(config.n_layer)] + [ + ResidualBlock(f"{prefix}.layers.{i}", config, weights) + for i in range(config.n_layer) + ] + ) + self.norm_f = FastRMSNorm.load( + f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon + ) + self.lm_head = FastLinear.load( + config, f"{prefix}.embedding", weights, bias=False ) - self.norm_f = FastRMSNorm.load(f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon) - self.lm_head = FastLinear.load(config, f"{prefix}.embedding", weights, bias=False) self.config = config - def forward(self, input_ids: torch.Tensor, inference_params=None, residual=None) -> Tuple[torch.Tensor, torch.Tensor, InferenceParams]: + def forward( + self, input_ids: torch.Tensor, inference_params=None, residual=None + ) -> Tuple[torch.Tensor, torch.Tensor, InferenceParams]: hidden_states = self.embed_tokens(input_ids) for block in self.blocks: - hidden_states, residual, conv_state, ssm_state = block(hidden_states, residual, inference_params) - inference_params.key_value_memory_dict[block.mamba_block.layer_idx] = (conv_state, ssm_state) + hidden_states, residual, conv_state, ssm_state = block( + hidden_states, residual, inference_params + ) + inference_params.key_value_memory_dict[block.mamba_block.layer_idx] = ( + conv_state, + ssm_state, + ) - hidden_states = hidden_states + residual if residual is not None else hidden_states + hidden_states = ( + hidden_states + residual if residual is not None else hidden_states + ) hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1))) hidden_states = hidden_states.view(residual.shape) logits = self.lm_head(hidden_states) # update the offset for the next inference using these params inference_params.seqlen_offset += input_ids.size(1) - return logits, input_ids, inference_params \ No newline at end of file + return logits, input_ids, inference_params diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 53a3d582..90776654 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -842,7 +842,6 @@ 
class FlashCausalLM(Model): else: next_token_logits = out - speculate = get_speculate() ( next_input_ids, @@ -1064,14 +1063,17 @@ class FlashCausalLM(Model): if top_n_tokens > 0: all_top_tokens = [] - for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + for (top_token_ids, top_token_logprobs) in zip( + top_token_ids, top_token_logprobs + ): toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids + token_id in self.all_special_ids + for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, diff --git a/server/text_generation_server/models/mamba.py b/server/text_generation_server/models/mamba.py index c10910aa..c51e1e20 100644 --- a/server/text_generation_server/models/mamba.py +++ b/server/text_generation_server/models/mamba.py @@ -26,6 +26,7 @@ from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling from mamba_ssm.utils.generation import InferenceParams + @dataclass class MambaBatch(Batch): batch_id: int @@ -69,7 +70,7 @@ class MambaBatch(Batch): size=len(self), max_tokens=self.max_tokens, ) - + @classmethod def from_pb( cls, @@ -196,7 +197,7 @@ class MambaBatch(Batch): new_padding_right_offset = max( new_padding_right_offset, remaining_decode_tokens ) - + # Apply indices to input_ids, attention mask, past key values and other items that need to be cached input_ids = self.input_ids[keep_indices] @@ -218,10 +219,13 @@ class MambaBatch(Batch): self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens - # TODO + # TODO # Kept it simple by just updating the state, maybe updating the other CPU values is necessary. 
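        # The loop below rebuilds each layer's (conv_state, ssm_state) pair,
        # slicing both cached tensors along the batch dimension so the
        # recurrent state stays aligned with the requests kept by the filter.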
key_value_memory_dict = {} - for i, (conv_state, ssm_state) in self.inference_params.key_value_memory_dict.items(): + for i, ( + conv_state, + ssm_state, + ) in self.inference_params.key_value_memory_dict.items(): key_value_memory_dict[i] = (conv_state[indices], ssm_state[indices]) self.inference_params.key_value_memory_dict = key_value_memory_dict @@ -305,8 +309,9 @@ class MambaBatch(Batch): start_index = end_index - - (_, d_model, d_conv) = batches[0].inference_params.key_value_memory_dict[0][0].shape + (_, d_model, d_conv) = ( + batches[0].inference_params.key_value_memory_dict[0][0].shape + ) (_, _, d_state) = batches[0].inference_params.key_value_memory_dict[0][1].shape n_blocks = len(batches[0].inference_params.key_value_memory_dict) dtype = batches[0].inference_params.key_value_memory_dict[0][0].dtype @@ -344,9 +349,15 @@ class MambaBatch(Batch): for i in range(n_blocks): conv_state, ssm_state = batch.inference_params.key_value_memory_dict[i] batch_size = batch.inference_params.max_batch_size - inference_params.key_value_memory_dict[i][0][current_batch:current_batch + batch_size] = conv_state - inference_params.key_value_memory_dict[i][1][current_batch:current_batch + batch_size] = ssm_state - inference_params.lengths_per_sample[current_batch: current_batch + batch_size] = batch.inference_params.lengths_per_sample + inference_params.key_value_memory_dict[i][0][ + current_batch : current_batch + batch_size + ] = conv_state + inference_params.key_value_memory_dict[i][1][ + current_batch : current_batch + batch_size + ] = ssm_state + inference_params.lengths_per_sample[ + current_batch : current_batch + batch_size + ] = batch.inference_params.lengths_per_sample current_batch += batch_size return cls( @@ -366,12 +377,13 @@ class MambaBatch(Batch): padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, - inference_params=inference_params + inference_params=inference_params, ) def __len__(self): return len(self.requests) + class Mamba(Model): def __init__( self, @@ -428,7 +440,7 @@ class Mamba(Model): def warmup(self, batch) -> Optional[int]: # TODO: implement warmup for Mamba if needed return None - + def forward( self, input_ids: torch.Tensor, @@ -441,7 +453,9 @@ class Mamba(Model): def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: start = time.time_ns() - input_ids = batch.input_ids # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids + input_ids = ( + batch.input_ids + ) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids batch_size = input_ids.shape[0] max_seqlen = input_ids.shape[1] @@ -450,8 +464,11 @@ class Mamba(Model): # Inference params seqlen_og = 0 inf_cache = {} - lengths_per_sample = torch.ones(batch_size, dtype=torch.int32, device=input_ids.device) * max_seqlen - + lengths_per_sample = ( + torch.ones(batch_size, dtype=torch.int32, device=input_ids.device) + * max_seqlen + ) + if batch.inference_params is None: inference_params = InferenceParams( max_seqlen=max_seqlen, @@ -478,11 +495,16 @@ class Mamba(Model): device=block.dt_proj.weight.device, dtype=block.dt_proj.weight.dtype, ) - inference_params.key_value_memory_dict[block.layer_idx] = (conv_state, ssm_state) + inference_params.key_value_memory_dict[block.layer_idx] = ( + conv_state, + ssm_state, + ) batch.inference_params = inference_params - + # Forward pass - logits, past_input_ids, new_inference_params = self.model(input_ids, batch.inference_params) + logits, 
past_input_ids, new_inference_params = self.model( + input_ids, batch.inference_params + ) batch.inference_params = new_inference_params # Results @@ -564,7 +586,8 @@ class Mamba(Model): prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, - read_offset=len(all_input_ids) - stopping_criteria.current_tokens, + read_offset=len(all_input_ids) + - stopping_criteria.current_tokens, skip_special_tokens=True, ) # Get seed diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py index 8b93aecd..25042a32 100644 --- a/server/text_generation_server/models/seq2seq_lm.py +++ b/server/text_generation_server/models/seq2seq_lm.py @@ -750,14 +750,17 @@ class Seq2SeqLM(Model): if top_n_tokens > 0: all_top_tokens = [] - for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs): + for (top_token_ids, top_token_logprobs) in zip( + top_token_ids, top_token_logprobs + ): toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ - token_id in self.all_special_ids for token_id in top_token_ids + token_id in self.all_special_ids + for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, diff --git a/server/text_generation_server/models/types.py b/server/text_generation_server/models/types.py index bc68812e..da71b0ec 100644 --- a/server/text_generation_server/models/types.py +++ b/server/text_generation_server/models/types.py @@ -95,5 +95,7 @@ class Generation: generated_text=self.generated_text.to_pb() if self.generated_text is not None else None, - top_tokens=[top_tokens.to_pb() for top_tokens in self.top_tokens] if self.top_tokens is not None else None, + top_tokens=[top_tokens.to_pb() for top_tokens in self.top_tokens] + if self.top_tokens is not None + else None, ) diff --git a/server/text_generation_server/utils/logits_process.py b/server/text_generation_server/utils/logits_process.py index c515e4d3..9013d200 100644 --- a/server/text_generation_server/utils/logits_process.py +++ b/server/text_generation_server/utils/logits_process.py @@ -107,6 +107,62 @@ class HeterogeneousRepetitionPenaltyLogitsProcessor(LogitsProcessor): return None +class FrequencyPenaltyLogitsProcessor(LogitsProcessor): + r""" + Frequency penalty as defined by OpenAI + + Args: + penalty (`float`): + The parameter for frequency penalty. 0.0 means no penalty. + """ + + def __init__(self, penalty: float): + self.penalty = penalty + + def __call__( + self, input_ids: torch.LongTensor, scores: torch.FloatTensor + ) -> torch.FloatTensor: + score = torch.gather(scores, 1, input_ids) + # if score < 0 then penalty has to be multiplied to reduce the previous token probability + score = -torch.where( + score < 0, score * self.penalty, score / self.penalty + ) + + return scores.scatter_add_(1, input_ids, score) + + +class HeterogeneousFrequencyPenaltyLogitsProcessor(LogitsProcessor): + r""" + Frequency penalty as defined by OpenAI + + Args: + frequency_penalty (`List[float]`): + The parameter for frequency penalty. 0.0 means no penalty. 
+ """ + + def __init__(self, penalty: List[float], dtype: torch.dtype, device: torch.device): + self.penalty = penalty + self.penalty_tensor = torch.tensor( + penalty, dtype=dtype, device=device + ).unsqueeze(1) + + def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: + score = torch.gather(scores, 1, input_ids) + # if score < 0 then penalty has to be multiplied to reduce the previous token probability + score = -torch.where( + score < 0, score * self.penalty_tensor, score / self.penalty_tensor + ) + + return scores.scatter_add_(1, input_ids, score) + + def filter(self, indices): + self.penalty = [self.penalty[i] for i in indices] + if any([x != 0.0 for x in self.penalty]): + self.penalty_tensor = self.penalty_tensor[indices] + return self + return None + + class HeterogeneousTemperatureLogitsWarper: r""" [`LogitsWarper`] for temperature (exponential scaling output probability distribution). diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py index 9e40fa43..9d918ac5 100644 --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -1,14 +1,16 @@ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. import re -from typing import Callable, List, Optional, Tuple +from typing import List, Optional, Tuple import torch from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason from text_generation_server.utils.logits_process import ( + FrequencyPenaltyLogitsProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, + HeterogeneousFrequencyPenaltyLogitsProcessor, HeterogeneousTemperatureLogitsWarper, HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, @@ -26,6 +28,7 @@ class NextTokenChooser: watermark=False, temperature=1.0, repetition_penalty=1.0, + frequency_penalty=0.0, top_k=None, top_p=None, typical_p=None, @@ -35,7 +38,14 @@ class NextTokenChooser: ): self.watermark_processor = WatermarkLogitsProcessor(device=device) if watermark else None self.repetition_processor = ( - RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty else None + RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) + if repetition_penalty and repetition_penalty != 1.0 + else None + ) + self.frequency_processor = ( + FrequencyPenaltyLogitsProcessor(penalty=frequency_penalty) + if frequency_penalty and frequency_penalty != 0.0 + else None ) has_warpers = ( @@ -57,6 +67,8 @@ class NextTokenChooser: scores = self.watermark_processor(input_ids, scores) if self.repetition_processor is not None: scores = self.repetition_processor(input_ids, scores) + if self.frequency_processor is not None: + scores = self.frequency_processor(input_ids, scores) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) @@ -77,6 +89,7 @@ class NextTokenChooser: watermark=pb.watermark, temperature=pb.temperature, repetition_penalty=pb.repetition_penalty, + frequency_penalty=pb.frequency_penalty, top_k=pb.top_k, top_p=pb.top_p, typical_p=pb.typical_p, @@ -171,7 +184,6 @@ def create_n_gram_speculation( return speculative_ids - class HeterogeneousNextTokenChooser: def __init__( self, @@ -180,6 +192,7 @@ class HeterogeneousNextTokenChooser: watermark: List[bool], temperature: List[float], repetition_penalty: List[float], + frequency_penalty: List[float], top_k: List[int], top_p: List[float], typical_p: List[float], @@ -203,14 +216,28 @@ class 
HeterogeneousNextTokenChooser: ) self.repetition_processor = ( - HeterogeneousRepetitionPenaltyLogitsProcessor(repetition_penalty, dtype, device) + HeterogeneousRepetitionPenaltyLogitsProcessor( + repetition_penalty, dtype, device + ) if any([x != 1.0 for x in repetition_penalty]) else None ) + self.frequency_processor = ( + HeterogeneousFrequencyPenaltyLogitsProcessor( + frequency_penalty, dtype, device + ) + if any([x != 0.0 for x in frequency_penalty]) + else None + ) + if any([x != 1.0 for x in temperature]): - do_sample = [sample or x != 1.0 for x, sample in zip(temperature, do_sample)] - warpers.append(HeterogeneousTemperatureLogitsWarper(temperature, dtype, device)) + do_sample = [ + sample or x != 1.0 for x, sample in zip(temperature, do_sample) + ] + warpers.append( + HeterogeneousTemperatureLogitsWarper(temperature, dtype, device) + ) if any([x != 0 for x in top_k]): do_sample = [sample or x != 0 for x, sample in zip(top_k, do_sample)] @@ -261,6 +288,8 @@ class HeterogeneousNextTokenChooser: _scores = self.watermark_processor(input_ids, _scores) if self.repetition_processor is not None: _scores = self.repetition_processor(input_ids, _scores) + if self.frequency_processor is not None: + _scores = self.frequency_processor(input_ids, _scores) for warper in self.warpers: _scores = warper(input_ids, _scores) @@ -329,6 +358,9 @@ class HeterogeneousNextTokenChooser: if self.repetition_processor is not None: self.repetition_processor = self.repetition_processor.filter(indices) + if self.frequency_processor is not None: + self.frequency_processor = self.frequency_processor.filter(indices) + filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) @@ -358,6 +390,7 @@ class HeterogeneousNextTokenChooser: watermark=[pb_.watermark for pb_ in pb], temperature=[pb_.temperature for pb_ in pb], repetition_penalty=[pb_.repetition_penalty for pb_ in pb], + frequency_penalty=[pb_.frequency_penalty for pb_ in pb], top_k=[pb_.top_k for pb_ in pb], top_p=[pb_.top_p for pb_ in pb], typical_p=[pb_.typical_p for pb_ in pb], @@ -431,7 +464,10 @@ class HeterogeneousSampling: def batch_top_tokens( - top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor + top_n_tokens: List[int], + top_n_tokens_tensor: torch.Tensor, + logprobs: torch.Tensor, + accepted_ids: torch.Tensor, ) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. 
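As a quick illustration of the gather/scatter pattern used by the new frequency-penalty processors above, here is a standalone sketch with a toy vocabulary and made-up values; it mirrors the processor's arithmetic (using the out-of-place `scatter_add` for clarity) and nothing more:

```python
import torch

penalty = 0.5
scores = torch.tensor([[2.0, -1.0, 0.5, 3.0]])  # current logits, vocab of 4
input_ids = torch.tensor([[1, 3, 3]])           # token 3 has appeared twice

# Same steps as FrequencyPenaltyLogitsProcessor.__call__:
score = torch.gather(scores, 1, input_ids)
# negative logits are scaled by the penalty, positive ones divided by it
score = -torch.where(score < 0, score * penalty, score / penalty)
new_scores = scores.scatter_add(1, input_ids, score)

# scatter_add accumulates duplicate indices, so token 3 (seen twice) is
# penalized twice as hard as token 1 (seen once).
print(new_scores)  # [[ 2.0, -0.5, 0.5, -9.0]]
```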
@@ -443,12 +479,15 @@ def batch_top_tokens( if max_top_n == 0: return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) - batch_size = accepted_ids.shape[0] speculate_size = logprobs.shape[0] // batch_size top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size - top_n_tokens = [min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size)] + top_n_tokens = [ + min(tok, logprobs.size(-1)) + for tok in top_n_tokens + for _ in range(speculate_size) + ] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset @@ -476,10 +515,10 @@ def batch_top_tokens( for i, n_accepted_ids in enumerate(accepted_ids_list): start = speculate_size * i stop = speculate_size * (i + 1) - _top_indices = top_indices[start: stop] - _top_values = top_values[start: stop] - _top_n_ishes = top_n_ishes[start: stop] - _top_n_tokens = top_n_tokens[start: stop] + _top_indices = top_indices[start:stop] + _top_values = top_values[start:stop] + _top_n_ishes = top_n_ishes[start:stop] + _top_n_tokens = top_n_tokens[start:stop] _top_indices = _top_indices[:n_accepted_ids] _top_values = _top_values[:n_accepted_ids] @@ -489,7 +528,9 @@ def batch_top_tokens( row_top_token_ids = [] row_top_token_logprobs = [] - for idxs, vals, n, req_n in zip(_top_indices, _top_values, _top_n_ishes, _top_n_tokens): + for idxs, vals, n, req_n in zip( + _top_indices, _top_values, _top_n_ishes, _top_n_tokens + ): indices = idxs[:n] if req_n > 0 else [] values = vals[:n] if req_n > 0 else [] @@ -527,8 +568,8 @@ def make_tokenizer_optional(tokenizer): for inner_text in text] if padding == "longest": max_length = max(len(tokens) for tokens in all_tokens) - return {"input_ids": torch.tensor([[tokenizer.pad_token_id] * (max_length - len(tokens)) + tokens for tokens in all_tokens], dtype=torch.int32), - "attention_mask": torch.tensor([[0] * (max_length - len(tokens)) + [1] * len(tokens) for tokens in all_tokens], dtype=torch.int32)} + return {"input_ids": torch.tensor([[tokenizer.pad_token_id] * (max_length - len(tokens)) + tokens for tokens in all_tokens]), + "attention_mask": torch.tensor([[0] * (max_length - len(tokens)) + [1] * len(tokens) for tokens in all_tokens])} def decode( self, From 8415d4605d2cd41a225e617b6e7e71bf09e60775 Mon Sep 17 00:00:00 2001 From: drbh Date: Fri, 9 Feb 2024 04:32:04 -0500 Subject: [PATCH 073/153] chore: bump ci rust version (#1543) This PR bumps the rust toolchain in CI to resolve the CI build issue ```bash Downloaded crossbeam-utils v0.8.19 Downloaded crc32fast v1.3.2 error: failed to compile `text-generation-router v1.4.0 (/home/runner/work/text-generation-inference/text-generation-inference/router)`, intermediate artifacts can be found at `/home/runner/work/text-generation-inference/text-generation-inference/target` Caused by: package `clap_lex v0.7.0` cannot be built because it requires rustc 1.74 or newer, while the currently active rustc version is 1.71.0 Either upgrade to rustc 1.74 or newer, or use cargo update -p clap_lex@0.7.0 --precise ver where `ver` is the latest version of `clap_lex` supporting rustc 1.71.0 make: *** [Makefile:12: install-router] Error 101 ``` --- .github/workflows/tests.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 311ee6b9..ecc8eb4d 100644 --- 
a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -33,7 +33,10 @@ jobs: - name: Install Rust uses: actions-rs/toolchain@v1 with: - toolchain: 1.71.0 + # Released on: 28 December, 2023 # Branched from master on: 10 November, 2023 # https://releases.rs/docs/1.75.0/ toolchain: 1.75.0 override: true components: rustfmt, clippy - name: Install Protoc

From 777e5192771201f3db1abf69f8d52c216a7346ab Mon Sep 17 00:00:00 2001 From: Ilyas Moutawwakil <57442720+IlyasMoutawwakil@users.noreply.github.com> Date: Fri, 9 Feb 2024 10:45:16 +0100 Subject: [PATCH 074/153] ROCm AWQ support (#1514)

# What does this PR do? This PR makes it possible to run AWQ models with Exllama/GPTQ kernels, specifically for ROCm devices that support Exllama kernels but not AWQ's GEMM. This is done by: - un-packing, reordering and re-packing AWQ weights when `--quantize gptq` is used but the model's `quant_method=awq`. - avoiding overflows when adding 1 to zeros in exllama and triton. Ref: https://github.com/casper-hansen/AutoAWQ/pull/313

## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a GitHub issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests?

## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
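To make the second bullet above concrete: GPTQ-style kernels add 1 to every stored 4-bit zero-point before dequantizing, and an AWQ checkpoint may store a zero of 15, so the increment no longer fits in 4 bits. A minimal sketch of the masking the patched kernels apply (values illustrative):

```python
zero = 15                 # a valid 4-bit zero-point in an AWQ checkpoint
print(zero + 1)           # 16 -> overflows the 4-bit range [0, 15]
print((zero + 1) & 0x0F)  # 0  -> masked back into range, as the kernels below now do
```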
--------- Co-authored-by: Nicolas Patry --- .../test_flash_llama_gptq.json | 69 ++-- .../test_flash_llama_gptq_all_params.json | 25 +- .../test_flash_llama_gptq_load.json | 276 ++++++++------- .../test_flash_starcoder_gptq.json | 247 ++++++------- ...t_flash_starcoder_gptq_default_params.json | 63 ++-- .../test_flash_starcoder_gptq_load.json | 332 +++++++++--------- .../exllama_kernels/cuda_func/q4_matmul.cu | 8 +- .../exllama_kernels/cuda_func/q4_matrix.cu | 2 +- .../cuda/q_gemm_kernel_gptq.cuh | 16 +- .../exllamav2_kernels/cuda/q_matrix.cu | 16 +- .../flash_santacoder_modeling.py | 14 +- .../utils/awq/conversion_utils.py | 97 +++++ .../utils/gptq/quant_linear.py | 2 +- server/text_generation_server/utils/layers.py | 7 + .../text_generation_server/utils/weights.py | 114 ++++-- 15 files changed, 737 insertions(+), 551 deletions(-) create mode 100644 server/text_generation_server/utils/awq/conversion_utils.py diff --git a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json index e4ffb83b..7797cc6c 100644 --- a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json +++ b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json @@ -11,78 +11,79 @@ }, { "id": 4321, - "logprob": -9.59375, + "logprob": -9.7890625, "text": "Test" }, { "id": 2009, - "logprob": -9.6640625, + "logprob": -9.625, "text": "request" } ], "seed": null, "tokens": [ - { - "id": 29918, - "logprob": -2.3867188, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -2.8183594, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -1.6367188, + "logprob": -2.3359375, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -1.0527344, + "logprob": -1.8779297, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.6542969, + "logprob": -1.2744141, "special": false, "text": " request" }, - { - "id": 29918, - "logprob": -0.056121826, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -0.01600647, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -0.87939453, + "logprob": -1.6933594, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -0.7529297, + "logprob": -1.4648438, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.2980957, + "logprob": -0.15600586, "special": false, "text": " request" + }, + { + "id": 13, + "logprob": -0.8027344, + "special": false, + "text": "\n" + }, + { + "id": 3057, + "logprob": -0.23022461, + "special": false, + "text": "Test" + }, + { + "id": 2009, + "logprob": -0.0069885254, + "special": false, + "text": " request" + }, + { + "id": 13, + "logprob": -0.02218628, + "special": false, + "text": "\n" } - ] + ], + "top_tokens": null }, - "generated_text": "_uri\nTest request_uri\nTest request" + "generated_text": "\nTest request\nTest request\nTest request\n" } diff --git a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json index 02713a00..fa2fd4a2 100644 --- a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json +++ b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json @@ -11,12 +11,12 @@ }, { "id": 4321, - "logprob": -9.6015625, + "logprob": -9.84375, "text": "Test" }, { "id": 
2009, - "logprob": -9.6640625, + "logprob": -9.6015625, "text": "request" } ], @@ -24,13 +24,13 @@ "tokens": [ { "id": 29899, - "logprob": -1.1640625, + "logprob": -1.5625, "special": false, "text": "-" }, { "id": 1454, - "logprob": -0.07543945, + "logprob": -0.20410156, "special": false, "text": "for" }, @@ -54,19 +54,19 @@ }, { "id": 396, - "logprob": -0.2956543, + "logprob": -0.27685547, "special": false, "text": " #" }, { "id": 29906, - "logprob": -0.52734375, + "logprob": -0.4970703, "special": false, "text": "2" }, { "id": 29900, - "logprob": -0.6899414, + "logprob": -0.80615234, "special": false, "text": "0" }, @@ -77,12 +77,13 @@ "text": "1" }, { - "id": 29946, - "logprob": -1.5068359, + "id": 29955, + "logprob": -1.0751953, "special": false, - "text": "4" + "text": "7" } - ] + ], + "top_tokens": null }, - "generated_text": "Test request-for-comment: #2014" + "generated_text": "Test request-for-comment: #2017" } diff --git a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json index 88bfa4f9..594b7351 100644 --- a/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json +++ b/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json @@ -12,80 +12,81 @@ }, { "id": 4321, - "logprob": -9.6015625, + "logprob": -9.828125, "text": "Test" }, { "id": 2009, - "logprob": -9.671875, + "logprob": -9.609375, "text": "request" } ], "seed": null, "tokens": [ - { - "id": 29918, - "logprob": -2.3828125, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -2.8105469, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -1.6396484, + "logprob": -2.3300781, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -1.0546875, + "logprob": -1.8740234, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.6513672, + "logprob": -1.2646484, "special": false, "text": " request" }, - { - "id": 29918, - "logprob": -0.056365967, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -0.016082764, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -0.87841797, + "logprob": -1.7158203, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -0.7548828, + "logprob": -1.4667969, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.29711914, + "logprob": -0.15344238, "special": false, "text": " request" + }, + { + "id": 13, + "logprob": -0.81591797, + "special": false, + "text": "\n" + }, + { + "id": 3057, + "logprob": -0.22973633, + "special": false, + "text": "Test" + }, + { + "id": 2009, + "logprob": -0.007045746, + "special": false, + "text": " request" + }, + { + "id": 13, + "logprob": -0.021957397, + "special": false, + "text": "\n" } - ] + ], + "top_tokens": null }, - "generated_text": "_uri\nTest request_uri\nTest request" + "generated_text": "\nTest request\nTest request\nTest request\n" }, { "details": { @@ -100,80 +101,81 @@ }, { "id": 4321, - "logprob": -9.6015625, + "logprob": -9.84375, "text": "Test" }, { "id": 2009, - "logprob": -9.6640625, + "logprob": -9.59375, "text": "request" } ], "seed": null, "tokens": [ - { - "id": 29918, - "logprob": -2.3828125, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -2.828125, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -1.6386719, + "logprob": -2.3378906, "special": false, "text": "\n" }, { "id": 3057, - "logprob": 
-1.0527344, + "logprob": -1.8779297, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.6542969, + "logprob": -1.2636719, "special": false, "text": " request" }, - { - "id": 29918, - "logprob": -0.055877686, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -0.016021729, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -0.8769531, + "logprob": -1.6992188, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -0.7583008, + "logprob": -1.4589844, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.29833984, + "logprob": -0.15344238, "special": false, "text": " request" + }, + { + "id": 13, + "logprob": -0.79052734, + "special": false, + "text": "\n" + }, + { + "id": 3057, + "logprob": -0.22937012, + "special": false, + "text": "Test" + }, + { + "id": 2009, + "logprob": -0.007041931, + "special": false, + "text": " request" + }, + { + "id": 13, + "logprob": -0.022140503, + "special": false, + "text": "\n" } - ] + ], + "top_tokens": null }, - "generated_text": "_uri\nTest request_uri\nTest request" + "generated_text": "\nTest request\nTest request\nTest request\n" }, { "details": { @@ -188,80 +190,81 @@ }, { "id": 4321, - "logprob": -9.6015625, + "logprob": -9.84375, "text": "Test" }, { "id": 2009, - "logprob": -9.671875, + "logprob": -9.609375, "text": "request" } ], "seed": null, "tokens": [ - { - "id": 29918, - "logprob": -2.3847656, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -2.8144531, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -1.6396484, + "logprob": -2.3261719, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -1.0527344, + "logprob": -1.8730469, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.65478516, + "logprob": -1.2587891, "special": false, "text": " request" }, - { - "id": 29918, - "logprob": -0.056243896, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -0.016143799, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -0.8808594, + "logprob": -1.6894531, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -0.75341797, + "logprob": -1.46875, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.2956543, + "logprob": -0.1541748, "special": false, "text": " request" + }, + { + "id": 13, + "logprob": -0.80322266, + "special": false, + "text": "\n" + }, + { + "id": 3057, + "logprob": -0.22912598, + "special": false, + "text": "Test" + }, + { + "id": 2009, + "logprob": -0.0070495605, + "special": false, + "text": " request" + }, + { + "id": 13, + "logprob": -0.021606445, + "special": false, + "text": "\n" } - ] + ], + "top_tokens": null }, - "generated_text": "_uri\nTest request_uri\nTest request" + "generated_text": "\nTest request\nTest request\nTest request\n" }, { "details": { @@ -276,79 +279,80 @@ }, { "id": 4321, - "logprob": -9.6015625, + "logprob": -9.84375, "text": "Test" }, { "id": 2009, - "logprob": -9.6640625, + "logprob": -9.6015625, "text": "request" } ], "seed": null, "tokens": [ - { - "id": 29918, - "logprob": -2.3769531, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -2.8183594, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -1.6396484, + "logprob": -2.3320312, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -1.0546875, + "logprob": -1.875, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.65478516, + "logprob": -1.2646484, "special": false, "text": " request" }, - { - "id": 29918, - "logprob": 
-0.05557251, - "special": false, - "text": "_" - }, - { - "id": 5338, - "logprob": -0.01612854, - "special": false, - "text": "uri" - }, { "id": 13, - "logprob": -0.8730469, + "logprob": -1.6884766, "special": false, "text": "\n" }, { "id": 3057, - "logprob": -0.7519531, + "logprob": -1.4589844, "special": false, "text": "Test" }, { "id": 2009, - "logprob": -0.29785156, + "logprob": -0.15185547, "special": false, "text": " request" + }, + { + "id": 13, + "logprob": -0.79833984, + "special": false, + "text": "\n" + }, + { + "id": 3057, + "logprob": -0.22827148, + "special": false, + "text": "Test" + }, + { + "id": 2009, + "logprob": -0.006996155, + "special": false, + "text": " request" + }, + { + "id": 13, + "logprob": -0.021560669, + "special": false, + "text": "\n" } - ] + ], + "top_tokens": null }, - "generated_text": "_uri\nTest request_uri\nTest request" + "generated_text": "\nTest request\nTest request\nTest request\n" } ] diff --git a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json index 53055e42..5e537bb7 100644 --- a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json +++ b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json @@ -1,193 +1,194 @@ { - "generated_text": "\n return sum(L) / len(L)\n\n\ndef geometric_mean(L", "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, - "seed": null, "prefill": [ { "id": 589, - "text": "def", - "logprob": null + "logprob": null, + "text": "def" }, { "id": 3226, - "text": " ge", - "logprob": -9.0234375 + "logprob": -8.5859375, + "text": " ge" }, { "id": 21017, - "text": "ometric", - "logprob": -9.0859375 + "logprob": -7.5859375, + "text": "ometric" }, { "id": 81, - "text": "_", - "logprob": -0.25878906 + "logprob": -0.2668457, + "text": "_" }, { "id": 6009, - "text": "mean", - "logprob": -2.2109375 + "logprob": -1.6416016, + "text": "mean" }, { "id": 26, - "text": "(", - "logprob": -0.30371094 + "logprob": -0.22705078, + "text": "(" }, { "id": 62, - "text": "L", - "logprob": -5.6054688 + "logprob": -5.2304688, + "text": "L" }, { "id": 44, - "text": ":", - "logprob": -3.0722656 + "logprob": -3.0976562, + "text": ":" }, { "id": 1682, - "text": " List", - "logprob": -0.6879883 + "logprob": -1.1044922, + "text": " List" }, { "id": 77, - "text": "[", - "logprob": -0.38500977 + "logprob": -0.14294434, + "text": "[" }, { "id": 1808, - "text": "float", - "logprob": -0.984375 + "logprob": -0.32299805, + "text": "float" }, { "id": 10794, - "text": "]):", - "logprob": -2.5351562 + "logprob": -2.8164062, + "text": "]):" } ], + "seed": null, "tokens": [ { "id": 284, - "text": "\n ", - "logprob": -1.1738281, - "special": false + "logprob": -0.1282959, + "special": false, + "text": "\n " }, { - "id": 442, - "text": " return", - "logprob": -0.95947266, - "special": false + "id": 1524, + "logprob": -0.97998047, + "special": false, + "text": " \"\"\"" }, { - "id": 3632, - "text": " sum", - "logprob": -1.4199219, - "special": false + "id": 284, + "logprob": -0.7006836, + "special": false, + "text": "\n " }, { - "id": 26, - "text": "(", - "logprob": -0.085876465, - "special": false + "id": 14883, + "logprob": -2.1933594, + "special": false, + "text": " Calculate" }, { - "id": 62, - "text": "L", - "logprob": -0.09875488, - "special": false - }, - { - "id": 27, - "text": ")", - "logprob": 
-0.30517578, - "special": false - }, - { - "id": 517, - "text": " /", - "logprob": -0.42089844, - "special": false - }, - { - "id": 2069, - "text": " len", - "logprob": -0.042053223, - "special": false - }, - { - "id": 26, - "text": "(", - "logprob": -0.0011806488, - "special": false - }, - { - "id": 62, - "text": "L", - "logprob": -0.0005259514, - "special": false - }, - { - "id": 27, - "text": ")", - "logprob": -0.0017633438, - "special": false - }, - { - "id": 478, - "text": "\n\n", - "logprob": -0.69189453, - "special": false - }, - { - "id": 203, - "text": "\n", - "logprob": -0.041870117, - "special": false - }, - { - "id": 589, - "text": "def", - "logprob": -0.27856445, - "special": false + "id": 322, + "logprob": -0.2697754, + "special": false, + "text": " the" }, { "id": 3226, - "text": " ge", - "logprob": -1.7255859, - "special": false + "logprob": -0.0836792, + "special": false, + "text": " ge" }, { "id": 21017, - "text": "ometric", - "logprob": -0.011291504, - "special": false + "logprob": -0.018737793, + "special": false, + "text": "ometric" }, { - "id": 81, - "text": "_", - "logprob": -0.008430481, - "special": false + "id": 5651, + "logprob": -0.028640747, + "special": false, + "text": " mean" }, { - "id": 6009, - "text": "mean", - "logprob": -0.025787354, - "special": false + "id": 432, + "logprob": -0.29467773, + "special": false, + "text": " of" }, { - "id": 26, - "text": "(", - "logprob": -0.073913574, - "special": false + "id": 312, + "logprob": -0.31518555, + "special": false, + "text": " a" }, { - "id": 62, - "text": "L", - "logprob": -0.09967041, - "special": false + "id": 1149, + "logprob": -0.20605469, + "special": false, + "text": " list" + }, + { + "id": 432, + "logprob": -0.23254395, + "special": false, + "text": " of" + }, + { + "id": 7515, + "logprob": -0.4489746, + "special": false, + "text": " numbers" + }, + { + "id": 32, + "logprob": -0.6044922, + "special": false, + "text": "." 
+ }, + { + "id": 446, + "logprob": -0.63964844, + "special": false, + "text": "\n\n " + }, + { + "id": 499, + "logprob": -1.1953125, + "special": false, + "text": " :" + }, + { + "id": 753, + "logprob": -0.03515625, + "special": false, + "text": "param" + }, + { + "id": 498, + "logprob": -0.06311035, + "special": false, + "text": " L" + }, + { + "id": 44, + "logprob": -0.003414154, + "special": false, + "text": ":" + }, + { + "id": 1682, + "logprob": -1.3310547, + "special": false, + "text": " List" } - ] - } + ], + "top_tokens": null + }, + "generated_text": "\n \"\"\"\n Calculate the geometric mean of a list of numbers.\n\n :param L: List" } diff --git a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json index 1ace3814..bf0f5146 100644 --- a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json +++ b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json @@ -11,57 +11,57 @@ }, { "id": 3226, - "logprob": -9.0234375, + "logprob": -8.5859375, "text": " ge" }, { "id": 21017, - "logprob": -9.0859375, + "logprob": -7.5898438, "text": "ometric" }, { "id": 81, - "logprob": -0.25830078, + "logprob": -0.26586914, "text": "_" }, { "id": 6009, - "logprob": -2.1875, + "logprob": -1.6347656, "text": "mean" }, { "id": 26, - "logprob": -0.30004883, + "logprob": -0.22705078, "text": "(" }, { "id": 62, - "logprob": -5.6171875, + "logprob": -5.2382812, "text": "L" }, { "id": 44, - "logprob": -3.078125, + "logprob": -3.0996094, "text": ":" }, { "id": 1682, - "logprob": -0.68066406, + "logprob": -1.1025391, "text": " List" }, { "id": 77, - "logprob": -0.38745117, + "logprob": -0.14294434, "text": "[" }, { "id": 1808, - "logprob": -0.9453125, + "logprob": -0.32226562, "text": "float" }, { "id": 10794, - "logprob": -2.5371094, + "logprob": -2.8164062, "text": "]):" } ], @@ -69,19 +69,19 @@ "tokens": [ { "id": 284, - "logprob": -0.051635742, + "logprob": 0.0, "special": false, "text": "\n " }, { "id": 442, - "logprob": 0.0, + "logprob": -1.3134766, "special": false, "text": " return" }, { "id": 11665, - "logprob": -1.2236328, + "logprob": -0.10021973, "special": false, "text": " reduce" }, @@ -129,7 +129,7 @@ }, { "id": 319, - "logprob": 0.0, + "logprob": -0.42871094, "special": false, "text": " *" }, @@ -158,36 +158,37 @@ "text": ")" }, { - "id": 203, - "logprob": -0.12695312, - "special": false, - "text": "\n" - }, - { - "id": 203, + "id": 1115, "logprob": 0.0, "special": false, - "text": "\n" + "text": " **" }, { - "id": 589, + "id": 308, "logprob": 0.0, "special": false, - "text": "def" + "text": " (" }, { - "id": 3226, + "id": 35, "logprob": 0.0, "special": false, - "text": " ge" + "text": "1" }, { - "id": 21017, + "id": 32, + "logprob": -0.31323242, + "special": false, + "text": "." 
+ }, + { + "id": 34, "logprob": 0.0, "special": false, - "text": "ometric" + "text": "0" } - ] + ], + "top_tokens": null }, - "generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric" + "generated_text": "\n return reduce(lambda x, y: x * y, L) ** (1.0" } diff --git a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json index 5381ce5a..46a21ed8 100644 --- a/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json +++ b/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json @@ -12,57 +12,57 @@ }, { "id": 3226, - "logprob": -9.0234375, + "logprob": -8.5859375, "text": " ge" }, { "id": 21017, - "logprob": -9.0859375, + "logprob": -7.5820312, "text": "ometric" }, { "id": 81, - "logprob": -0.25927734, + "logprob": -0.26708984, "text": "_" }, { "id": 6009, - "logprob": -2.25, + "logprob": -1.6386719, "text": "mean" }, { "id": 26, - "logprob": -0.30126953, + "logprob": -0.22717285, "text": "(" }, { "id": 62, - "logprob": -5.7539062, + "logprob": -5.234375, "text": "L" }, { "id": 44, - "logprob": -3.0878906, + "logprob": -3.1015625, "text": ":" }, { "id": 1682, - "logprob": -0.6845703, + "logprob": -1.1083984, "text": " List" }, { "id": 77, - "logprob": -0.3918457, + "logprob": -0.14294434, "text": "[" }, { "id": 1808, - "logprob": -0.8798828, + "logprob": -0.32592773, "text": "float" }, { "id": 10794, - "logprob": -2.4980469, + "logprob": -2.8164062, "text": "]):" } ], @@ -70,67 +70,68 @@ "tokens": [ { "id": 284, - "logprob": -1.1533203, + "logprob": -0.12817383, "special": false, "text": "\n " }, { - "id": 442, - "logprob": -0.91796875, + "id": 1524, + "logprob": -0.9863281, "special": false, - "text": " return" + "text": " \"\"\"" }, { - "id": 3632, - "logprob": -1.3291016, + "id": 284, + "logprob": -0.7011719, "special": false, - "text": " sum" + "text": "\n " }, { - "id": 26, - "logprob": -0.08062744, + "id": 14883, + "logprob": -2.2050781, "special": false, - "text": "(" + "text": " Calculate" }, { - "id": 62, - "logprob": -0.097717285, + "id": 322, + "logprob": -0.2668457, "special": false, - "text": "L" + "text": " the" }, { - "id": 27, - "logprob": -0.29003906, + "id": 3226, + "logprob": -0.08465576, "special": false, - "text": ")" + "text": " ge" }, { - "id": 517, - "logprob": -0.34958984, + "id": 21017, + "logprob": -0.019012451, "special": false, - "text": " /" + "text": "ometric" }, { - "id": 2069, - "logprob": -0.03829956, + "id": 5651, + "logprob": -0.028625488, "special": false, - "text": " len" + "text": " mean" }, { - "id": 26, - "logprob": -0.0011987686, + "id": 432, + "logprob": -0.29418945, "special": false, - "text": "(" + "text": " of" }, { - "id": 62, - "logprob": -0.00050878525, + "id": 312, + "logprob": -0.3161621, "special": false, - "text": "L" + "text": " a" } - ] + ], + "top_tokens": null }, - "generated_text": "\n return sum(L) / len(L" + "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { @@ -145,57 +146,57 @@ }, { "id": 3226, - "logprob": -9.0234375, + "logprob": -8.5859375, "text": " ge" }, { "id": 21017, - "logprob": -9.0859375, + "logprob": -7.59375, "text": "ometric" }, { "id": 81, - "logprob": -0.25878906, + "logprob": -0.26953125, "text": "_" }, { "id": 6009, - "logprob": -2.2109375, + "logprob": -1.640625, "text": "mean" }, { "id": 26, - "logprob": -0.30371094, + 
"logprob": -0.22705078, "text": "(" }, { "id": 62, - "logprob": -5.6054688, + "logprob": -5.234375, "text": "L" }, { "id": 44, - "logprob": -3.0722656, + "logprob": -3.1132812, "text": ":" }, { "id": 1682, - "logprob": -0.6879883, + "logprob": -1.1123047, "text": " List" }, { "id": 77, - "logprob": -0.38500977, + "logprob": -0.14294434, "text": "[" }, { "id": 1808, - "logprob": -0.984375, + "logprob": -0.32299805, "text": "float" }, { "id": 10794, - "logprob": -2.5351562, + "logprob": -2.8164062, "text": "]):" } ], @@ -203,67 +204,68 @@ "tokens": [ { "id": 284, - "logprob": -1.1738281, + "logprob": -0.12854004, "special": false, "text": "\n " }, { - "id": 442, - "logprob": -0.9584961, + "id": 1524, + "logprob": -0.9897461, "special": false, - "text": " return" + "text": " \"\"\"" }, { - "id": 3632, - "logprob": -1.4169922, + "id": 284, + "logprob": -0.69970703, "special": false, - "text": " sum" + "text": "\n " }, { - "id": 26, - "logprob": -0.085876465, + "id": 14883, + "logprob": -2.2050781, "special": false, - "text": "(" + "text": " Calculate" }, { - "id": 62, - "logprob": -0.0982666, + "id": 322, + "logprob": -0.2668457, "special": false, - "text": "L" + "text": " the" }, { - "id": 27, - "logprob": -0.3022461, + "id": 3226, + "logprob": -0.08496094, "special": false, - "text": ")" + "text": " ge" }, { - "id": 517, - "logprob": -0.40504883, + "id": 21017, + "logprob": -0.019012451, "special": false, - "text": " /" + "text": "ometric" }, { - "id": 2069, - "logprob": -0.041656494, + "id": 5651, + "logprob": -0.029037476, "special": false, - "text": " len" + "text": " mean" }, { - "id": 26, - "logprob": -0.0011844635, + "id": 432, + "logprob": -0.2939453, "special": false, - "text": "(" + "text": " of" }, { - "id": 62, - "logprob": -0.0005264282, + "id": 312, + "logprob": -0.31591797, "special": false, - "text": "L" + "text": " a" } - ] + ], + "top_tokens": null }, - "generated_text": "\n return sum(L) / len(L" + "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { @@ -278,57 +280,57 @@ }, { "id": 3226, - "logprob": -9.0234375, + "logprob": -8.5859375, "text": " ge" }, { "id": 21017, - "logprob": -9.0859375, + "logprob": -7.5859375, "text": "ometric" }, { "id": 81, - "logprob": -0.25927734, + "logprob": -0.26586914, "text": "_" }, { "id": 6009, - "logprob": -2.25, + "logprob": -1.6347656, "text": "mean" }, { "id": 26, - "logprob": -0.30126953, + "logprob": -0.22766113, "text": "(" }, { "id": 62, - "logprob": -5.7539062, + "logprob": -5.2265625, "text": "L" }, { "id": 44, - "logprob": -3.0878906, + "logprob": -3.0976562, "text": ":" }, { "id": 1682, - "logprob": -0.6845703, + "logprob": -1.1025391, "text": " List" }, { "id": 77, - "logprob": -0.3918457, + "logprob": -0.1427002, "text": "[" }, { "id": 1808, - "logprob": -0.8798828, + "logprob": -0.32592773, "text": "float" }, { "id": 10794, - "logprob": -2.4980469, + "logprob": -2.8164062, "text": "]):" } ], @@ -336,67 +338,68 @@ "tokens": [ { "id": 284, - "logprob": -1.1533203, + "logprob": -0.13012695, "special": false, "text": "\n " }, { - "id": 442, - "logprob": -0.9165039, + "id": 1524, + "logprob": -0.98046875, "special": false, - "text": " return" + "text": " \"\"\"" }, { - "id": 3632, - "logprob": -1.328125, + "id": 284, + "logprob": -0.69921875, "special": false, - "text": " sum" + "text": "\n " }, { - "id": 26, - "logprob": -0.07946777, + "id": 14883, + "logprob": -2.1992188, "special": false, - "text": "(" + "text": " Calculate" }, { - "id": 62, - "logprob": -0.09820557, + "id": 322, + "logprob": 
-0.2668457, "special": false, - "text": "L" + "text": " the" }, { - "id": 27, - "logprob": -0.28930664, + "id": 3226, + "logprob": -0.083496094, "special": false, - "text": ")" + "text": " ge" }, { - "id": 517, - "logprob": -0.34592773, + "id": 21017, + "logprob": -0.01902771, "special": false, - "text": " /" + "text": "ometric" }, { - "id": 2069, - "logprob": -0.038330078, + "id": 5651, + "logprob": -0.029006958, "special": false, - "text": " len" + "text": " mean" }, { - "id": 26, - "logprob": -0.0011940002, + "id": 432, + "logprob": -0.29248047, "special": false, - "text": "(" + "text": " of" }, { - "id": 62, - "logprob": -0.00050878525, + "id": 312, + "logprob": -0.3161621, "special": false, - "text": "L" + "text": " a" } - ] + ], + "top_tokens": null }, - "generated_text": "\n return sum(L) / len(L" + "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { @@ -411,57 +414,57 @@ }, { "id": 3226, - "logprob": -9.0234375, + "logprob": -8.5859375, "text": " ge" }, { "id": 21017, - "logprob": -9.0859375, + "logprob": -7.5859375, "text": "ometric" }, { "id": 81, - "logprob": -0.25927734, + "logprob": -0.26904297, "text": "_" }, { "id": 6009, - "logprob": -2.25, + "logprob": -1.6386719, "text": "mean" }, { "id": 26, - "logprob": -0.30126953, + "logprob": -0.22705078, "text": "(" }, { "id": 62, - "logprob": -5.7539062, + "logprob": -5.234375, "text": "L" }, { "id": 44, - "logprob": -3.0878906, + "logprob": -3.1132812, "text": ":" }, { "id": 1682, - "logprob": -0.6845703, + "logprob": -1.1074219, "text": " List" }, { "id": 77, - "logprob": -0.3918457, + "logprob": -0.14477539, "text": "[" }, { "id": 1808, - "logprob": -0.8798828, + "logprob": -0.3256836, "text": "float" }, { "id": 10794, - "logprob": -2.4980469, + "logprob": -2.8027344, "text": "]):" } ], @@ -469,66 +472,67 @@ "tokens": [ { "id": 284, - "logprob": -1.1533203, + "logprob": -0.12915039, "special": false, "text": "\n " }, { - "id": 442, - "logprob": -0.91259766, + "id": 1524, + "logprob": -0.98535156, "special": false, - "text": " return" + "text": " \"\"\"" }, { - "id": 3632, - "logprob": -1.3251953, + "id": 284, + "logprob": -0.69921875, "special": false, - "text": " sum" + "text": "\n " }, { - "id": 26, - "logprob": -0.08062744, + "id": 14883, + "logprob": -2.2011719, "special": false, - "text": "(" + "text": " Calculate" }, { - "id": 62, - "logprob": -0.09906006, + "id": 322, + "logprob": -0.26708984, "special": false, - "text": "L" + "text": " the" }, { - "id": 27, - "logprob": -0.28979492, + "id": 3226, + "logprob": -0.08502197, "special": false, - "text": ")" + "text": " ge" }, { - "id": 517, - "logprob": -0.35958984, + "id": 21017, + "logprob": -0.019012451, "special": false, - "text": " /" + "text": "ometric" }, { - "id": 2069, - "logprob": -0.038604736, + "id": 5651, + "logprob": -0.028625488, "special": false, - "text": " len" + "text": " mean" }, { - "id": 26, - "logprob": -0.0011901855, + "id": 432, + "logprob": -0.29589844, "special": false, - "text": "(" + "text": " of" }, { - "id": 62, - "logprob": -0.0005078316, + "id": 312, + "logprob": -0.31591797, "special": false, - "text": "L" + "text": " a" } - ] + ], + "top_tokens": null }, - "generated_text": "\n return sum(L) / len(L" + "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" } ] diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu index 61380f42..09126efe 100644 --- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu +++ 
b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu @@ -85,7 +85,7 @@ __global__ void q4_matmul_kernel if constexpr (use_half2) { half2 w_scale = w_scales_.item_half2half2(group, w_column); - uint32_t w_zero = w_zeros_.item(group, w_column) + 1; + uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F; if constexpr (use_x_map) acc = dot_product_8_x_map(acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8, x_map); else acc = dot_product_8 (acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8); @@ -93,7 +93,7 @@ __global__ void q4_matmul_kernel else { half w_scale = w_scales_.item(group, w_column); - uint32_t w_zero = w_zeros_.item(group, w_column) + 1; + uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F; if constexpr (use_x_map) acc_h = dot_product_8_x_map_h(acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8, x_map); else acc_h = dot_product_8_h (acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8); @@ -110,7 +110,7 @@ __global__ void q4_matmul_kernel { int group = k / groupsize; half2 w_scale = w_scales_.item_half2half2(group, w_column); - uint32_t w_zero = w_zeros_.item(group, w_column) + 1; + uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F; if constexpr (use_x_map) acc = dot_product_8_x_map(acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1, x_map); else acc = dot_product_8 (acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1); @@ -119,7 +119,7 @@ __global__ void q4_matmul_kernel { int group = k / groupsize; half w_scale = w_scales_.item(group, w_column); - uint32_t w_zero = w_zeros_.item(group, w_column) + 1; + uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F; if constexpr (use_x_map) acc_h = dot_product_8_x_map_h(acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1, x_map); else acc_h = dot_product_8_h (acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1); diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu index f3d1564f..2867a8d0 100644 --- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu +++ b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu @@ -189,7 +189,7 @@ __global__ void reconstruct_kernel int group = row / groupsize; half w_scale = w_scales_.item(group, column); - uint32_t w_zero = w_zeros_.item(group, column) + 1; + uint32_t w_zero = (w_zeros_.item(group, column) + 1) & 0x0F; uint32_t w_read = w_.item_uint32_t(row, column); half* out_ptr = out_.item_ptr(row, column); diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh index 74b0db2b..f816fd9d 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh @@ -152,10 +152,10 @@ __global__ void gemm_half_q_half_gptq_kernel half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); - dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); - dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); - dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); - dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); + 
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); // __syncthreads(); @@ -174,10 +174,10 @@ __global__ void gemm_half_q_half_gptq_kernel nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); - dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); - dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); - dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); - dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); + dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); } #pragma unroll diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu index ae08cc1f..7a0038b4 100644 --- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu +++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu @@ -237,10 +237,10 @@ __global__ void reconstruct_gptq_kernel half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); - dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); - dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); - dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); - dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); + dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); __syncthreads(); @@ -255,10 +255,10 @@ __global__ void reconstruct_gptq_kernel nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); - dequant_4bit_8_prep_zero(zeros[0] + 1, z1z16[0], y1y16[0]); - dequant_4bit_8_prep_zero(zeros[1] + 1, z1z16[1], y1y16[1]); - dequant_4bit_8_prep_zero(zeros[2] + 1, z1z16[2], y1y16[2]); - dequant_4bit_8_prep_zero(zeros[3] + 1, z1z16[3], y1y16[3]); + dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); + dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); + dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); + dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); } for (int p = 0; p < 4; p++) diff --git a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py index 22d03adf..81041046 100644 --- a/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py @@ -69,9 +69,17 @@ def _load_multi_mqa_gptq( qzeros = torch.cat([q_tensor, kv_tensor], dim=1) qzeros = qzeros.to(device=weights.device) - g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx") - g_idx = g_idx.to(device=weights.device) - bits, groupsize, _ = weights._get_gptq_params() + bits, groupsize, _, quant_method, = weights._get_gptq_params() + if quant_method == "gptq": + g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx") + g_idx = g_idx.to(device=weights.device) + elif quant_method == "awq": + g_idx = None + from text_generation_server.utils.awq.conversion_utils import ( + fast_awq_to_gptq, + ) + + 
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
 
     from text_generation_server.utils.layers import HAS_EXLLAMA
 
diff --git a/server/text_generation_server/utils/awq/conversion_utils.py b/server/text_generation_server/utils/awq/conversion_utils.py
new file mode 100644
index 00000000..b19eafbb
--- /dev/null
+++ b/server/text_generation_server/utils/awq/conversion_utils.py
@@ -0,0 +1,97 @@
+import torch
+from typing import List
+
+
+AWQ_PACK_ORDER = [0, 2, 4, 6, 1, 3, 5, 7]
+REVERSE_AWQ_PACK_ORDER = [0, 4, 1, 5, 2, 6, 3, 7]
+
+
+def pack(imatrix: torch.Tensor, direction: str = "column"):
+    """
+    Packs a 4-bit integer matrix into a packed 32-bit integer matrix.
+    Args:
+        imatrix (torch.Tensor): matrix of integers
+        direction (str): direction of packing, either "column" or "row"
+    Returns:
+        qmatrix (torch.Tensor): packed matrix of integers
+    """
+    shifts = torch.arange(0, 32, 4, dtype=torch.int32, device=imatrix.device)
+
+    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to 4 bits to correct any overflow
+
+    if direction == "column":
+        imatrix = imatrix.view(-1, imatrix.shape[1] // (32 // 4), (32 // 4))
+        qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, None, :]).sum(dim=-1)
+
+    elif direction == "row":
+        imatrix = imatrix.view(imatrix.shape[0] // (32 // 4), (32 // 4), -1)
+        qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, :, None]).sum(dim=1)
+
+    qmatrix = qmatrix.to(torch.int32)
+
+    return qmatrix
+
+
+def unpack(qmatrix: torch.Tensor, direction: str = "column"):
+    """
+    Unpacks a 32-bit packed integer matrix into a 4-bit integer matrix.
+    Args:
+        qmatrix (torch.Tensor): matrix of packed integers
+        direction (str): direction of unpacking, either "column" or "row"
+    Returns:
+        imatrix (torch.Tensor): matrix of integers
+    """
+    shifts = torch.arange(0, 32, 4, device=qmatrix.device)
+
+    if direction == "column":
+        imatrix = torch.bitwise_right_shift(
+            qmatrix[:, :, None], shifts[None, None, :]
+        ).view(qmatrix.shape[0], -1)
+
+    elif direction == "row":
+        imatrix = torch.bitwise_right_shift(
+            qmatrix[:, None, :], shifts[None, :, None]
+        ).view(-1, qmatrix.shape[-1])
+
+    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to 4 bits to correct any overflow
+
+    return imatrix
+
+
+def apply_order(
+    imatrix: torch.Tensor,
+    direction: str = "column",
+    order: List[int] = AWQ_PACK_ORDER,
+):
+    """
+    Applies the order to a 4-bit integer matrix.
+    Args:
+        imatrix (torch.Tensor): matrix of integers
+        direction (str): direction of applying order, either "column" or "row"
+        order (List[int]): order to apply, default is AWQ_PACK_ORDER
+    Returns:
+        imatrix (torch.Tensor): matrix of integers
+    """
+    if direction == "column":
+        imatrix = imatrix.view(-1, (32 // 4))[:, order].view(imatrix.shape)
+    elif direction == "row":
+        imatrix = imatrix.view((32 // 4), -1)[order, :].view(imatrix.shape)
+
+    return imatrix
+
+
+def fast_awq_to_gptq(qweight, qzeros):
+    # awq uses column packing for both weights and zeros
+    izeros = unpack(qzeros, direction="column")
+    iweights = unpack(qweight, direction="column")
+
+    # Reverse the order of the iweight and izeros tensors
+    izeros = apply_order(izeros, direction="column", order=REVERSE_AWQ_PACK_ORDER)
+    iweights = apply_order(iweights, direction="column", order=REVERSE_AWQ_PACK_ORDER)
+    # Subtract 1 from the izeros tensor (gptq adds 1 to the zeros)
+    izeros = izeros - 1
+    # exllama uses row packing for weights and column packing for zeros
+    qzeros = pack(izeros, direction="column")
+    qweight = pack(iweights, direction="row")
+
+    return qweight, qzeros
diff --git a/server/text_generation_server/utils/gptq/quant_linear.py b/server/text_generation_server/utils/gptq/quant_linear.py
index bfc91c00..8ad0dd80 100644
--- a/server/text_generation_server/utils/gptq/quant_linear.py
+++ b/server/text_generation_server/utils/gptq/quant_linear.py
@@ -182,7 +182,7 @@ try:
         )  # (BLOCK_SIZE_K, BLOCK_SIZE_N,)
 
         zeros = (zeros >> zeros_shifter[None, :]) & maxq
-        zeros = zeros + 1
+        zeros = (zeros + 1) & maxq  # mask with maxq to avoid overflow
 
         a = tl.load(a_ptrs, mask=a_mask, other=0.0)  # (BLOCK_SIZE_M, BLOCK_SIZE_K)
         b = tl.load(b_ptrs)  # (BLOCK_SIZE_K, BLOCK_SIZE_N), but repeated
diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py
index 010d6143..01e32588 100644
--- a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -349,6 +349,13 @@ def get_linear(weight, bias, quantize):
             raise NotImplementedError(
                 f"The passed weight is not `awq` compatible, loader needs to be updated."
             )
+        if IS_ROCM_SYSTEM:
+            raise NotImplementedError(
+                "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead "
+                "to use Exllama/GPTQ kernels for AWQ inference."
+            )
+        if not HAS_AWQ:
+            raise NotImplementedError("You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ with `--quantize gptq`: a conversion AWQ->GPTQ will happen on the fly")
         linear = WQLinear(
             w_bit=bits,
             group_size=groupsize,
diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
index 186733f3..8f7e1f10 100644
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -46,7 +46,6 @@ class Weights:
         return self._handles[filename]
 
     def get_filename(self, tensor_name: str) -> (str, str):
-
         names = [tensor_name]
         if self.prefix is not None:
             prefixed = f"{self.prefix}.{tensor_name}"
@@ -154,15 +153,30 @@ class Weights:
                     f"Cannot load `{quantize}` weight, make sure the model is already quantized."
                 )
 
+        bits, groupsize, _, quant_method = self._get_gptq_params()
+
         qzeros = self._get_qweight(f"{prefix}.qzeros")
         scales = self._get_qweight(f"{prefix}.scales")
         scales = scales.to(dtype=self.dtype)
-        if quantize == "gptq":
+
+        if quantize == "gptq" and quant_method == "gptq":
             g_idx = self.get_tensor(f"{prefix}.g_idx")
+        elif quantize == "gptq" and quant_method == "awq":
+            log_once(
+                logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
+            )
+            from text_generation_server.utils.awq.conversion_utils import (
+                fast_awq_to_gptq,
+            )
+
+            qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
+            g_idx = (
+                torch.arange(qweight.shape[0] * (32 // bits), device=qweight.device)
+                // groupsize
+            ).to(dtype=torch.int32)
         else:
             g_idx = None
 
-        bits, groupsize, _ = self._get_gptq_params()
         weight = (qweight, qzeros, scales, g_idx, bits, groupsize, False)
     else:
         slice_ = self._get_slice(f"{prefix}.weight")
@@ -204,20 +218,40 @@ class Weights:
                 [self.get_sharded(f"{p}.scales", dim=1) for p in prefixes], dim=1
             )
 
-            if quantize == "gptq":
-                w = [self.get_tensor(f"{p}.g_idx") for p in prefixes]
-                for w2 in w[1:]:
-                    torch.testing.assert_close(w2, w[0])
-                g_idx = w[0]
-            else:
-                g_idx = None
+            bits, groupsize, desc_act, quant_method = self._get_gptq_params()
 
-            bits, groupsize, desc_act = self._get_gptq_params()
             from text_generation_server.utils.layers import HAS_EXLLAMA
 
             use_exllama = (
                 bits == 4 and HAS_EXLLAMA and quantize == "gptq" and not desc_act
             )
+
+            if quantize == "gptq" and quant_method == "gptq":
+                w = [self.get_tensor(f"{p}.g_idx") for p in prefixes]
+                for w2 in w[1:]:
+                    torch.testing.assert_close(w2, w[0])
+                g_idx = w[0]
+            elif quantize == "gptq" and quant_method == "awq":
+                log_once(
+                    logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
+                )
+                from text_generation_server.utils.awq.conversion_utils import (
+                    fast_awq_to_gptq,
+                )
+
+                qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
+                if use_exllama:
+                    g_idx = None
+                else:
+                    g_idx = (
+                        torch.arange(
+                            qweight.shape[0] * (32 // bits), device=qweight.device
+                        )
+                        // groupsize
+                    ).to(dtype=torch.int32)
+            else:
+                g_idx = None
+
             weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
         else:
             w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
@@ -243,7 +277,7 @@ class Weights:
     def get_multi_weights_row(self, prefix: str, quantize: str):
         if quantize == "gptq":
             use_exllama = True
-            bits, groupsize, desc_act = self._get_gptq_params()
+            bits, groupsize, desc_act, quant_method = self._get_gptq_params()
 
             if bits != 4:
                 use_exllama = False
@@ -252,8 +286,19 @@ class Weights:
                     log_once(logger.warning, "Disabling exllama because desc_act=True")
                     use_exllama = False
 
+            try:
+                qweight = self.get_sharded(f"{prefix}.qweight", dim=0)
+            except RuntimeError:
+                raise RuntimeError(
+                    "Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
+                )
+
+            if quant_method == "gptq":
+                g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0)
+            elif quant_method == "awq":
+                g_idx = None
+
             if self.process_group.size() > 1:
-                g_idx = self.get_tensor(f"{prefix}.g_idx")
                 if g_idx is not None:
                     if (
                         not torch.equal(
@@ -269,13 +314,6 @@ class Weights:
                         # it would require to reorder input activations that are split unto several GPUs
                         use_exllama = False
 
-            try:
-                qweight = self.get_sharded(f"{prefix}.qweight", dim=0)
-            except RuntimeError:
-                raise RuntimeError(
-                    "Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
-                )
-
             from text_generation_server.utils.layers import HAS_EXLLAMA, CAN_EXLLAMA
 
             if use_exllama:
@@ -289,8 +327,6 @@ class Weights:
                 else:
                     log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}")
 
-            g_idx = self.get_sharded(f"{prefix}.g_idx", dim=0)
-
             if use_exllama and groupsize != -1:
                 qzeros = self.get_sharded(f"{prefix}.qzeros", dim=0)
                 scales = self.get_sharded(f"{prefix}.scales", dim=0)
@@ -298,12 +334,31 @@ class Weights:
                 qzeros = self.get_tensor(f"{prefix}.qzeros")
                 scales = self.get_tensor(f"{prefix}.scales")
 
-            if use_exllama:
+            if use_exllama and g_idx is not None:
                 g_idx = g_idx - g_idx[0]
 
+            if quant_method == "awq":
+                log_once(
+                    logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
+                )
+                from text_generation_server.utils.awq.conversion_utils import (
+                    fast_awq_to_gptq,
+                )
+
+                qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
+                if use_exllama:
+                    g_idx = None
+                else:
+                    g_idx = (
+                        torch.arange(
+                            qweight.shape[0] * (32 // bits), device=qweight.device
+                        )
+                        // groupsize
+                    ).to(dtype=torch.int32)
+
             weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
         elif quantize == "awq":
-            bits, groupsize, _ = self._get_gptq_params()
+            bits, groupsize, _, _ = self._get_gptq_params()
 
             try:
                 qweight = self.get_sharded(f"{prefix}.qweight", dim=0)
@@ -322,20 +377,22 @@ class Weights:
             weight = self.get_sharded(f"{prefix}.weight", dim=1)
         return weight
 
-    def _get_gptq_params(self) -> Tuple[int, int, int]:
+    def _get_gptq_params(self) -> Tuple[int, int, int, str]:
        try:
            bits = self.get_tensor("gptq_bits").item()
            groupsize = self.get_tensor("gptq_groupsize").item()
            desc_act = False
+           quant_method = "gptq"
        except (SafetensorError, RuntimeError) as e:
            try:
                bits = self.gptq_bits
                groupsize = self.gptq_groupsize
                desc_act = getattr(self, "gptq_desc_act", False)
+               quant_method = getattr(self, "quant_method", "gptq")
            except Exception:
                raise e
 
-       return bits, groupsize, desc_act
+       return bits, groupsize, desc_act, quant_method
 
     def _set_gptq_params(self, model_id, revision):
         filename = "config.json"
@@ -351,6 +408,7 @@ class Weights:
             self.gptq_bits = data["quantization_config"]["bits"]
             self.gptq_groupsize = data["quantization_config"]["group_size"]
             self.gptq_desc_act = data["quantization_config"]["desc_act"]
+            self.quant_method = data["quantization_config"]["quant_method"]
         except Exception:
             filename = "quantize_config.json"
             try:
@@ -365,6 +423,8 @@ class Weights:
                 self.gptq_bits = data["bits"]
                 self.gptq_groupsize = data["group_size"]
                 self.gptq_desc_act = data["desc_act"]
+                if "version" in data and data["version"] == "GEMM":
+                    self.quant_method = "awq"
             except Exception:
                 filename = "quant_config.json"
                 try:
@@ -379,5 +439,7 @@ class Weights:
                 self.gptq_bits = data["w_bit"]
                 self.gptq_groupsize = data["q_group_size"]
                 self.gptq_desc_act = data["desc_act"]
+                if "version" in data and data["version"] == "GEMM":
+                    self.quant_method = "awq"
             except Exception:
                 pass

From 518d30dec4a2a9ab59a9e11bcfc94340efd808ff Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Fri, 9 Feb 2024 12:38:41 +0100
Subject: [PATCH 075/153] feat(router): add max_batch_size (#1542)

Some hardware requires a maximum batch size.
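To make the interaction between the two flags concrete, here is a minimal Python sketch of the reconciliation rule that `router/src/main.rs` applies below. The function name `reconcile_batch_limits` is illustrative only and is not part of the codebase; the sketch assumes integer token counts.

```python
def reconcile_batch_limits(max_batch_size, max_batch_total_tokens, max_total_tokens):
    """Sketch of the router rule: when both flags are set but disagree,
    max_batch_size wins and the token budget is recomputed from it."""
    if max_batch_size is not None and max_batch_total_tokens is not None:
        if max_batch_total_tokens // max_total_tokens != max_batch_size:
            # values disagree: prefer max_batch_size
            return max_batch_size, max_batch_size * max_total_tokens
        return max_batch_size, max_batch_total_tokens
    if max_batch_size is not None:
        # derive the token budget from the batch size
        return max_batch_size, max_batch_size * max_total_tokens
    if max_batch_total_tokens is not None:
        # derive the batch size from the token budget
        return max_batch_total_tokens // max_total_tokens, max_batch_total_tokens
    return None, None

# With max_total_tokens=2048:
#   reconcile_batch_limits(8, None, 2048)    -> (8, 16384)
#   reconcile_batch_limits(8, 4096, 2048)    -> (8, 16384)  # 4096 // 2048 != 8
#   reconcile_batch_limits(None, 4096, 2048) -> (2, 4096)
```

The resolved pair is then what gets passed to warmup and to the batching task.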
---
 docs/source/basic_tutorials/launcher.md |  8 +++
 launcher/src/main.rs                    | 11 ++++
 router/client/src/client.rs             | 14 +----
 router/client/src/sharded_client.rs     |  9 ++-
 router/src/infer.rs                     | 13 +++-
 router/src/lib.rs                       |  2 +
 router/src/main.rs                      | 25 +++++++-
 router/src/queue.rs                     | 83 +++++++++++++++++++------
 router/src/server.rs                    |  3 +
 9 files changed, 134 insertions(+), 34 deletions(-)

diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md
index 712b4fc4..ba54f058 100644
--- a/docs/source/basic_tutorials/launcher.md
+++ b/docs/source/basic_tutorials/launcher.md
@@ -197,6 +197,14 @@ Options:
   [env: MAX_WAITING_TOKENS=]
   [default: 20]
 
+```
+## MAX_BATCH_SIZE
+```shell
+      --max-batch-size
+          Enforce a maximum number of requests per batch
+          Specific flag for hardware targets that do not support unpadded inference
+
+          [env: MAX_BATCH_SIZE=]
+
 ```
 ## HOSTNAME
 ```shell
diff --git a/launcher/src/main.rs b/launcher/src/main.rs
index 53a40ea8..428b00c1 100644
--- a/launcher/src/main.rs
+++ b/launcher/src/main.rs
@@ -281,6 +281,11 @@ struct Args {
     #[clap(default_value = "20", long, env)]
     max_waiting_tokens: usize,
 
+    /// Enforce a maximum number of requests per batch
+    /// Specific flag for hardware targets that do not support unpadded inference
+    #[clap(long, env)]
+    max_batch_size: Option<usize>,
+
     /// The IP address to listen on
     #[clap(default_value = "0.0.0.0", long, env)]
     hostname: String,
@@ -1056,6 +1061,12 @@ fn spawn_webserver(
         router_args.push(max_batch_total_tokens.to_string());
     }
 
+    // Router optional max batch size
+    if let Some(max_batch_size) = args.max_batch_size {
+        router_args.push("--max-batch-size".to_string());
+        router_args.push(max_batch_size.to_string());
+    }
+
     // Model optional revision
     if let Some(ref revision) = args.revision {
         router_args.push("--revision".to_string());
diff --git a/router/client/src/client.rs b/router/client/src/client.rs
index c61a4003..592338fa 100644
--- a/router/client/src/client.rs
+++ b/router/client/src/client.rs
@@ -109,7 +109,7 @@ impl Client {
         max_input_length: u32,
         max_prefill_tokens: u32,
         max_total_tokens: u32,
-        max_batch_total_tokens: Option<u32>,
+        max_batch_size: Option<usize>,
     ) -> Result<Option<u32>> {
         let warmup_enabled: bool = env::var("WARMUP_ENABLED").ok().map_or(true, |value| value.to_lowercase() == "true");
         if !warmup_enabled {
@@ -142,17 +142,9 @@ impl Client {
             }
         }
 
+        // if max_batch_size is None, create two batches
+        let num_batches = max_batch_size.unwrap_or(2).min(2);
         let mut id_counter: u64 = 0;
-        let num_batches = match max_batch_total_tokens {
-            Some(val) => {
-                if val == max_total_tokens {
-                    1
-                } else {
-                    2
-                }
-            }
-            None => 2, // If max_batch_total_tokens is None, create two batches
-        };
         for shape in shapes.iter() {
             // create two batches in order to trigger concatenate operation
             // in case decode bs=1 create one batch
diff --git a/router/client/src/sharded_client.rs b/router/client/src/sharded_client.rs
index 8d81da6a..e2c800dd 100644
--- a/router/client/src/sharded_client.rs
+++ b/router/client/src/sharded_client.rs
@@ -99,13 +99,18 @@ impl ShardedClient {
         max_input_length: u32,
         max_prefill_tokens: u32,
         max_total_tokens: u32,
-        max_batch_total_tokens: Option<u32>,
+        max_batch_size: Option<usize>,
     ) -> Result<Option<u32>> {
         let futures: Vec<_> = self
             .clients
             .iter_mut()
             .map(|client| {
-                Box::pin(client.warmup(max_input_length, max_prefill_tokens, max_total_tokens, max_batch_total_tokens))
+                Box::pin(client.warmup(
+                    max_input_length,
+                    max_prefill_tokens,
+                    max_total_tokens,
+                    max_batch_size,
+                ))
             })
             .collect();
         // Take the minimum value
diff --git a/router/src/infer.rs b/router/src/infer.rs
index 7b6b9910..48369de9 100644
--- a/router/src/infer.rs
+++ b/router/src/infer.rs
@@ -63,6 +63,7 @@ impl Infer {
         max_batch_prefill_tokens: u32,
         max_batch_total_tokens: u32,
         max_waiting_tokens: usize,
+        max_batch_size: Option<usize>,
         max_concurrent_requests: usize,
         requires_padding: bool,
         max_input_length: u32,
@@ -92,6 +93,7 @@ impl Infer {
             max_batch_prefill_tokens,
             max_batch_total_tokens,
             max_waiting_tokens,
+            max_batch_size,
             queue.clone(),
             shared.clone(),
             generation_health,
@@ -349,6 +351,7 @@ async fn batching_task(
     max_batch_prefill_tokens: u32,
     max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
+   max_batch_size: Option<usize>,
    queue: Queue,
    shared: Arc<Shared>,
    generation_health: Arc<AtomicBool>,
@@ -362,7 +365,12 @@ async fn batching_task(
         // This batch might be smaller than the maximum batch size if there are not enough requests
         // waiting in the queue
         while let Some((mut entries, batch, span)) = queue
-            .next_batch(None, max_batch_prefill_tokens, max_batch_total_tokens)
+            .next_batch(
+                None,
+                max_batch_size,
+                max_batch_prefill_tokens,
+                max_batch_total_tokens,
+            )
             .await
         {
             let mut cached_batch = prefill(&mut client, batch, &mut entries, &generation_health)
@@ -390,10 +398,11 @@ async fn batching_task(
                 };
 
                 let token_budget = max_batch_total_tokens.saturating_sub(batch_max_tokens);
+                let max_size = max_batch_size.map(|max_size| max_size - batch_size as usize);
 
                 // Try to get a new batch
                 if let Some((mut new_entries, new_batch, span)) = queue
-                    .next_batch(min_size, max_batch_prefill_tokens, token_budget)
+                    .next_batch(min_size, max_size, max_batch_prefill_tokens, token_budget)
                     .await
                 {
                     // Tracking metrics
diff --git a/router/src/lib.rs b/router/src/lib.rs
index 7c44d642..3ce9eca8 100644
--- a/router/src/lib.rs
+++ b/router/src/lib.rs
@@ -73,6 +73,8 @@ pub struct Info {
     pub max_batch_total_tokens: u32,
     #[schema(example = "20")]
     pub max_waiting_tokens: usize,
+    #[schema(nullable = true, example = "null")]
+    pub max_batch_size: Option<usize>,
     #[schema(example = "2")]
     pub validation_workers: usize,
     /// Router Info
diff --git a/router/src/main.rs b/router/src/main.rs
index 702393aa..1757e459 100644
--- a/router/src/main.rs
+++ b/router/src/main.rs
@@ -48,6 +48,8 @@ struct Args {
     max_batch_total_tokens: Option<u32>,
     #[clap(default_value = "20", long, env)]
     max_waiting_tokens: usize,
+    #[clap(long, env)]
+    max_batch_size: Option<usize>,
     #[clap(default_value = "0.0.0.0", long, env)]
     hostname: String,
     #[clap(default_value = "3000", long, short, env)]
@@ -94,6 +96,7 @@ async fn main() -> Result<(), RouterError> {
         max_batch_prefill_tokens,
         max_batch_total_tokens,
         max_waiting_tokens,
+        max_batch_size,
         hostname,
         port,
         master_shard_uds_path,
@@ -138,6 +141,25 @@ async fn main() -> Result<(), RouterError> {
         }
     }
 
+    let (max_batch_size, max_batch_total_tokens) = match (max_batch_size, max_batch_total_tokens) {
+        (Some(_max_batch_size), Some(_max_batch_total_tokens)) => {
+            if (_max_batch_total_tokens as usize / max_total_tokens) != _max_batch_size {
+                tracing::warn!("max_batch_size was set to {_max_batch_size} while max_batch_total_tokens was set to {_max_batch_total_tokens}");
+                tracing::warn!("These values do not match, so max_batch_size will be preferred");
+                (Some(_max_batch_size), Some((_max_batch_size * max_total_tokens) as u32))
+            } else {
+                (Some(_max_batch_size), Some(_max_batch_total_tokens))
+            }
+        },
+        (Some(_max_batch_size), None) => (
+            Some(_max_batch_size), Some((_max_batch_size * max_total_tokens) as u32)
+        ),
+        (None, Some(_max_batch_total_tokens)) => (
+            Some(_max_batch_total_tokens as usize / max_total_tokens), Some(_max_batch_total_tokens)
+        ),
+        (None, None) => (None, None),
+    };
+
     // CORS allowed origins
     // map to go inside the option and then map to parse from String to HeaderValue
     // Finally, convert to AllowOrigin
@@ -298,7 +320,7 @@ async fn main() -> Result<(), RouterError> {
             max_input_length as u32,
             max_batch_prefill_tokens,
             max_total_tokens as u32,
-            max_batch_total_tokens,
+            max_batch_size,
         )
         .await
         .map_err(RouterError::Warmup)?
@@ -355,6 +377,7 @@ async fn main() -> Result<(), RouterError> {
         max_batch_prefill_tokens,
         max_supported_batch_total_tokens,
         max_waiting_tokens,
+        max_batch_size,
         sharded_client,
         tokenizer,
         validation_workers,
diff --git a/router/src/queue.rs b/router/src/queue.rs
index 9e3494f7..00021812 100644
--- a/router/src/queue.rs
+++ b/router/src/queue.rs
@@ -79,6 +79,7 @@ impl Queue {
     pub(crate) async fn next_batch(
         &self,
         min_size: Option<usize>,
+        max_size: Option<usize>,
         prefill_token_budget: u32,
         token_budget: u32,
     ) -> Option<NextBatch> {
@@ -89,6 +90,7 @@ impl Queue {
         self.queue_sender
             .send(QueueCommand::NextBatch {
                 min_size,
+                max_size,
                 prefill_token_budget,
                 token_budget,
                 response_sender,
@@ -128,12 +130,14 @@ async fn queue_task(
             }
             QueueCommand::NextBatch {
                 min_size,
+                max_size,
                 prefill_token_budget,
                 token_budget,
                 response_sender,
                 span,
             } => span.in_scope(|| {
-                let next_batch = state.next_batch(min_size, prefill_token_budget, token_budget);
+                let next_batch =
+                    state.next_batch(min_size, max_size, prefill_token_budget, token_budget);
                 response_sender.send(next_batch).unwrap();
                 metrics::gauge!("tgi_queue_size", state.entries.len() as f64);
             }),
@@ -308,6 +312,7 @@ impl State {
     fn next_batch(
         &mut self,
         min_size: Option<usize>,
+        max_size: Option<usize>,
         prefill_token_budget: u32,
         token_budget: u32,
     ) -> Option<NextBatch> {
@@ -403,6 +408,11 @@ impl State {
             entry.batch_time = Some(Instant::now());
             // Insert in batch_entries IntMap
             batch_entries.insert(id, entry);
+
+            // Check if max_size is reached
+            if Some(batch_requests.len()) == max_size {
+                break;
+            }
         }
 
         // Empty batch
@@ -451,6 +461,7 @@ enum QueueCommand {
     Append(Box<Entry>, Span),
     NextBatch {
         min_size: Option<usize>,
+        max_size: Option<usize>,
         prefill_token_budget: u32,
         token_budget: u32,
         response_sender: oneshot::Sender<Option<NextBatch>>,
@@ -535,8 +546,8 @@ mod tests {
     fn test_next_batch_empty() {
         let mut state = default_state();
 
-        assert!(state.next_batch(None, 1, 1).is_none());
-        assert!(state.next_batch(Some(1), 1, 1).is_none());
+        assert!(state.next_batch(None, None, 1, 1).is_none());
+        assert!(state.next_batch(Some(1), None, 1, 1).is_none());
     }
 
     #[test]
@@ -547,7 +558,7 @@ mod tests {
         state.append(entry1);
         state.append(entry2);
 
-        let (entries, batch, _) = state.next_batch(None, 2, 4).unwrap();
+        let (entries, batch, _) = state.next_batch(None, None, 2, 4).unwrap();
         assert_eq!(entries.len(), 2);
         assert!(entries.contains_key(&0));
         assert!(entries.contains_key(&1));
@@ -563,7 +574,7 @@ mod tests {
         let (entry3, _guard3) = default_entry();
         state.append(entry3);
 
-        assert!(state.next_batch(Some(2), 2, 2).is_none());
+        assert!(state.next_batch(Some(2), None, 2, 2).is_none());
 
         assert_eq!(state.next_id, 3);
         assert_eq!(state.entries.len(), 1);
@@ -571,6 +582,26 @@ mod tests {
         assert_eq!(id, 2);
     }
 
+    #[test]
+    fn test_next_batch_max_size() {
+        let mut state = default_state();
+        let (entry1, _guard1) = default_entry();
+        let (entry2, _guard2) = default_entry();
+        state.append(entry1);
+        state.append(entry2);
+
+        let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).unwrap();
+        assert_eq!(entries.len(), 1);
+        assert!(entries.contains_key(&0));
+        assert!(entries.get(&0).unwrap().batch_time.is_some());
+        assert_eq!(batch.id, 0);
+        assert_eq!(batch.size, 1);
+
+        assert_eq!(state.next_id, 2);
+        assert_eq!(state.entries.len(), 1);
+        assert_eq!(state.next_batch_id, 1);
+    }
+
     #[test]
     fn test_next_batch_token_budget() {
         let mut state = default_state();
@@ -579,7 +610,7 @@ mod tests {
         state.append(entry1);
         state.append(entry2);
 
-        let (entries, batch, _) = state.next_batch(None, 1, 2).unwrap();
+        let (entries, batch, _) = state.next_batch(None, None, 1, 2).unwrap();
         assert_eq!(entries.len(), 1);
         assert!(entries.contains_key(&0));
         assert_eq!(batch.id, 0);
@@ -592,7 +623,7 @@ mod tests {
         let (entry3, _guard3) = default_entry();
         state.append(entry3);
 
-        let (entries, batch, _) = state.next_batch(None, 3, 6).unwrap();
+        let (entries, batch, _) = state.next_batch(None, None, 3, 6).unwrap();
         assert_eq!(entries.len(), 2);
         assert!(entries.contains_key(&1));
         assert!(entries.contains_key(&2));
@@ -615,8 +646,8 @@ mod tests {
     async fn test_queue_next_batch_empty() {
         let queue = default_queue();
 
-        assert!(queue.next_batch(None, 1, 1).await.is_none());
-        assert!(queue.next_batch(Some(1), 1, 1).await.is_none());
+        assert!(queue.next_batch(None, None, 1, 1).await.is_none());
+        assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none());
     }
 
     #[tokio::test]
@@ -627,7 +658,7 @@ mod tests {
         queue.append(entry1);
         queue.append(entry2);
 
-        let (entries, batch, _) = queue.next_batch(None, 2, 4).await.unwrap();
+        let (entries, batch, _) = queue.next_batch(None, None, 2, 4).await.unwrap();
         assert_eq!(entries.len(), 2);
         assert!(entries.contains_key(&0));
         assert!(entries.contains_key(&1));
@@ -640,11 +671,11 @@ mod tests {
         queue.append(entry3);
 
         // Not enough requests pending
-        assert!(queue.next_batch(Some(2), 2, 2).await.is_none());
+        assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none());
         // Not enough token budget
-        assert!(queue.next_batch(Some(1), 0, 0).await.is_none());
+        assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none());
         // Ok
-        let (entries2, batch2, _) = queue.next_batch(Some(1), 1, 2).await.unwrap();
+        let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 4).await.unwrap();
         assert_eq!(entries2.len(), 1);
         assert!(entries2.contains_key(&2));
         assert!(entries2.get(&2).unwrap().batch_time.is_some());
@@ -652,6 +683,22 @@ mod tests {
         assert_eq!(batch2.id, 1);
         assert_eq!(batch2.size, 1);
     }
 
+    #[tokio::test]
+    async fn test_queue_next_batch_max_size() {
+        let queue = default_queue();
+        let (entry1, _guard1) = default_entry();
+        let (entry2, _guard2) = default_entry();
+        queue.append(entry1);
+        queue.append(entry2);
+
+        let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap();
+        assert_eq!(entries.len(), 1);
+        assert!(entries.contains_key(&0));
+        assert!(entries.get(&0).unwrap().batch_time.is_some());
+        assert_eq!(batch.id, 0);
+        assert_eq!(batch.size, 1);
+    }
+
     #[tokio::test]
     async fn test_queue_next_batch_token_budget() {
         let queue = default_queue();
@@ -660,7 +707,7 @@ mod tests {
         queue.append(entry1);
         queue.append(entry2);
 
-        let (entries, batch, _) = queue.next_batch(None, 1, 2).await.unwrap();
+        let (entries, batch, _) = queue.next_batch(None, None, 1, 2).await.unwrap();
         assert_eq!(entries.len(), 1);
         assert!(entries.contains_key(&0));
         assert_eq!(batch.id, 0);
@@ -669,7 +716,7 @@ mod tests {
         let (entry3, _guard3) = default_entry();
         queue.append(entry3);
 
-        let (entries, batch, _) = queue.next_batch(None, 2, 4).await.unwrap();
+        let (entries, batch, _) = queue.next_batch(None, None, 3, 6).await.unwrap();
         assert_eq!(entries.len(), 2);
         assert!(entries.contains_key(&1));
         assert!(entries.contains_key(&2));
@@ -686,9 +733,9 @@ mod tests {
         queue.append(entry2);
 
         // Budget of 1 is not enough
-        assert!(queue.next_batch(None, 1, 1).await.is_none());
+        assert!(queue.next_batch(None, None, 1, 1).await.is_none());
 
-        let (entries, batch, _) = queue.next_batch(None, 6, 6).await.unwrap();
+        let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap();
         assert_eq!(entries.len(), 2);
         assert!(entries.contains_key(&0));
         assert!(entries.contains_key(&1));
@@ -702,6 +749,6 @@ mod tests {
         let (entry, _) = default_entry();
         queue.append(entry);
 
-        assert!(queue.next_batch(None, 1, 1).await.is_none());
+        assert!(queue.next_batch(None, None, 1, 1).await.is_none());
     }
 }
diff --git a/router/src/server.rs b/router/src/server.rs
index 15ad6b33..450494df 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -770,6 +770,7 @@ pub async fn run(
     max_batch_prefill_tokens: u32,
     max_batch_total_tokens: u32,
     max_waiting_tokens: usize,
+    max_batch_size: Option<usize>,
     client: ShardedClient,
     tokenizer: Option<Tokenizer>,
     validation_workers: usize,
@@ -851,6 +852,7 @@ pub async fn run(
         max_batch_prefill_tokens,
         max_batch_total_tokens,
         max_waiting_tokens,
+        max_batch_size,
         max_concurrent_requests,
         shard_info.requires_padding,
         max_input_length as u32,
@@ -934,6 +936,7 @@ pub async fn run(
         waiting_served_ratio,
         max_batch_total_tokens,
         max_waiting_tokens,
+        max_batch_size,
         validation_workers,
         version: env!("CARGO_PKG_VERSION"),
         sha: option_env!("VERGEN_GIT_SHA"),

From 0c207f71ed0efdfea2fc7e9f15907c6ce8f481be Mon Sep 17 00:00:00 2001
From: OlivierDehaene
Date: Mon, 12 Feb 2024 10:09:29 +0100
Subject: [PATCH 076/153] feat: experimental support for cuda graphs (#1428)

Co-authored-by: Nicolas Patry
---
 Dockerfile                                    |   1 +
 Dockerfile_amd                                |   2 +-
 docs/source/basic_tutorials/launcher.md       |   8 ++
 integration-tests/conftest.py                 |   5 +-
 launcher/src/main.rs                          |  14 +-
 server/Makefile-awq                           |   6 +-
 .../exllama_kernels/cuda_func/q4_matmul.cu    |   5 +-
 .../exllama_kernels/cuda_func/q4_matmul.cuh   |   8 +-
 .../exllama_kernels/cuda_func/q4_matrix.cu    |  11 +-
 .../exllama_kernels/exllama_ext.cpp           |   6 +-
 .../exllamav2_kernels/cuda/q_gemm.cu          |   5 +-
 .../exllamav2_kernels/cuda/q_matrix.cu        |  11 +-
 .../custom_modeling/flash_mistral_modeling.py |   8 +-
 .../custom_modeling/flash_mixtral_modeling.py |   8 +-
 .../models/flash_causal_lm.py                 | 131 ++++++++++++++++--
 .../models/flash_mistral.py                   | 123 ++++++++++++++--
 .../text_generation_server/utils/weights.py  |   3 +-
 17 files changed, 299 insertions(+), 56 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index c49f43e6..b5a9291d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,6 @@
 # Rust builder
 FROM lukemathwalker/cargo-chef:latest-rust-1.75 AS chef
+FROM lukemathwalker/cargo-chef:latest-rust-1.75 AS chef
 WORKDIR /usr/src
 
 FROM chef as planner
diff --git a/Dockerfile_amd b/Dockerfile_amd
index d2b6f897..c2ec4a6d 100644
--- a/Dockerfile_amd
+++ b/Dockerfile_amd
@@ -1,5 +1,5 @@
 # Rust builder
-FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
+FROM lukemathwalker/cargo-chef:latest-rust-1.75 AS chef
 WORKDIR /usr/src
 
 ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md
index ba54f058..be31a7a4 100644
--- a/docs/source/basic_tutorials/launcher.md
+++ b/docs/source/basic_tutorials/launcher.md
@@ -205,6 +205,14 @@ Options:
   [env: MAX_BATCH_SIZE=]
 
+```
+## ENABLE_CUDA_GRAPHS
+```shell
+      --enable-cuda-graphs
+          Enable experimental support for cuda graphs
+
+          [env: ENABLE_CUDA_GRAPHS=]
+
 ```
 ## HOSTNAME
 ```shell
diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py
index 4cb4ca59..efeda08d 100644
--- a/integration-tests/conftest.py
+++ b/integration-tests/conftest.py
@@ -317,7 +317,10 @@ def launcher(event_loop):
 
         gpu_count = num_shard if num_shard is not None else 1
 
-        env = {"LOG_LEVEL": "info,text_generation_router=debug"}
+        env = {
+            "LOG_LEVEL": "info,text_generation_router=debug",
+            "ENABLE_CUDA_GRAPHS": "true",
+        }
         if not use_flash_attention:
             env["USE_FLASH_ATTENTION"] = "false"
 
diff --git a/launcher/src/main.rs b/launcher/src/main.rs
index 428b00c1..b5b476fa 100644
--- a/launcher/src/main.rs
+++ b/launcher/src/main.rs
@@ -286,6 +286,10 @@ struct Args {
     #[clap(long, env)]
     max_batch_size: Option<usize>,
 
+    /// Enable experimental support for cuda graphs
+    #[clap(long, env)]
+    enable_cuda_graphs: bool,
+
     /// The IP address to listen on
     #[clap(default_value = "0.0.0.0", long, env)]
     hostname: String,
@@ -410,6 +414,7 @@ fn shard_manager(
     disable_custom_kernels: bool,
     watermark_gamma: Option<f32>,
     watermark_delta: Option<f32>,
+    enable_cuda_graphs: bool,
     cuda_memory_fraction: f32,
     rope_scaling: Option<RopeScaling>,
     rope_factor: Option<f32>,
@@ -496,7 +501,7 @@ fn shard_manager(
     envs.push(("WORLD_SIZE".into(), world_size.to_string().into()));
     envs.push(("MASTER_ADDR".into(), master_addr.into()));
     envs.push(("MASTER_PORT".into(), master_port.to_string().into()));
-    envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into()));
+    envs.push(("TORCH_NCCL_AVOID_RECORD_STREAMS".into(), "1".into()));
 
     // CUDA memory fraction
     envs.push((
@@ -546,6 +551,11 @@ fn shard_manager(
         ));
     };
 
+    // Enable experimental support for cuda graphs
+    if enable_cuda_graphs {
+        envs.push(("ENABLE_CUDA_GRAPHS".into(), "True".into()))
+    }
+
     // If disable_custom_kernels is true, pass it to the shard as an env var
     if disable_custom_kernels {
         envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into()))
@@ -935,6 +945,7 @@ fn spawn_shards(
     let disable_custom_kernels = args.disable_custom_kernels;
     let watermark_gamma = args.watermark_gamma;
     let watermark_delta = args.watermark_delta;
+    let enable_cuda_graphs = args.enable_cuda_graphs;
     let cuda_memory_fraction = args.cuda_memory_fraction;
     let rope_scaling = args.rope_scaling;
     let rope_factor = args.rope_factor;
@@ -957,6 +968,7 @@ fn spawn_shards(
                 disable_custom_kernels,
                 watermark_gamma,
                 watermark_delta,
+                enable_cuda_graphs,
                 cuda_memory_fraction,
                 rope_scaling,
                 rope_factor,
diff --git a/server/Makefile-awq b/server/Makefile-awq
index 80e78c08..5dd9dbaa 100644
--- a/server/Makefile-awq
+++ b/server/Makefile-awq
@@ -1,8 +1,10 @@
-awq_commit := f084f40bd996f3cf3a0633c1ad7d9d476c318aaa
+# Fork that adds only the correct stream to this kernel in order
+# to make cuda graphs work.
+awq_commit := bd1dc2d5254345cc76ab71894651fb821275bdd4
 
 awq:
 	rm -rf llm-awq
-	git clone https://github.com/mit-han-lab/llm-awq
+	git clone https://github.com/huggingface/llm-awq
 
 build-awq: awq
 	cd llm-awq/ && git fetch && git checkout $(awq_commit)
diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu
index 09126efe..1b0f7956 100644
--- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu
+++ b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu
@@ -1,5 +1,6 @@
 #include "q4_matmul.cuh"
 #include "column_remap.cuh"
+#include <ATen/cuda/CUDAContext.h>
 #include "../util.cuh"
 #include "../matrix.cuh"
 #include "../cu_compat.cuh"
@@ -224,8 +225,8 @@ void q4_matmul_recons_cuda
     const int x_height,
     Q4Matrix* w,
     half* out,
-    const cublasHandle_t handle,
-    bool no_zero
+    bool no_zero,
+    const cublasHandle_t handle
 )
 {
     int height = x_height;
diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh
index 63611790..4c7a6669 100644
--- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh
+++ b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh
@@ -19,8 +19,8 @@ void q4_matmul_cuda
     const int x_height,
     const Q4Matrix* w,
     half* out,
-    bool no_zero = false,
-    cudaStream_t alt_stream = NULL
+    bool no_zero,
+    cudaStream_t alt_stream
 );
 
 void q4_matmul_recons_cuda
@@ -30,8 +30,8 @@ void q4_matmul_recons_cuda
     const int x_height,
     Q4Matrix* w,
     half* out,
-    const cublasHandle_t handle,
-    bool no_zero = false
+    bool no_zero,
+    const cublasHandle_t handle
 );
 
 #endif
diff --git a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu
index 2867a8d0..1f32e6b8 100644
--- a/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu
+++ b/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu
@@ -1,5 +1,6 @@
 // Adapted from turboderp exllama: https://github.com/turboderp/exllama
 
+#include <ATen/cuda/CUDAContext.h>
 #include "q4_matrix.cuh"
 #include <cuda_runtime.h>
 #include "../util.cuh"
@@ -90,7 +91,7 @@ __global__ void make_sequential_kernel
     int w2_row_shift = w2_subrow << 2;
     int wnew2_row_shift = i << 2;
 
-    uint64_t src = w2[w2_row * w2_stride + w2_column];
+    uint64_t src = w2[w2_row * w2_stride + w2_column];
     src >>= w2_row_shift;
     src &= 0x0000000f0000000f;
     src <<= wnew2_row_shift;
@@ -146,7 +147,8 @@ void Q4Matrix::make_sequential(const uint32_t* cpu_g_idx)
     dim3 threads(UNSHUF_BLOCKSIZE_X, 1, 1);
     dim3 blocks(width / UNSHUF_BLOCKSIZE_X / 2, height / 8, 1);
 
-    make_sequential_kernel<<<blocks, threads>>>(cuda_qweight, cuda_new_qweight, cuda_x_map, height / 8, width);
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+    make_sequential_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, cuda_new_qweight, cuda_x_map, height / 8, width);
 
     // Replace qweights
 
@@ -213,5 +215,6 @@ void Q4Matrix::reconstruct(half* out)
         1
     );
 
-    reconstruct_kernel<<<blocks, threads>>>(cuda_qweight, out, cuda_scales, cuda_qzeros, height / 8, width, groupsize);
-}
\ No newline at end of file
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+    reconstruct_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, out, cuda_scales, cuda_qzeros, height / 8, width, groupsize);
+}
diff --git a/server/exllama_kernels/exllama_kernels/exllama_ext.cpp b/server/exllama_kernels/exllama_kernels/exllama_ext.cpp
index b786988b..f2df80e8 100644
--- a/server/exllama_kernels/exllama_kernels/exllama_ext.cpp
+++ b/server/exllama_kernels/exllama_kernels/exllama_ext.cpp
@@ -183,6 +183,7 @@ void q4_matmul
     int x_height = x.size(0);
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
     if (tuningParams.matmul_recons_thd == 0 || x_height < tuningParams.matmul_recons_thd)
     {
         q4_matmul_cuda
         (
@@ -191,7 +192,9 @@ void q4_matmul
             (half*) x.data_ptr(),
             x_height,
             wm,
-            (half*) out.data_ptr()
+            (half*) out.data_ptr(),
+            false,
+            stream
         );
     }
     else
@@ -203,6 +206,7 @@ void q4_matmul
             x_height,
             wm,
             (half*) out.data_ptr(),
+            false,
             at::cuda::getCurrentCUDABlasHandle()
         );
     }
diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu
index b4e4cf22..5b99f1ba 100644
--- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu
+++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu
@@ -38,6 +38,7 @@ void gemm_half_q_half_cuda_part
     bool mul_r_weights
 )
 {
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
     if (!b->is_gptq)
     {
         dim3 blockDim, gridDim;
@@ -50,7 +51,7 @@ void gemm_half_q_half_cuda_part
 
         fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights);
 
-        kernel<<<gridDim, blockDim>>>
+        kernel<<<gridDim, blockDim, 0, stream>>>
         (
             a,
             b->cuda_q_weight,
@@ -91,7 +92,7 @@ void gemm_half_q_half_cuda_part
         // print_global_mem(r_weights, 1, 1, 1);
         // DBGI(r_weights_stride);
 
-        kernel<<<gridDim, blockDim>>>
+        kernel<<<gridDim, blockDim, 0, stream>>>
         (
             a,
             b->cuda_q_weight,
diff --git a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu
index 7a0038b4..f7a91e29 100644
--- a/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu
+++ b/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu
@@ -168,8 +168,9 @@ QMatrix::QMatrix
     blockDim.y = 1;
     gridDim.x = DIVIDE(width, THREADS_X);
     gridDim.y = 1;
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
-    shuffle_kernel<<<gridDim, blockDim>>>(cuda_q_weight, height, width, rows_8, rows_6, rows_5, rows_4, rows_3, rows_2);
+    shuffle_kernel<<<gridDim, blockDim, 0, stream>>>(cuda_q_weight, height, width, rows_8, rows_6, rows_5, rows_4, rows_3, rows_2);
 }
 
 QMatrix::~QMatrix()
@@ -475,11 +476,12 @@ void QMatrix::reconstruct(half* out)
     blockDim.x = BLOCK_KN_SIZE;
     blockDim.y = 1;
     gridDim.y = DIVIDE(height, BLOCK_KN_SIZE);
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
     if (!is_gptq)
     {
         gridDim.x = DIVIDE(width, BLOCK_KN_SIZE);
-        reconstruct_kernel<<<gridDim, blockDim>>>
+        reconstruct_kernel<<<gridDim, blockDim, 0, stream>>>
         (
             cuda_q_weight,
             cuda_q_perm,
@@ -502,7 +504,7 @@ void QMatrix::reconstruct(half* out)
     else
     {
         gridDim.x = DIVIDE(width, BLOCK_KN_SIZE * 4);
-        reconstruct_gptq_kernel<<<gridDim, blockDim>>>
+        reconstruct_gptq_kernel<<<gridDim, blockDim, 0, stream>>>
         (
             cuda_q_weight,
             cuda_q_perm,
@@ -563,6 +565,7 @@ __global__ void make_sequential_kernel
 
 bool QMatrix::make_sequential(const uint32_t* cpu_g_idx)
 {
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
     uint32_t* cuda_new_qweight = NULL;
     cudaError_t err = cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
     if (err != cudaSuccess) {
@@ -621,7 +624,7 @@ bool QMatrix::make_sequential(const uint32_t* cpu_g_idx)
     gridDim.x = DIVIDE(width, THREADS_X);
     gridDim.y = height / 8;
 
-    make_sequential_kernel<<<gridDim, blockDim>>>
+    make_sequential_kernel<<<gridDim, blockDim, 0, stream>>>
     (
         cuda_q_weight,
         cuda_new_qweight,
diff --git a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
index 0fc4e1b3..7b45be57 100644
--- a/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_mistral_modeling.py
@@ -425,6 +425,11 @@ class FlashMistralForCausalLM(torch.nn.Module):
weights=weights, ) self.max_past = config.sliding_window + self.max_past_tensor = ( + torch.tensor(config.sliding_window, device=weights.device) + if self.max_past is not None + else None + ) def forward( self, @@ -446,8 +451,7 @@ class FlashMistralForCausalLM(torch.nn.Module): elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values - max_s = min(self.max_past, max_s) - input_lengths = torch.clamp(input_lengths, max=self.max_past) + input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor) hidden_states = self.model( input_ids, diff --git a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py index 61488ec4..c91b2224 100644 --- a/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py +++ b/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py @@ -816,6 +816,11 @@ class FlashMixtralForCausalLM(torch.nn.Module): weights=weights, ) self.max_past = config.sliding_window + self.max_past_tensor = ( + torch.tensor(config.sliding_window, device=weights.device) + if self.max_past is not None + else None + ) def forward( self, @@ -837,8 +842,7 @@ class FlashMixtralForCausalLM(torch.nn.Module): elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values - max_s = min(self.max_past, max_s) - input_lengths = torch.clamp(input_lengths, max=self.max_past) + input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor) hidden_states = self.model( input_ids, diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 90776654..c7fda516 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -1,4 +1,5 @@ import math +import os import time import itertools import torch @@ -6,6 +7,7 @@ import torch.distributed import numpy as np +from loguru import logger from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase @@ -31,6 +33,8 @@ from text_generation_server.utils.dist import MEMORY_FRACTION tracer = trace.get_tracer(__name__) +MEM_POOL = torch.cuda.graph_pool_handle() + @dataclass class FlashCausalLMBatch(Batch): @@ -62,7 +66,7 @@ class FlashCausalLMBatch(Batch): # Set in prefill by the CacheManager # list of length b of list of length s_i // block_size block_tables: Optional[List[List[int]]] - # tensor of size [b, max_seqlen // block_size] holding the paged attention block tables for all sequences + # tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences block_tables_tensor: Optional[torch.Tensor] # tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences slots: Optional[torch.Tensor] @@ -663,6 +667,8 @@ class FlashCausalLM(Model): self.num_kv_heads = num_kv_heads self.head_size = head_size + self.cuda_graphs = {} + super(FlashCausalLM, self).__init__( model=model, tokenizer=tokenizer, @@ -678,7 +684,60 @@ class FlashCausalLM(Model): def batch_type(self) -> Type[FlashCausalLMBatch]: return FlashCausalLMBatch + def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): + input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device) + 
position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) + slots = torch.arange(bs, dtype=torch.int32, device=self.device) + input_lengths = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s + block_tables = ( + torch.arange(max_bt, dtype=torch.int32, device=self.device) + .repeat(bs) + .reshape((bs, max_bt)) + ) + kv_cache = get_cache_manager().kv_cache + + self.cuda_graphs[bs] = { + "input_ids": input_ids, + "position_ids": position_ids, + "kv_cache": kv_cache, + "block_tables": block_tables, + "slots": slots, + "input_lengths": input_lengths, + } + graph = torch.cuda.CUDAGraph() + self.cuda_graphs[bs]["graph"] = graph + + torch.cuda.synchronize() + # Run once outside to warmup + self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=None, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + lm_head_indices=None, + ) + torch.cuda.synchronize() + + with torch.cuda.graph(graph, pool=MEM_POOL): + self.cuda_graphs[bs]["logits"] = self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=None, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + lm_head_indices=None, + ) + torch.cuda.synchronize() + def warmup(self, batch: FlashCausalLMBatch): + # The warmup batch is the biggest batch we could ever receive torch.cuda.empty_cache() try: cache_manager = set_cache_manager( @@ -690,6 +749,8 @@ class FlashCausalLM(Model): self.dtype, self.device, ) + max_bt = batch.max_blocks + max_s = max_bt * get_cache_manager().block_size _, batch, _ = self.generate_token(batch) except torch.cuda.OutOfMemoryError as e: raise RuntimeError( @@ -713,7 +774,8 @@ class FlashCausalLM(Model): ) num_blocks = ( - int(free_memory // total_cache_size) + # Leave 5% for some wiggle room + int((free_memory * 0.95) // total_cache_size) # Add batch.blocks as we allocated it above, so it is included in the peak memory. 
+ cache_manager.num_blocks ) @@ -731,9 +793,19 @@ class FlashCausalLM(Model): self.device, ) + if os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True": + try: + logger.info("Experimental support for Cuda Graphs is enabled") + # Warmup cuda graphs + for bs in [1, 2, 4] + [8 * i for i in range(8)]: + if self.speculate is None or self.speculate + 1 <= bs: + self.cuda_graph_warmup(bs, max_s, max_bt) + except Exception: + logger.exception(f"Decode cuda graph warmup failed") + return int(num_blocks * BLOCK_SIZE) - def forward(self, batch: FlashCausalLMBatch) -> Tuple[torch.Tensor, torch.Tensor]: + def forward(self, batch: FlashCausalLMBatch) -> torch.Tensor: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids @@ -785,17 +857,48 @@ class FlashCausalLM(Model): max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices - return self.model.forward( - input_ids=input_ids, - position_ids=position_ids, - cu_seqlen_prefill=cu_seqlen_prefill, - kv_cache=kv_cache, - block_tables=block_tables, - slots=slots, - input_lengths=input_lengths, - max_s=max_s, - lm_head_indices=lm_head_indices, - ) + bs = input_ids.shape[0] + padded_bs = bs + if bs == 3: + padded_bs = 4 + elif 3 < bs <= 8: + padded_bs = 8 + elif bs > 8: + padded_bs = (bs + 7) // 8 * 8 + + # Try to find an associated cuda graph + cuda_graph = self.cuda_graphs.get(padded_bs, None) + + if cu_seqlen_prefill is not None or cuda_graph is None or batch.speculative_ids is not None: + return self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=cu_seqlen_prefill, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + lm_head_indices=lm_head_indices, + ) + + # Copy inputs to the static inputs of the cuda graph + # Static inputs are potentially padded + cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids + cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids + cuda_graph["block_tables"][ + : block_tables.shape[0], : block_tables.shape[1] + ] = block_tables + cuda_graph["slots"].fill_(-1) + cuda_graph["slots"][: slots.shape[0]] = slots + cuda_graph["input_lengths"].zero_() + cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths + + # Replay the graph + cuda_graph["graph"].replay() + + # Slice output to the correct shape + return cuda_graph["logits"][:bs] @tracer.start_as_current_span("generate_token") def generate_token( diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index 8c6cb025..34a50194 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ b/server/text_generation_server/models/flash_mistral.py @@ -35,6 +35,8 @@ tracer = trace.get_tracer(__name__) SLIDING_WINDOW: Optional[int] = None SLIDING_WINDOW_BLOCKS: Optional[int] = None +MEM_POOL = torch.cuda.graph_pool_handle() + # Adds windowing logic to FlashCausalLMBatch @dataclass @@ -332,6 +334,8 @@ class BaseFlashMistral(FlashCausalLM): model = model_cls(config, weights) + self.cuda_graphs = {} + torch.distributed.barrier(group=self.process_group) super(BaseFlashMistral, self).__init__( model=model, @@ -350,6 +354,60 @@ class BaseFlashMistral(FlashCausalLM): def batch_type(self) -> Type[FlashMistralBatch]: return FlashMistralBatch + def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): + input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device) + position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) + slots = 
torch.arange(bs, dtype=torch.int32, device=self.device) + input_lengths = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s + block_tables = ( + torch.arange(max_bt, dtype=torch.int32, device=self.device) + .repeat(bs) + .reshape((bs, max_bt)) + ) + kv_cache = get_cache_manager().kv_cache + + self.cuda_graphs[bs] = { + "input_ids": input_ids, + "position_ids": position_ids, + "kv_cache": kv_cache, + "block_tables": block_tables, + "slots": slots, + "input_lengths": input_lengths, + } + graph = torch.cuda.CUDAGraph() + self.cuda_graphs[bs]["graph"] = graph + + torch.cuda.synchronize() + # Run once outside to warmup + self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=None, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + prefill_cache_indices=None, + lm_head_indices=None, + ) + torch.cuda.synchronize() + + with torch.cuda.graph(graph, pool=MEM_POOL): + self.cuda_graphs[bs]["logits"] = self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=None, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + prefill_cache_indices=None, + lm_head_indices=None, + ) + torch.cuda.synchronize() + def forward(self, batch: FlashMistralBatch) -> Tuple[torch.Tensor, torch.Tensor]: # Model Forward if batch.speculative_ids is not None: @@ -401,21 +459,56 @@ class BaseFlashMistral(FlashCausalLM): input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices - logits = self.model.forward( - input_ids=input_ids, - position_ids=position_ids, - cu_seqlen_prefill=cu_seqlen_prefill, - kv_cache=kv_cache, - block_tables=block_tables, - slots=slots, - input_lengths=input_lengths, - max_s=max_s, - prefill_cache_indices=batch.prefill_cache_indices, - lm_head_indices=lm_head_indices, - ) - if batch.prefill_cache_indices is not None: - batch.prefill_cache_indices = None - return logits + + if self.model.max_past is not None: + max_s = min(self.model.max_past, max_s) + + bs = input_ids.shape[0] + padded_bs = bs + if bs == 3: + padded_bs = 4 + elif 3 < bs <= 8: + padded_bs = 8 + elif bs > 8: + padded_bs = (bs + 7) // 8 * 8 + + # Try to find an associated cuda graph + cuda_graph = self.cuda_graphs.get(padded_bs, None) + + if cu_seqlen_prefill is not None or cuda_graph is None: + logits = self.model.forward( + input_ids=input_ids, + position_ids=position_ids, + cu_seqlen_prefill=cu_seqlen_prefill, + kv_cache=kv_cache, + block_tables=block_tables, + slots=slots, + input_lengths=input_lengths, + max_s=max_s, + prefill_cache_indices=batch.prefill_cache_indices, + lm_head_indices=lm_head_indices, + ) + if batch.prefill_cache_indices is not None: + batch.prefill_cache_indices = None + return logits + + # Copy inputs to the static inputs of the cuda graph + # Static inputs are potentially padded + cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids + cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids + cuda_graph["block_tables"][ + : block_tables.shape[0], : block_tables.shape[1] + ] = block_tables + cuda_graph["slots"].fill_(-1) + cuda_graph["slots"][: slots.shape[0]] = slots + cuda_graph["input_lengths"].zero_() + cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths + + # Replay the graph + cuda_graph["graph"].replay() + + # Slice output to the correct shape + return cuda_graph["logits"][:bs] class FlashMistral(BaseFlashMistral): diff 
--git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
index 8f7e1f10..d0614346 100644
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -407,8 +407,9 @@ class Weights:
                 data = json.load(f)
             self.gptq_bits = data["quantization_config"]["bits"]
             self.gptq_groupsize = data["quantization_config"]["group_size"]
-            self.gptq_desc_act = data["quantization_config"]["desc_act"]
+            # Order is important here, desc_act is missing on some real models
             self.quant_method = data["quantization_config"]["quant_method"]
+            self.gptq_desc_act = data["quantization_config"]["desc_act"]
         except Exception:
             filename = "quantize_config.json"
             try:

From 91b56a71dc9a114f57f53da50f5ce4617e29588b Mon Sep 17 00:00:00 2001
From: drbh
Date: Tue, 13 Feb 2024 10:01:02 -0500
Subject: [PATCH 077/153] feat: add deserialize_with that handles strings or objects with content (#1550)

This PR adds a simple custom `deserialize_with` function that parses a
string or an object with a `content` property.

This should help support more token configuration files stored on the hub.
---
 router/src/lib.rs | 85 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)

diff --git a/router/src/lib.rs b/router/src/lib.rs
index 3ce9eca8..a9d783bb 100644
--- a/router/src/lib.rs
+++ b/router/src/lib.rs
@@ -32,7 +32,9 @@ pub struct HubModelInfo {
 #[derive(Clone, Deserialize, Default)]
 pub struct HubTokenizerConfig {
     pub chat_template: Option<String>,
+    #[serde(deserialize_with = "token_serde::deserialize")]
     pub bos_token: Option<String>,
+    #[serde(deserialize_with = "token_serde::deserialize")]
     pub eos_token: Option<String>,
 }
 
@@ -43,6 +45,34 @@ impl HubTokenizerConfig {
     }
 }
 
+mod token_serde {
+    use super::*;
+    use serde::de;
+    use serde::Deserializer;
+    use serde_json::Value;
+
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let value = Value::deserialize(deserializer)?;
+
+        match value {
+            Value::String(s) => Ok(Some(s)),
+            Value::Object(map) => {
+                if let Some(content) = map.get("content").and_then(|v| v.as_str()) {
+                    Ok(Some(content.to_string()))
+                } else {
+                    Err(de::Error::custom(
+                        "content key not found in structured token",
+                    ))
+                }
+            }
+            _ => Err(de::Error::custom("invalid token format")),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Serialize, ToSchema)]
 pub struct Info {
     /// Model info
@@ -638,6 +668,8 @@ pub(crate) struct ErrorResponse {
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+
     use tokenizers::Tokenizer;
 
     pub(crate) async fn get_tokenizer() -> Tokenizer {
@@ -646,4 +678,57 @@
         let filename = repo.get("tokenizer.json").unwrap();
         Tokenizer::from_file(filename).unwrap()
     }
+
+    #[test]
+    fn test_hub_nested_tokens_tokenizer_config() {
+        // this is a subset of the tokenizer.json file
+        // in this case we expect the tokens to be encoded as simple strings
+        let json_content = r#"{
+            "chat_template": "test",
+            "bos_token": "<|begin▁of▁sentence|>",
+            "eos_token": "<|end▁of▁sentence|>"
+        }"#;
+
+        let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();
+
+        // check that we successfully parsed the tokens
+        assert_eq!(config.chat_template, Some("test".to_string()));
+        assert_eq!(
+            config.bos_token,
+            Some("<|begin▁of▁sentence|>".to_string())
+        );
+        assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string()));
+
+        // in this case we expect the tokens to be encoded as structured tokens
+        // we want the content of the structured token
+        let json_content = r#"{
"chat_template": "test", + "bos_token": { + "__type": "AddedToken", + "content": "<|begin▁of▁sentence|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "__type": "AddedToken", + "content": "<|end▁of▁sentence|>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } + }"#; + + let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap(); + + // check that we successfully parsed the tokens + assert_eq!(config.chat_template, Some("test".to_string())); + assert_eq!( + config.bos_token, + Some("<|begin▁of▁sentence|>".to_string()) + ); + assert_eq!(config.eos_token, Some("<|end▁of▁sentence|>".to_string())); + } } From d05d930545a55eb27fbcdf52b9494612da99d5db Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 13 Feb 2024 17:43:47 +0100 Subject: [PATCH 078/153] Fixing glibc version in the runtime. (#1556) # What does this PR do? Fixes # (issue) ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR. --- Dockerfile_amd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile_amd b/Dockerfile_amd index c2ec4a6d..9a5e3568 100644 --- a/Dockerfile_amd +++ b/Dockerfile_amd @@ -36,7 +36,7 @@ COPY launcher launcher RUN cargo build --release # Text Generation Inference base image for RoCm -FROM rocm/dev-ubuntu-20.04:5.7 as base +FROM rocm/dev-ubuntu-22.04:5.7 as base RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ build-essential \ From f6500bfaa3377db29f9c88a8e4eb3a7b2ee513cf Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 13 Feb 2024 22:46:16 +0100 Subject: [PATCH 079/153] Upgrade intermediary layer for nvidia too. (#1557) Fixes # (issue) - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? Anyone in the community is free to review the PR once the tests have passed. 
Feel free to tag members/contributors who may be interested in your PR.
---
 Dockerfile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index b5a9291d..c49f43e6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,5 @@
 # Rust builder
 FROM lukemathwalker/cargo-chef:latest-rust-1.75 AS chef
-FROM lukemathwalker/cargo-chef:latest-rust-1.75 AS chef
 WORKDIR /usr/src
 
 FROM chef as planner

From e93cc34a2259c3309cd4d623e7c56636f0621b15 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Wed, 14 Feb 2024 09:54:10 +0100
Subject: [PATCH 080/153] Improving mamba runtime by using updates (#1552)

- Moved from float16 to bfloat16, which has fewer imprecisions (the load
  tests were failing with the update kernels + f16; everything passes under
  bf16). Another note: we are not respecting the f32 layer norm defined in
  the configuration (this is OK in my book, but it could impact f16
  precision).
- Moved to the update kernels. The Triton overhead is super high, and
  removing it by switching to cuda graphs works great (an update cuda graph
  is available in TRT-LLM if needed; it seems *exactly* like the regular
  ssm kernel).
- Reworked the inference_params struct so it holds only 2 tensors, to
  reduce the overhead of copying back and forth to the cuda graphs.
- The leftover overhead seems to be entirely in the tokenization bit.
  (Still, 4 copies are paid before launching the graph.)

# What does this PR do?

Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
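For readers unfamiliar with the pattern, the capture/replay trick used here is roughly the following. This is a minimal, self-contained sketch in plain PyTorch; `decode_step` and the tensor shapes are hypothetical placeholders for illustration, not the actual Mamba forward:

```python
import torch

def decode_step(input_ids: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for one decode forward pass of the model.
    return input_ids.float() * 2.0

device = torch.device("cuda")

# Static input buffer: the captured graph always reads from this exact
# memory, so callers must copy fresh data into it before every replay.
static_input = torch.zeros((8, 1), dtype=torch.int64, device=device)

# Run once eagerly so lazy CUDA initialization happens outside the capture.
torch.cuda.synchronize()
decode_step(static_input)
torch.cuda.synchronize()

# Capture: kernels launched inside this context are recorded, not executed.
graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
    static_output = decode_step(static_input)

# Replay: a single call relaunches the whole recorded kernel sequence,
# skipping the per-kernel Python and launch overhead of the eager path.
new_batch = torch.randint(0, 50_000, (8, 1), dtype=torch.int64, device=device)
static_input.copy_(new_batch)
graph.replay()
result = static_output  # now holds the outputs for new_batch
```

The server-side version below additionally shares one memory pool (`MEM_POOL`) across the graphs captured for the different batch sizes and keeps the per-batch-size static buffers in `self.cuda_graphs`, which is why only a few input copies remain on the decode hot path.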
--- .../__snapshots__/test_mamba/test_mamba.json | 20 +- .../test_mamba/test_mamba_all_params.json | 86 +++--- .../test_mamba/test_mamba_load.json | 112 ++++---- integration-tests/models/test_mamba.py | 6 +- .../models/custom_modeling/mamba_modeling.py | 95 +++---- .../models/flash_causal_lm.py | 2 +- .../text_generation_server/models/globals.py | 3 + server/text_generation_server/models/mamba.py | 260 ++++++++++-------- 8 files changed, 300 insertions(+), 284 deletions(-) create mode 100644 server/text_generation_server/models/globals.py diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba.json index 4435f215..eaba5078 100644 --- a/integration-tests/models/__snapshots__/test_mamba/test_mamba.json +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba.json @@ -8,61 +8,61 @@ "tokens": [ { "id": 187, - "logprob": -0.3552246, + "logprob": -0.37890625, "special": false, "text": "\n" }, { "id": 187, - "logprob": -0.38378906, + "logprob": -0.26953125, "special": false, "text": "\n" }, { "id": 30763, - "logprob": -1.140625, + "logprob": -1.1953125, "special": false, "text": "Deep" }, { "id": 4715, - "logprob": -0.5551758, + "logprob": -0.53515625, "special": false, "text": " learning" }, { "id": 310, - "logprob": -0.59033203, + "logprob": -0.625, "special": false, "text": " is" }, { "id": 247, - "logprob": -0.70654297, + "logprob": -0.6796875, "special": false, "text": " a" }, { "id": 747, - "logprob": -2.0410156, + "logprob": -2.0, "special": false, "text": " new" }, { "id": 1511, - "logprob": -2.3789062, + "logprob": -2.3125, "special": false, "text": " type" }, { "id": 273, - "logprob": -0.0026435852, + "logprob": -0.0028533936, "special": false, "text": " of" }, { "id": 5145, - "logprob": -1.2841797, + "logprob": -1.265625, "special": false, "text": " machine" } diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json index 052c1c69..85e9a9e0 100644 --- a/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json @@ -11,22 +11,22 @@ }, { "id": 13, - "logprob": -2.5234375, + "logprob": -2.734375, "text": "," }, { "id": 8862, - "logprob": -3.4433594, + "logprob": -3.6875, "text": " yellow" }, { "id": 13, - "logprob": -0.43017578, + "logprob": -0.40234375, "text": "," }, { "id": 209, - "logprob": -8.21875, + "logprob": -8.25, "text": " " } ], @@ -40,60 +40,60 @@ }, { "id": 395, - "logprob": -0.46411133, + "logprob": -0.3125, "special": false, "text": "and" }, { - "id": 13735, - "logprob": -2.1132812, - "special": false, - "text": " orange" - }, - { - "id": 313, - "logprob": -1.2128906, - "special": false, - "text": " (" - }, - { - "id": 249, - "logprob": -2.3671875, - "special": false, - "text": "in" - }, - { - "id": 253, + "id": 4797, "logprob": 0.0, "special": false, - "text": " the" + "text": " blue" }, { - "id": 1340, - "logprob": -1.640625, + "id": 9830, + "logprob": -1.65625, "special": false, - "text": " order" + "text": " colors" }, { - "id": 597, - "logprob": -0.5488281, - "special": false, - "text": " they" - }, - { - "id": 3176, - "logprob": -0.48608398, - "special": false, - "text": " appear" - }, - { - "id": 275, + "id": 15, "logprob": 0.0, "special": false, - "text": " in" + "text": "." 
+ }, + { + "id": 329, + "logprob": -2.4375, + "special": false, + "text": " A" + }, + { + "id": 1180, + "logprob": -1.953125, + "special": false, + "text": " number" + }, + { + "id": 273, + "logprob": 0.0, + "special": false, + "text": " of" + }, + { + "id": 1027, + "logprob": -1.5546875, + "special": false, + "text": " different" + }, + { + "id": 3295, + "logprob": -0.97265625, + "special": false, + "text": " color" } ], "top_tokens": null }, - "generated_text": "blue, red, yellow, \nand orange (in the order they appear in" + "generated_text": "blue, red, yellow, \nand blue colors. A number of different color" } diff --git a/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json b/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json index 014210b2..4921c14b 100644 --- a/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json +++ b/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json @@ -12,22 +12,22 @@ }, { "id": 310, - "logprob": -0.8125, + "logprob": -0.83984375, "text": " is" }, { "id": 18147, - "logprob": -12.828125, + "logprob": -12.8125, "text": " Deep" }, { "id": 20727, - "logprob": -3.0, + "logprob": -2.84375, "text": " Learning" }, { "id": 32, - "logprob": -1.1484375, + "logprob": -1.25, "text": "?" } ], @@ -35,61 +35,61 @@ "tokens": [ { "id": 187, - "logprob": -0.3552246, + "logprob": -0.37890625, "special": false, "text": "\n" }, { "id": 187, - "logprob": -0.38378906, + "logprob": -0.4296875, "special": false, "text": "\n" }, { "id": 30763, - "logprob": -1.1279297, + "logprob": -1.078125, "special": false, "text": "Deep" }, { "id": 4715, - "logprob": -0.5595703, + "logprob": -0.515625, "special": false, "text": " learning" }, { "id": 310, - "logprob": -0.60253906, + "logprob": -0.6015625, "special": false, "text": " is" }, { "id": 247, - "logprob": -0.7050781, + "logprob": -0.65625, "special": false, "text": " a" }, { "id": 747, - "logprob": -2.0488281, + "logprob": -2.109375, "special": false, "text": " new" }, { "id": 1511, - "logprob": -2.3808594, + "logprob": -2.328125, "special": false, "text": " type" }, { "id": 273, - "logprob": -0.0026416779, + "logprob": -0.0032653809, "special": false, "text": " of" }, { "id": 5145, - "logprob": -1.2851562, + "logprob": -1.28125, "special": false, "text": " machine" } @@ -111,22 +111,22 @@ }, { "id": 310, - "logprob": -0.78027344, + "logprob": -0.80078125, "text": " is" }, { "id": 18147, - "logprob": -12.8203125, + "logprob": -13.25, "text": " Deep" }, { "id": 20727, - "logprob": -2.9902344, + "logprob": -2.828125, "text": " Learning" }, { "id": 32, - "logprob": -1.1523438, + "logprob": -1.1953125, "text": "?" 
} ], @@ -134,61 +134,61 @@ "tokens": [ { "id": 187, - "logprob": -0.35351562, + "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, - "logprob": -0.38256836, + "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, - "logprob": -1.1269531, + "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, - "logprob": -0.54541016, + "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, - "logprob": -0.59765625, + "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, - "logprob": -0.7001953, + "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, - "logprob": -2.0585938, + "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, - "logprob": -2.3789062, + "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, - "logprob": -0.0027446747, + "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, - "logprob": -1.2851562, + "logprob": -1.3125, "special": false, "text": " machine" } @@ -210,22 +210,22 @@ }, { "id": 310, - "logprob": -0.78027344, + "logprob": -0.80078125, "text": " is" }, { "id": 18147, - "logprob": -12.8203125, + "logprob": -13.25, "text": " Deep" }, { "id": 20727, - "logprob": -2.9902344, + "logprob": -2.828125, "text": " Learning" }, { "id": 32, - "logprob": -1.1523438, + "logprob": -1.1953125, "text": "?" } ], @@ -233,61 +233,61 @@ "tokens": [ { "id": 187, - "logprob": -0.35351562, + "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, - "logprob": -0.38256836, + "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, - "logprob": -1.1269531, + "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, - "logprob": -0.54541016, + "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, - "logprob": -0.59765625, + "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, - "logprob": -0.7001953, + "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, - "logprob": -2.0585938, + "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, - "logprob": -2.3789062, + "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, - "logprob": -0.0027446747, + "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, - "logprob": -1.2851562, + "logprob": -1.3125, "special": false, "text": " machine" } @@ -309,22 +309,22 @@ }, { "id": 310, - "logprob": -0.78027344, + "logprob": -0.80078125, "text": " is" }, { "id": 18147, - "logprob": -12.8203125, + "logprob": -13.25, "text": " Deep" }, { "id": 20727, - "logprob": -2.9902344, + "logprob": -2.828125, "text": " Learning" }, { "id": 32, - "logprob": -1.1523438, + "logprob": -1.1953125, "text": "?" 
}
 ],
@@ -332,61 +332,61 @@
   "tokens": [
     {
       "id": 187,
-      "logprob": -0.35351562,
+      "logprob": -0.296875,
      "special": false,
      "text": "\n"
    },
    {
      "id": 187,
-      "logprob": -0.38256836,
+      "logprob": -0.3359375,
      "special": false,
      "text": "\n"
    },
    {
      "id": 30763,
-      "logprob": -1.1269531,
+      "logprob": -1.2578125,
      "special": false,
      "text": "Deep"
    },
    {
      "id": 4715,
-      "logprob": -0.54541016,
+      "logprob": -0.5546875,
      "special": false,
      "text": " learning"
    },
    {
      "id": 310,
-      "logprob": -0.59765625,
+      "logprob": -0.62890625,
      "special": false,
      "text": " is"
    },
    {
      "id": 247,
-      "logprob": -0.7001953,
+      "logprob": -0.64453125,
      "special": false,
      "text": " a"
    },
    {
      "id": 747,
-      "logprob": -2.0585938,
+      "logprob": -2.078125,
      "special": false,
      "text": " new"
    },
    {
      "id": 1511,
-      "logprob": -2.3789062,
+      "logprob": -2.28125,
      "special": false,
      "text": " type"
    },
    {
      "id": 273,
-      "logprob": -0.0027446747,
+      "logprob": -0.0030670166,
      "special": false,
      "text": " of"
    },
    {
      "id": 5145,
-      "logprob": -1.2851562,
+      "logprob": -1.3125,
      "special": false,
      "text": " machine"
    }
diff --git a/integration-tests/models/test_mamba.py b/integration-tests/models/test_mamba.py
index bf398999..4cc863f0 100644
--- a/integration-tests/models/test_mamba.py
+++ b/integration-tests/models/test_mamba.py
@@ -47,14 +47,14 @@ async def test_mamba_all_params(fused_kernel_mamba, response_snapshot):
     assert response.details.generated_tokens == 10
     assert (
         response.generated_text
-        == "blue, red, yellow, \nand orange (in the order they appear in"
+        == "blue, red, yellow, \nand blue colors. A number of different color"
     )
     assert response == response_snapshot
 
 
 @pytest.mark.asyncio
 @pytest.mark.private
-async def test_mamba_load(fused_kernel_mamba, generate_load, response_snapshot):
+async def test_mamba_load(fused_kernel_mamba, generate_load, generous_response_snapshot):
     responses = await generate_load(
         fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4
     )
@@ -63,4 +63,4 @@ async def test_mamba_load(fused_kernel_mamba, generate_load, response_snapshot):
     assert all([r.generated_text == responses[0].generated_text for r in responses])
 
     assert responses[0].generated_text == "\n\nDeep learning is a new type of machine"
-    assert responses == response_snapshot
+    assert responses == generous_response_snapshot
diff --git a/server/text_generation_server/models/custom_modeling/mamba_modeling.py b/server/text_generation_server/models/custom_modeling/mamba_modeling.py
index 017c0341..53e939bb 100644
--- a/server/text_generation_server/models/custom_modeling/mamba_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/mamba_modeling.py
@@ -3,7 +3,6 @@ import torch.distributed
 
 from mamba_ssm.ops.triton.selective_state_update import selective_state_update
 from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
-from mamba_ssm.utils.generation import InferenceParams
 from torch import nn
 from typing import Optional, Tuple, Any
 from transformers.configuration_utils import PretrainedConfig
@@ -18,6 +17,17 @@ from text_generation_server.utils.layers import (
 from einops import rearrange
 from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
 import math
+from dataclasses import dataclass
+
+@dataclass
+class InferenceParams:
+    """Inference parameters that are passed to the main model in order
+    to efficiently calculate and store the context during inference."""
+    max_seqlen: int
+    max_batch_size: int
+    conv_states: torch.Tensor
+    ssm_states: torch.Tensor
+    seqlen_offset: int
 
 
 class MambaConfig(PretrainedConfig):
@@ -56,9 +66,9 @@ class 
MambaConfig(PretrainedConfig): class MambaBlock(nn.Module): - def __init__(self, prefix, config, weights): + def __init__(self, prefix, config, weights, layer_id): super().__init__() - self.layer_idx = int(prefix.split(".")[2]) + self.layer_id = layer_id self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False) self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False) self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True) @@ -79,21 +89,20 @@ class MambaBlock(nn.Module): # inference_params def forward(self, hidden_states: torch.Tensor, inference_params=None): - _, seqlen, _ = hidden_states.shape - conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] - if inference_params.seqlen_offset > 0: + conv_state = inference_params.conv_states[self.layer_id] + ssm_state = inference_params.ssm_states[self.layer_id] out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state) return out, conv_state, ssm_state + _, seqlen, _ = hidden_states.shape projected_states = self.in_proj(hidden_states).transpose(1, 2) + # assert projected_states.shape == [batch_size, 2 * dstate, seqlen], f"{projected_states.shape} [{batch_size}, {dstate}, {seqlen}]" x, z = projected_states.chunk(2, dim=1) conv_state = F.pad(x, (self.d_conv - seqlen, 0)) x = causal_conv1d_fn( x=x, - weight=self.conv1d.weight.view( - self.conv1d.weight.size(0), self.conv1d.weight.size(2) - ), + weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation, ) @@ -126,56 +135,28 @@ class MambaBlock(nn.Module): return attn_outputs, conv_state, last_state def step(self, hidden_states, conv_state, ssm_state): - _xz = self.in_proj(hidden_states) - _x, _z = _xz.chunk(2, dim=-1) # (B D) - conv_state_new = torch.cat([conv_state, _x.transpose(1, 2)], dim=-1) - conv_out = causal_conv1d_fn( - x=conv_state_new, - weight=self.conv1d.weight.view( - self.conv1d.weight.size(0), self.conv1d.weight.size(2) - ), - bias=self.conv1d.bias, - activation=self.activation, + xz = self.in_proj(hidden_states.squeeze(1)) + x, z = xz.chunk(2, dim=-1) # (B D) + x = causal_conv1d_update(x, conv_state, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation) + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = F.linear(dt, self.dt_proj.weight) + A = self.negA + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True ) - conv_state = conv_state_new[:, :, 1:] - bsz, seqlen, dim = hidden_states.shape - output_tensor = torch.zeros( - (bsz, seqlen, dim), device=hidden_states.device, dtype=hidden_states.dtype - ) - for i in range(0, bsz): - x = conv_out[i : i + 1, :, -1] - z = _z[i : i + 1, -1, :] - x_db = self.x_proj(x) - dt, B, C = torch.split( - x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1 - ) - dt = F.linear(dt, self.dt_proj.weight) - y = selective_state_update( - ssm_state[i : i + 1, :, :], - x, - dt, - self.negA, - B, - C, - self.D, - z=z, - dt_bias=self.dt_proj.bias, - dt_softplus=True, - ) - out = self.out_proj(y) - output_tensor[i] = out - - return output_tensor, conv_state, ssm_state + out = self.out_proj(y) + return out.unsqueeze(1), conv_state.clone(), ssm_state.clone() class ResidualBlock(nn.Module): - def __init__(self, layer_id, config, weights): + def __init__(self, prefix, config, weights, layer_id): super().__init__() self.mamba_block = MambaBlock( - 
prefix=f"{layer_id}.mixer", config=config, weights=weights + prefix=f"{prefix}.mixer", config=config, weights=weights, layer_id=layer_id ) self.layer_norm = FastRMSNorm.load( - prefix=f"{layer_id}.norm", weights=weights, eps=config.layer_norm_epsilon + prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_epsilon ) def forward( @@ -200,7 +181,7 @@ class MambaModel(nn.Module): self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights) self.blocks = nn.ModuleList( [ - ResidualBlock(f"{prefix}.layers.{i}", config, weights) + ResidualBlock(f"{prefix}.layers.{i}", config, weights, layer_id=i) for i in range(config.n_layer) ] ) @@ -216,14 +197,12 @@ class MambaModel(nn.Module): self, input_ids: torch.Tensor, inference_params=None, residual=None ) -> Tuple[torch.Tensor, torch.Tensor, InferenceParams]: hidden_states = self.embed_tokens(input_ids) - for block in self.blocks: + for i, block in enumerate(self.blocks): hidden_states, residual, conv_state, ssm_state = block( hidden_states, residual, inference_params ) - inference_params.key_value_memory_dict[block.mamba_block.layer_idx] = ( - conv_state, - ssm_state, - ) + inference_params.conv_states[i].copy_(conv_state) + inference_params.ssm_states[i].copy_(ssm_state) hidden_states = ( hidden_states + residual if residual is not None else hidden_states @@ -234,4 +213,4 @@ class MambaModel(nn.Module): # update the offset for the next inference using these params inference_params.seqlen_offset += input_ids.size(1) - return logits, input_ids, inference_params + return logits diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index c7fda516..e04a9719 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ -28,12 +28,12 @@ from text_generation_server.models.cache_manager import ( BLOCK_SIZE, ) from text_generation_server.pb import generate_pb2 +from text_generation_server.models.globals import MEM_POOL from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser from text_generation_server.utils.dist import MEMORY_FRACTION tracer = trace.get_tracer(__name__) -MEM_POOL = torch.cuda.graph_pool_handle() @dataclass diff --git a/server/text_generation_server/models/globals.py b/server/text_generation_server/models/globals.py new file mode 100644 index 00000000..b0dca376 --- /dev/null +++ b/server/text_generation_server/models/globals.py @@ -0,0 +1,3 @@ +import torch + +MEM_POOL = torch.cuda.graph_pool_handle() diff --git a/server/text_generation_server/models/mamba.py b/server/text_generation_server/models/mamba.py index c51e1e20..8f18e475 100644 --- a/server/text_generation_server/models/mamba.py +++ b/server/text_generation_server/models/mamba.py @@ -2,17 +2,20 @@ import torch import torch.distributed from transformers import AutoTokenizer, PreTrainedTokenizerBase from typing import Optional +import os from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaConfig, ) +from loguru import logger from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) +from text_generation_server.models.globals import MEM_POOL import time -from text_generation_server.models.custom_modeling.mamba_modeling import MambaModel +from text_generation_server.models.custom_modeling.mamba_modeling import MambaModel, InferenceParams from text_generation_server.models 
import Model from typing import Any, List, Optional, Tuple, Type, Dict from text_generation_server.models.types import ( @@ -24,7 +27,34 @@ from text_generation_server.models.types import ( from text_generation_server.utils.tokens import batch_top_tokens, Sampling from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling -from mamba_ssm.utils.generation import InferenceParams + +def new_inference_params(n_blocks: int, batch_size: int, d_inner: int, d_conv: int, d_state: int, seqlen_offset: int, dtype: torch.dtype, device: torch.device): + max_seqlen = 0 + conv_states = torch.zeros( + (n_blocks, + batch_size, + d_inner, + d_conv,), + device=device, + dtype=dtype, + ) + ssm_states = torch.zeros( + (n_blocks, + batch_size, + d_inner, + d_state,), + device=device, + dtype=dtype, + ) + inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + seqlen_offset=seqlen_offset, + conv_states=conv_states, + ssm_states=ssm_states, + + ) + return inference_params @dataclass @@ -221,14 +251,8 @@ class MambaBatch(Batch): # TODO # Kept it simple by just updating the state, maybe updating the other CPU values is necessary. - key_value_memory_dict = {} - for i, ( - conv_state, - ssm_state, - ) in self.inference_params.key_value_memory_dict.items(): - key_value_memory_dict[i] = (conv_state[indices], ssm_state[indices]) - self.inference_params.key_value_memory_dict = key_value_memory_dict - + self.inference_params.conv_states = self.inference_params.conv_states[:, indices] + self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices] return self @classmethod @@ -254,9 +278,16 @@ class MambaBatch(Batch): top_n_tokens = [] max_tokens = 0 max_seqlen = 0 - batch_size = 0 seqlen_offset = 0 + (n_blocks, _, d_inner, d_conv) = ( + batches[0].inference_params.conv_states.shape + ) + (_, _, _, d_state) = batches[0].inference_params.ssm_states.shape + dtype = batches[0].inference_params.conv_states.dtype + device = batches[0].inference_params.conv_states.device + inference_params = new_inference_params(n_blocks=n_blocks, batch_size=total_batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=device, dtype=dtype) + # Batch tensors input_ids = None top_n_tokens_tensor = None @@ -303,63 +334,16 @@ class MambaBatch(Batch): max_input_length - batch.max_input_length ) * len(batch) - max_seqlen = max(max_seqlen, batch.inference_params.max_seqlen) - seqlen_offset = max(seqlen_offset, batch.inference_params.seqlen_offset) - batch_size += batch.inference_params.max_batch_size + inference_params.max_seqlen = max(inference_params.max_seqlen, batch.inference_params.max_seqlen) + assert batch.inference_params.seqlen_offset != 0, "Invalid seqlen offset" + inference_params.seqlen_offset = max(inference_params.seqlen_offset, batch.inference_params.seqlen_offset) + + + inference_params.conv_states[:, start_index:end_index] = batch.inference_params.conv_states + inference_params.ssm_states[:, start_index:end_index] = batch.inference_params.ssm_states start_index = end_index - (_, d_model, d_conv) = ( - batches[0].inference_params.key_value_memory_dict[0][0].shape - ) - (_, _, d_state) = batches[0].inference_params.key_value_memory_dict[0][1].shape - n_blocks = len(batches[0].inference_params.key_value_memory_dict) - dtype = batches[0].inference_params.key_value_memory_dict[0][0].dtype - device = batches[0].inference_params.key_value_memory_dict[0][0].device - - key_value_memory_dict = 
{}
-        for i in range(n_blocks):
-            conv_state = torch.zeros(
-                batch_size,
-                d_model,
-                d_conv,
-                device=device,
-                dtype=dtype,
-            )
-            ssm_state = torch.zeros(
-                batch_size,
-                d_model,
-                d_state,
-                device=device,
-                dtype=dtype,
-            )
-            key_value_memory_dict[i] = (conv_state, ssm_state)
-        lengths_per_sample = torch.zeros(batch_size, dtype=torch.int32, device=device)
-
-        inference_params = InferenceParams(
-            max_seqlen=max_seqlen,
-            max_batch_size=batch_size,
-            seqlen_offset=seqlen_offset,
-            key_value_memory_dict=key_value_memory_dict,
-            lengths_per_sample=lengths_per_sample,
-        )
-
-        current_batch = 0
-        for batch in batches:
-            for i in range(n_blocks):
-                conv_state, ssm_state = batch.inference_params.key_value_memory_dict[i]
-                batch_size = batch.inference_params.max_batch_size
-                inference_params.key_value_memory_dict[i][0][
-                    current_batch : current_batch + batch_size
-                ] = conv_state
-                inference_params.key_value_memory_dict[i][1][
-                    current_batch : current_batch + batch_size
-                ] = ssm_state
-                inference_params.lengths_per_sample[
-                    current_batch : current_batch + batch_size
-                ] = batch.inference_params.lengths_per_sample
-                current_batch += batch_size
-
         return cls(
             batch_id=batches[0].batch_id,
             requests=requests,
@@ -394,9 +378,13 @@ class Mamba(Model):
         trust_remote_code: bool = False,
     ):
         self.process_group, _rank, _world_size = initialize_torch_distributed()
+        self.cuda_graphs = {}
         if torch.cuda.is_available():
             device = torch.device("cuda")
-            dtype = torch.float16 if dtype is None else dtype
+            # Bf16 is important. In f16, accumulations in the matmul are causing
+            # differences while the server is under load.
+            # This is detectable by the integration load test
+            dtype = torch.bfloat16 if dtype is None else dtype
         else:
             if quantize:
                 raise ValueError("quantization is not available on CPU")
@@ -439,17 +427,93 @@ class Mamba(Model):
 
     def warmup(self, batch) -> Optional[int]:
         # TODO: implement warmup for Mamba if needed
+        if os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True":
+            if self.speculate is None or self.speculate == 0:
+                try:
+                    logger.info("Experimental support for Cuda Graphs is enabled")
+                    # Warmup cuda graphs
+                    for bs in [1, 2, 4] + [8 * i for i in range(1, 9)]:
+                        self.cuda_graph_warmup(bs)
+                except Exception:
+                    logger.exception(f"Decode cuda graph warmup failed")
+
         return None
 
+    def cuda_graph_warmup(self, batch_size: int):
+        input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device)
+        n_blocks = len(self.model.blocks)
+
+        d_state = self.model.config.d_state
+        d_conv = self.model.config.d_conv
+        # Inner takes the expand multiplication
+        d_inner = self.model.config.d_inner
+
+        # Important: seqlen_offset must be nonzero to go through the update mechanism with the state
+        seqlen_offset = 1
+        inference_params = new_inference_params(n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype)
+
+        graph = torch.cuda.CUDAGraph()
+
+        torch.cuda.synchronize()
+        # Run once outside to warmup
+        self.model.forward(
+            input_ids=input_ids,
+            inference_params=inference_params
+        )
+        torch.cuda.synchronize()
+
+        with torch.cuda.graph(graph, pool=MEM_POOL):
+            logits = self.model.forward(
+                input_ids=input_ids,
+                inference_params=inference_params
+            )
+        torch.cuda.synchronize()
+        graph_dict = {
+            "input_ids": input_ids,
+            "inference_params": inference_params,
+            "graph": graph,
+            "logits": logits
+        }
+        self.cuda_graphs[batch_size] = graph_dict
+
     def forward(
         self,
         input_ids: torch.Tensor,
-        past: 
Optional[List[torch.Tensor]] = None, + inference_params: Any ) -> Tuple[torch.Tensor, torch.Tensor]: - return self.model( - input_ids, - past=past, - ) + bs = input_ids.shape[0] + padded_bs = bs + if bs == 3: + padded_bs = 4 + elif 3 < bs <= 8: + padded_bs = 8 + elif bs > 8: + padded_bs = (bs + 7) // 8 * 8 + + # Try to find an associated cuda graph + cuda_graph = self.cuda_graphs.get(padded_bs, None) + is_prefill = inference_params is None or inference_params.seqlen_offset == 0 + + if is_prefill or cuda_graph is None: + return self.model( + input_ids, + inference_params=inference_params, + ) + + # Copy inputs to the static inputs of the cuda graph + # Static inputs are potentially padded + cuda_graph["input_ids"][: bs] = input_ids + cuda_graph["inference_params"].conv_states[:, : bs] = inference_params.conv_states + cuda_graph["inference_params"].ssm_states[:, : bs] = inference_params.ssm_states + + # Replay the graph + cuda_graph["graph"].replay() + + inference_params.conv_states.copy_(cuda_graph["inference_params"].conv_states[:, :bs]) + inference_params.ssm_states.copy_(cuda_graph["inference_params"].ssm_states[:, :bs]) + + # Slice output to the correct shape + return cuda_graph["logits"][:bs] def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: start = time.time_ns() @@ -457,56 +521,26 @@ class Mamba(Model): batch.input_ids ) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids - batch_size = input_ids.shape[0] - max_seqlen = input_ids.shape[1] - dtype = input_ids.dtype - + batch_size, max_seqlen = input_ids.shape # Inference params - seqlen_og = 0 - inf_cache = {} - lengths_per_sample = ( - torch.ones(batch_size, dtype=torch.int32, device=input_ids.device) - * max_seqlen - ) if batch.inference_params is None: - inference_params = InferenceParams( - max_seqlen=max_seqlen, - max_batch_size=batch_size, - seqlen_offset=seqlen_og, - key_value_memory_dict=inf_cache, - lengths_per_sample=lengths_per_sample, - ) - - # Allocate inference cache - for res_block in self.model.blocks: - block = res_block.mamba_block - conv_state = torch.zeros( - batch_size, - self.model.config.d_model * self.model.config.expand, - self.model.config.d_conv, - device=block.conv1d.weight.device, - dtype=block.conv1d.weight.dtype, - ) - ssm_state = torch.zeros( - batch_size, - self.model.config.d_model * self.model.config.expand, - self.model.config.d_state, - device=block.dt_proj.weight.device, - dtype=block.dt_proj.weight.dtype, - ) - inference_params.key_value_memory_dict[block.layer_idx] = ( - conv_state, - ssm_state, - ) + # 0 is important here + seqlen_offset = 0 + n_blocks = len(self.model.blocks) + d_state = self.model.config.d_state + d_conv = self.model.config.d_conv + d_inner = self.model.config.d_inner + inference_params = new_inference_params(n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype) batch.inference_params = inference_params # Forward pass - logits, past_input_ids, new_inference_params = self.model( - input_ids, batch.inference_params + logits = self.forward( + input_ids, inference_params=batch.inference_params ) - batch.inference_params = new_inference_params + + # batch.inference_params = new_inference_params # Results generations: List[Generation] = [] stopped = True From 686b56a0c009b4fbd09a4254eaafe003856f88d1 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 14 Feb 2024 15:30:07 +0100 Subject: [PATCH 081/153] Small 
cleanup. (#1560)

Using a single `os.getenv` statement instead of multiple.
Should make truthy values easier to catch.

In the end, I didn't move towards a full CLI option because modifying
globals in Python is error-prone (it depends on code import order).

Added an error when mamba is launched with TP.

# What does this PR do?

Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.
---
 server/text_generation_server/models/flash_causal_lm.py | 4 ++--
 server/text_generation_server/models/globals.py | 3 +++
 server/text_generation_server/models/mamba.py | 8 +++++---
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index e04a9719..886fe486 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -28,7 +28,7 @@ from text_generation_server.models.cache_manager import (
     BLOCK_SIZE,
 )
 from text_generation_server.pb import generate_pb2
-from text_generation_server.models.globals import MEM_POOL
+from text_generation_server.models.globals import MEM_POOL, ENABLE_CUDA_GRAPHS
 from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser
 from text_generation_server.utils.dist import MEMORY_FRACTION
 
@@ -793,7 +793,7 @@ class FlashCausalLM(Model):
                 self.device,
             )
 
-            if os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True":
+            if ENABLE_CUDA_GRAPHS:
                 try:
                     logger.info("Experimental support for Cuda Graphs is enabled")
                     # Warmup cuda graphs
diff --git a/server/text_generation_server/models/globals.py b/server/text_generation_server/models/globals.py
index b0dca376..3b8a70bc 100644
--- a/server/text_generation_server/models/globals.py
+++ b/server/text_generation_server/models/globals.py
@@ -1,3 +1,6 @@
 import torch
+import os
 
 MEM_POOL = torch.cuda.graph_pool_handle()
+# This is overridden by the cli
+ENABLE_CUDA_GRAPHS = os.getenv("ENABLE_CUDA_GRAPHS", "false").lower() in {"1", "true"}
diff --git a/server/text_generation_server/models/mamba.py b/server/text_generation_server/models/mamba.py
index 8f18e475..868db6aa 100644
--- a/server/text_generation_server/models/mamba.py
+++ b/server/text_generation_server/models/mamba.py
@@ -13,7 +13,7 @@ from text_generation_server.utils import (
     weight_files,
     Weights,
 )
-from text_generation_server.models.globals import MEM_POOL
+from text_generation_server.models.globals import ENABLE_CUDA_GRAPHS, MEM_POOL
 import time
 from text_generation_server.models.custom_modeling.mamba_modeling import MambaModel, InferenceParams
 from text_generation_server.models import 
Model
@@ -377,7 +377,9 @@ class Mamba(Model):
         dtype: Optional[torch.dtype] = None,
         trust_remote_code: bool = False,
     ):
-        self.process_group, _rank, _world_size = initialize_torch_distributed()
+        self.process_group, _rank, world_size = initialize_torch_distributed()
+        if world_size > 1:
+            raise RuntimeError("Mamba does not support Tensor Parallelism (TP)")
         self.cuda_graphs = {}
         if torch.cuda.is_available():
             device = torch.device("cuda")
@@ -427,7 +429,7 @@ class Mamba(Model):
 
     def warmup(self, batch) -> Optional[int]:
         # TODO: implement warmup for Mamba if needed
-        if os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True":
+        if ENABLE_CUDA_GRAPHS:
             if self.speculate is None or self.speculate == 0:
                 try:
                     logger.info("Experimental support for Cuda Graphs is enabled")

From 55acb86f42fae5dc7260a938cb31a33b86dcced3 Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 15 Feb 2024 04:28:10 -0500
Subject: [PATCH 082/153] Outlines guided generation (#1539)

This WIP PR starts to add grammar support via outlines; currently it
supports very simple regex grammars and does not optimize for
precompiling or caching grammar FSMs.

todo:
- [X] add simple outlines guidance to `NextTokenChooser`
- [X] update protos for grammar
- [X] update generation params API
- [X] constrain simple grammar
- [ ] support parsing more complex grammars into FSMs
- [ ] support all grammar types that outlines supports
- [ ] explore optimizations to avoid recompiling grammars

guided request
```bash
curl -s 'http://localhost:3000/generate' \
    --header 'Content-Type: application/json' \
    --data-raw '{
        "inputs": "make an email for david: \n",
        "parameters": {
            "max_new_tokens": 6,
            "grammar": "[\\w-]+@([\\w-]+\\.)+[\\w-]+"
        }
    }' | jq
```

response
```json
{
    "generated_text": "david@example.com"
}
```

unguided request
```bash
curl -s 'http://localhost:3000/generate' \
    --header 'Content-Type: application/json' \
    --data '{
        "inputs": "make an email for david: \n",
        "parameters": {
            "max_new_tokens": 6
        }
    }' | jq
```

response
```json
{
    "generated_text": " email = 'david"
}
```
---
 benchmark/src/lib.rs | 4 +-
 clients/python/text_generation/client.py | 10 +
 clients/python/text_generation/types.py | 28 +-
 docs/source/basic_tutorials/launcher.md | 8 +
 integration-tests/conftest.py | 30 +-
 .../test_flash_llama_grammar.json | 89 ++++
 .../test_flash_llama_grammar_json.json | 274 ++++++++++
 .../test_flash_llama_grammar_load.json | 478 +++++++++++++++++
 .../test_flash_llama_grammar_regex.json | 109 ++++
 ...sh_llama_grammar_single_load_instance.json | 73 +++
 .../models/test_grammar_llama.py | 151 ++++++
 launcher/src/main.rs | 10 +
 proto/generate.proto | 10 +
 router/client/src/client.rs | 4 +
 router/client/src/lib.rs | 4 +-
 router/src/health.rs | 3 +
 router/src/lib.rs | 40 ++
 router/src/main.rs | 4 +
 router/src/queue.rs | 8 +-
 router/src/server.rs | 3 +
 router/src/validation.rs | 46 +-
 server/poetry.lock | 492 +++++++++++++++++-
 server/pyproject.toml | 1 +
 .../models/causal_lm.py | 5 +-
 .../models/flash_causal_lm.py | 24 +-
 .../models/flash_mistral.py | 2 +-
 .../models/galactica.py | 2 +-
 .../models/idefics_causal_lm.py | 5 +-
 server/text_generation_server/models/mamba.py | 5 +-
 .../models/seq2seq_lm.py | 5 +-
 .../utils/logits_process.py | 142 ++++-
 server/text_generation_server/utils/tokens.py | 92 +++-
 32 files changed, 2128 insertions(+), 33 deletions(-)
 create mode 100644 integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar.json
 create mode 100644 integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_json.json
create mode 100644 integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_load.json create mode 100644 integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_regex.json create mode 100644 integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_single_load_instance.json create mode 100644 integration-tests/models/test_grammar_llama.py diff --git a/benchmark/src/lib.rs b/benchmark/src/lib.rs index 6deae48d..638c6514 100644 --- a/benchmark/src/lib.rs +++ b/benchmark/src/lib.rs @@ -8,7 +8,7 @@ use crate::app::App; use crate::event::Event; use crossterm::ExecutableCommand; use std::io; -use text_generation_client::{NextTokenChooserParameters, ShardedClient}; +use text_generation_client::{GrammarType, NextTokenChooserParameters, ShardedClient}; use tokenizers::Tokenizer; use tokio::sync::{broadcast, mpsc}; use tui::backend::CrosstermBackend; @@ -45,6 +45,8 @@ pub async fn run( repetition_penalty: repetition_penalty.unwrap_or(1.0), frequency_penalty: frequency_penalty.unwrap_or(0.0), watermark, + grammar: String::new(), + grammar_type: GrammarType::None as i32, }; // Initialize terminal properties diff --git a/clients/python/text_generation/client.py b/clients/python/text_generation/client.py index 0bf80f8c..bbccbf1d 100644 --- a/clients/python/text_generation/client.py +++ b/clients/python/text_generation/client.py @@ -10,6 +10,7 @@ from text_generation.types import ( Response, Request, Parameters, + Grammar, ) from text_generation.errors import parse_error @@ -76,6 +77,7 @@ class Client: watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, + grammar: Optional[Grammar] = None, ) -> Response: """ Given a prompt, generate the following text @@ -138,6 +140,7 @@ class Client: watermark=watermark, decoder_input_details=decoder_input_details, top_n_tokens=top_n_tokens, + grammar=grammar, ) request = Request(inputs=prompt, stream=False, parameters=parameters) @@ -169,6 +172,7 @@ class Client: typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, + grammar: Optional[Grammar] = None, ) -> Iterator[StreamResponse]: """ Given a prompt, generate the following stream of tokens @@ -227,6 +231,7 @@ class Client: typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, + grammar=grammar, ) request = Request(inputs=prompt, stream=True, parameters=parameters) @@ -326,6 +331,7 @@ class AsyncClient: watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, + grammar: Optional[Grammar] = None, ) -> Response: """ Given a prompt, generate the following text asynchronously @@ -370,6 +376,7 @@ class AsyncClient: Returns: Response: generated response """ + # Validate parameters parameters = Parameters( best_of=best_of, @@ -388,6 +395,7 @@ class AsyncClient: typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, + grammar=grammar, ) request = Request(inputs=prompt, stream=False, parameters=parameters) @@ -417,6 +425,7 @@ class AsyncClient: typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, + grammar: Optional[Grammar] = None, ) -> AsyncIterator[StreamResponse]: """ Given a prompt, generate the following stream of tokens asynchronously @@ -475,6 +484,7 @@ class AsyncClient: typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, + grammar=grammar, ) request = Request(inputs=prompt, stream=True, parameters=parameters) diff --git 
a/clients/python/text_generation/types.py b/clients/python/text_generation/types.py index aa02d8d8..3426411b 100644 --- a/clients/python/text_generation/types.py +++ b/clients/python/text_generation/types.py @@ -1,10 +1,24 @@ from enum import Enum from pydantic import BaseModel, validator -from typing import Optional, List +from typing import Optional, List, Union from text_generation.errors import ValidationError +# enum for grammar type +class GrammarType(str, Enum): + Json = "json" + Regex = "regex" + + +# Grammar type and value +class Grammar(BaseModel): + # Grammar type + type: GrammarType + # Grammar value + value: Union[str, dict] + + class Parameters(BaseModel): # Activate logits sampling do_sample: bool = False @@ -41,6 +55,8 @@ class Parameters(BaseModel): decoder_input_details: bool = False # Return the N most likely tokens at each step top_n_tokens: Optional[int] = None + # grammar to use for generation + grammar: Optional[Grammar] = None @validator("best_of") def valid_best_of(cls, field_value, values): @@ -109,6 +125,14 @@ class Parameters(BaseModel): raise ValidationError("`top_n_tokens` must be strictly positive") return v + @validator("grammar") + def valid_grammar(cls, v): + if v is not None: + if v.type == GrammarType.Regex and not v.value: + raise ValidationError("`value` cannot be empty for `regex` grammar") + if v.type == GrammarType.Json and not v.value: + raise ValidationError("`value` cannot be empty for `json` grammar") + return v class Request(BaseModel): # Prompt @@ -157,7 +181,7 @@ class Token(BaseModel): # Token text text: str # Logprob - logprob: float + logprob: Optional[float] = None # Is the token a special token # Can be used to ignore tokens when concatenating special: bool diff --git a/docs/source/basic_tutorials/launcher.md b/docs/source/basic_tutorials/launcher.md index be31a7a4..36fa1241 100644 --- a/docs/source/basic_tutorials/launcher.md +++ b/docs/source/basic_tutorials/launcher.md @@ -378,6 +378,14 @@ Options: [env: TOKENIZER_CONFIG_PATH=] +``` +## DISABLE_GRAMMAR_SUPPORT +```shell + --disable-grammar-support + Disable outlines grammar constrained generation. 
This is a feature that allows you to generate text that follows a specific grammar + + [env: DISABLE_GRAMMAR_SUPPORT=] + ``` ## ENV ```shell diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py index efeda08d..e0228894 100644 --- a/integration-tests/conftest.py +++ b/integration-tests/conftest.py @@ -16,7 +16,14 @@ from syrupy.extensions.json import JSONSnapshotExtension from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError from text_generation import AsyncClient -from text_generation.types import Response, Details, InputToken, Token, BestOfSequence +from text_generation.types import ( + Response, + Details, + InputToken, + Token, + BestOfSequence, + Grammar, +) DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None) HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None) @@ -224,6 +231,7 @@ def launcher(event_loop): quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, + disable_grammar_support: bool = False, dtype: Optional[str] = None, ): port = random.randint(8000, 10_000) @@ -247,6 +255,8 @@ def launcher(event_loop): env = os.environ + if disable_grammar_support: + args.append("--disable-grammar-support") if num_shard is not None: args.extend(["--num-shard", str(num_shard)]) if quantize is not None: @@ -287,12 +297,15 @@ def launcher(event_loop): quantize: Optional[str] = None, trust_remote_code: bool = False, use_flash_attention: bool = True, + disable_grammar_support: bool = False, dtype: Optional[str] = None, ): port = random.randint(8000, 10_000) args = ["--model-id", model_id, "--env"] + if disable_grammar_support: + args.append("--disable-grammar-support") if num_shard is not None: args.extend(["--num-shard", str(num_shard)]) if quantize is not None: @@ -370,11 +383,22 @@ def launcher(event_loop): @pytest.fixture(scope="module") def generate_load(): async def generate_load_inner( - client: AsyncClient, prompt: str, max_new_tokens: int, n: int + client: AsyncClient, + prompt: str, + max_new_tokens: int, + n: int, + seed: Optional[int] = None, + grammar: Optional[Grammar] = None, + stop_sequences: Optional[List[str]] = None, ) -> List[Response]: futures = [ client.generate( - prompt, max_new_tokens=max_new_tokens, decoder_input_details=True + prompt, + max_new_tokens=max_new_tokens, + decoder_input_details=True, + seed=seed, + grammar=grammar, + stop_sequences=stop_sequences, ) for _ in range(n) ] diff --git a/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar.json b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar.json new file mode 100644 index 00000000..0e87f59e --- /dev/null +++ b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar.json @@ -0,0 +1,89 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 4321, + "logprob": -13.90625, + "text": "Test" + }, + { + "id": 2009, + "logprob": -12.328125, + "text": "request" + } + ], + "seed": null, + "tokens": [ + { + "id": 13, + "logprob": -2.0566406, + "special": false, + "text": "\n" + }, + { + "id": 13, + "logprob": -1.5253906, + "special": false, + "text": "\n" + }, + { + "id": 29902, + "logprob": -2.7578125, + "special": false, + "text": "I" + }, + { + "id": 4966, + "logprob": -1.9033203, + "special": false, + "text": " hope" + }, + { + "id": 445, + "logprob": -0.5019531, + "special": false, + "text": " this" + 
}, + { + "id": 6911, + "logprob": -0.21264648, + "special": false, + "text": " helps" + }, + { + "id": 29991, + "logprob": -0.5991211, + "special": false, + "text": "!" + }, + { + "id": 2803, + "logprob": -0.37475586, + "special": false, + "text": " Let" + }, + { + "id": 592, + "logprob": -0.018463135, + "special": false, + "text": " me" + }, + { + "id": 1073, + "logprob": -0.0008597374, + "special": false, + "text": " know" + } + ], + "top_tokens": null + }, + "generated_text": "\n\nI hope this helps! Let me know" +} diff --git a/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_json.json b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_json.json new file mode 100644 index 00000000..7b12b158 --- /dev/null +++ b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_json.json @@ -0,0 +1,274 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "eos_token", + "generated_tokens": 30, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 5235, + "logprob": -10.0625, + "text": "info" + }, + { + "id": 29901, + "logprob": -3.2324219, + "text": ":" + }, + { + "id": 13260, + "logprob": -10.625, + "text": "dav" + }, + { + "id": 333, + "logprob": -0.08276367, + "text": "id" + }, + { + "id": 8753, + "logprob": -7.5273438, + "text": "hol" + }, + { + "id": 17559, + "logprob": -3.8476562, + "text": "tz" + }, + { + "id": 763, + "logprob": -10.140625, + "text": "like" + }, + { + "id": 10697, + "logprob": -10.1953125, + "text": "trees" + }, + { + "id": 322, + "logprob": -2.5742188, + "text": "and" + }, + { + "id": 756, + "logprob": -7.4882812, + "text": "has" + }, + { + "id": 1023, + "logprob": -5.0507812, + "text": "two" + }, + { + "id": 274, + "logprob": -5.3164062, + "text": "c" + }, + { + "id": 1446, + "logprob": -0.6694336, + "text": "ats" + }, + { + "id": 29889, + "logprob": -0.9995117, + "text": "." 
+ }, + { + "id": 29871, + "logprob": -4.2421875, + "text": "" + } + ], + "seed": null, + "tokens": [ + { + "id": 6377, + "logprob": -0.14916992, + "special": false, + "text": "{\"" + }, + { + "id": 29888, + "logprob": -0.13598633, + "special": false, + "text": "f" + }, + { + "id": 12935, + "logprob": -0.017669678, + "special": false, + "text": "irs" + }, + { + "id": 29873, + "logprob": -0.00085639954, + "special": false, + "text": "t" + }, + { + "id": 1170, + "logprob": -0.0054016113, + "special": false, + "text": "Name" + }, + { + "id": 4710, + "logprob": -0.13549805, + "special": false, + "text": "\":\"" + }, + { + "id": 19504, + "logprob": -0.8852539, + "special": false, + "text": "David" + }, + { + "id": 3284, + "logprob": -0.16394043, + "special": false, + "text": "\",\"" + }, + { + "id": 4230, + "logprob": -0.020492554, + "special": false, + "text": "last" + }, + { + "id": 1170, + "logprob": -0.0013818741, + "special": false, + "text": "Name" + }, + { + "id": 4710, + "logprob": -0.0067749023, + "special": false, + "text": "\":\"" + }, + { + "id": 29950, + "logprob": -0.11578369, + "special": false, + "text": "H" + }, + { + "id": 14339, + "logprob": -0.004131317, + "special": false, + "text": "olt" + }, + { + "id": 29920, + "logprob": -0.0033359528, + "special": false, + "text": "z" + }, + { + "id": 3284, + "logprob": -0.20471191, + "special": false, + "text": "\",\"" + }, + { + "id": 29882, + "logprob": -0.0069274902, + "special": false, + "text": "h" + }, + { + "id": 20838, + "logprob": -0.19580078, + "special": false, + "text": "obb" + }, + { + "id": 29891, + "logprob": -2.2649765e-06, + "special": false, + "text": "y" + }, + { + "id": 4710, + "logprob": -0.32080078, + "special": false, + "text": "\":\"" + }, + { + "id": 29911, + "logprob": -2.1035156, + "special": false, + "text": "T" + }, + { + "id": 11003, + "logprob": -0.020767212, + "special": false, + "text": "rees" + }, + { + "id": 3284, + "logprob": -0.6010742, + "special": false, + "text": "\",\"" + }, + { + "id": 29876, + "logprob": -0.57666016, + "special": false, + "text": "n" + }, + { + "id": 398, + "logprob": -0.0061073303, + "special": false, + "text": "um" + }, + { + "id": 29907, + "logprob": -0.45703125, + "special": false, + "text": "C" + }, + { + "id": 1446, + "logprob": -0.0002872944, + "special": false, + "text": "ats" + }, + { + "id": 1115, + "logprob": -0.0021018982, + "special": false, + "text": "\":" + }, + { + "id": 29906, + "logprob": -0.08996582, + "special": false, + "text": "2" + }, + { + "id": 29913, + "logprob": -0.021697998, + "special": false, + "text": "}" + }, + { + "id": 2, + "logprob": 0.0, + "special": true, + "text": "" + } + ], + "top_tokens": null + }, + "generated_text": "{\"firstName\":\"David\",\"lastName\":\"Holtz\",\"hobby\":\"Trees\",\"numCats\":2}" +} diff --git a/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_load.json b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_load.json new file mode 100644 index 00000000..b7b26a2c --- /dev/null +++ b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_load.json @@ -0,0 +1,478 @@ +[ + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1024, + "logprob": -10.578125, + "text": "name" + }, + { + "id": 29901, + "logprob": -3.03125, + "text": ":" + }, + { + "id": 13260, + "logprob": -9.171875, + "text": "dav" + }, + { + 
"id": 333, + "logprob": -0.04244995, + "text": "id" + }, + { + "id": 29889, + "logprob": -2.4863281, + "text": "." + }, + { + "id": 4876, + "logprob": -10.7890625, + "text": "email" + }, + { + "id": 29901, + "logprob": -0.32714844, + "text": ":" + }, + { + "id": 259, + "logprob": -9.4921875, + "text": " " + } + ], + "seed": null, + "tokens": [ + { + "id": 29896, + "logprob": -0.7685547, + "special": false, + "text": "1" + }, + { + "id": 29906, + "logprob": -0.2376709, + "special": false, + "text": "2" + }, + { + "id": 29941, + "logprob": -0.01008606, + "special": false, + "text": "3" + }, + { + "id": 29946, + "logprob": -0.64160156, + "special": false, + "text": "4" + }, + { + "id": 29945, + "logprob": -0.5, + "special": false, + "text": "5" + }, + { + "id": 29953, + "logprob": -0.46557617, + "special": false, + "text": "6" + }, + { + "id": 29992, + "logprob": -0.5341797, + "special": false, + "text": "@" + }, + { + "id": 21980, + "logprob": -0.5361328, + "special": false, + "text": "gmail" + }, + { + "id": 29889, + "logprob": -0.00088739395, + "special": false, + "text": "." + }, + { + "id": 510, + "logprob": -0.0022907257, + "special": false, + "text": "com" + } + ], + "top_tokens": null + }, + "generated_text": "123456@gmail.com" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1024, + "logprob": -10.578125, + "text": "name" + }, + { + "id": 29901, + "logprob": -3.0332031, + "text": ":" + }, + { + "id": 13260, + "logprob": -9.171875, + "text": "dav" + }, + { + "id": 333, + "logprob": -0.04257202, + "text": "id" + }, + { + "id": 29889, + "logprob": -2.4785156, + "text": "." + }, + { + "id": 4876, + "logprob": -10.7890625, + "text": "email" + }, + { + "id": 29901, + "logprob": -0.32495117, + "text": ":" + }, + { + "id": 259, + "logprob": -9.4921875, + "text": " " + } + ], + "seed": null, + "tokens": [ + { + "id": 29896, + "logprob": -0.7709961, + "special": false, + "text": "1" + }, + { + "id": 29906, + "logprob": -0.23840332, + "special": false, + "text": "2" + }, + { + "id": 29941, + "logprob": -0.00995636, + "special": false, + "text": "3" + }, + { + "id": 29946, + "logprob": -0.64208984, + "special": false, + "text": "4" + }, + { + "id": 29945, + "logprob": -0.4970703, + "special": false, + "text": "5" + }, + { + "id": 29953, + "logprob": -0.46533203, + "special": false, + "text": "6" + }, + { + "id": 29992, + "logprob": -0.5336914, + "special": false, + "text": "@" + }, + { + "id": 21980, + "logprob": -0.5361328, + "special": false, + "text": "gmail" + }, + { + "id": 29889, + "logprob": -0.00088739395, + "special": false, + "text": "." + }, + { + "id": 510, + "logprob": -0.0022735596, + "special": false, + "text": "com" + } + ], + "top_tokens": null + }, + "generated_text": "123456@gmail.com" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1024, + "logprob": -10.578125, + "text": "name" + }, + { + "id": 29901, + "logprob": -3.0332031, + "text": ":" + }, + { + "id": 13260, + "logprob": -9.171875, + "text": "dav" + }, + { + "id": 333, + "logprob": -0.04257202, + "text": "id" + }, + { + "id": 29889, + "logprob": -2.4785156, + "text": "." 
+ }, + { + "id": 4876, + "logprob": -10.7890625, + "text": "email" + }, + { + "id": 29901, + "logprob": -0.32495117, + "text": ":" + }, + { + "id": 259, + "logprob": -9.4921875, + "text": " " + } + ], + "seed": null, + "tokens": [ + { + "id": 29896, + "logprob": -0.7709961, + "special": false, + "text": "1" + }, + { + "id": 29906, + "logprob": -0.23840332, + "special": false, + "text": "2" + }, + { + "id": 29941, + "logprob": -0.00995636, + "special": false, + "text": "3" + }, + { + "id": 29946, + "logprob": -0.64208984, + "special": false, + "text": "4" + }, + { + "id": 29945, + "logprob": -0.4970703, + "special": false, + "text": "5" + }, + { + "id": 29953, + "logprob": -0.46533203, + "special": false, + "text": "6" + }, + { + "id": 29992, + "logprob": -0.5336914, + "special": false, + "text": "@" + }, + { + "id": 21980, + "logprob": -0.5361328, + "special": false, + "text": "gmail" + }, + { + "id": 29889, + "logprob": -0.00088739395, + "special": false, + "text": "." + }, + { + "id": 510, + "logprob": -0.0022735596, + "special": false, + "text": "com" + } + ], + "top_tokens": null + }, + "generated_text": "123456@gmail.com" + }, + { + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 1024, + "logprob": -10.578125, + "text": "name" + }, + { + "id": 29901, + "logprob": -3.0332031, + "text": ":" + }, + { + "id": 13260, + "logprob": -9.171875, + "text": "dav" + }, + { + "id": 333, + "logprob": -0.04257202, + "text": "id" + }, + { + "id": 29889, + "logprob": -2.4785156, + "text": "." + }, + { + "id": 4876, + "logprob": -10.7890625, + "text": "email" + }, + { + "id": 29901, + "logprob": -0.32495117, + "text": ":" + }, + { + "id": 259, + "logprob": -9.4921875, + "text": " " + } + ], + "seed": null, + "tokens": [ + { + "id": 29896, + "logprob": -0.7709961, + "special": false, + "text": "1" + }, + { + "id": 29906, + "logprob": -0.23840332, + "special": false, + "text": "2" + }, + { + "id": 29941, + "logprob": -0.00995636, + "special": false, + "text": "3" + }, + { + "id": 29946, + "logprob": -0.64208984, + "special": false, + "text": "4" + }, + { + "id": 29945, + "logprob": -0.4970703, + "special": false, + "text": "5" + }, + { + "id": 29953, + "logprob": -0.46533203, + "special": false, + "text": "6" + }, + { + "id": 29992, + "logprob": -0.5336914, + "special": false, + "text": "@" + }, + { + "id": 21980, + "logprob": -0.5361328, + "special": false, + "text": "gmail" + }, + { + "id": 29889, + "logprob": -0.00088739395, + "special": false, + "text": "." 
+ }, + { + "id": 510, + "logprob": -0.0022735596, + "special": false, + "text": "com" + } + ], + "top_tokens": null + }, + "generated_text": "123456@gmail.com" + } +] diff --git a/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_regex.json b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_regex.json new file mode 100644 index 00000000..1ba9ae1e --- /dev/null +++ b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_regex.json @@ -0,0 +1,109 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [ + { + "id": 1, + "logprob": null, + "text": "" + }, + { + "id": 806, + "logprob": -11.890625, + "text": "Wh" + }, + { + "id": 1446, + "logprob": -3.6699219, + "text": "ats" + }, + { + "id": 2921, + "logprob": -7.8203125, + "text": "Go" + }, + { + "id": 468, + "logprob": -8.0703125, + "text": "og" + }, + { + "id": 793, + "logprob": -2.1875, + "text": "les" + }, + { + "id": 16332, + "logprob": -9.7109375, + "text": "DNS" + } + ], + "seed": null, + "tokens": [ + { + "id": 29946, + "logprob": -1.4765625, + "special": false, + "text": "4" + }, + { + "id": 29906, + "logprob": -0.9199219, + "special": false, + "text": "2" + }, + { + "id": 29889, + "logprob": 0.0, + "special": false, + "text": "." + }, + { + "id": 29896, + "logprob": -1.1367188, + "special": false, + "text": "1" + }, + { + "id": 29889, + "logprob": -1.4648438, + "special": false, + "text": "." + }, + { + "id": 29896, + "logprob": -0.40722656, + "special": false, + "text": "1" + }, + { + "id": 29889, + "logprob": -0.17419434, + "special": false, + "text": "." + }, + { + "id": 29896, + "logprob": -0.20251465, + "special": false, + "text": "1" + }, + { + "id": 29900, + "logprob": -1.5527344, + "special": false, + "text": "0" + }, + { + "id": 29896, + "logprob": -1.3710938, + "special": false, + "text": "1" + } + ], + "top_tokens": null + }, + "generated_text": "42.1.1.101" +} diff --git a/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_single_load_instance.json b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_single_load_instance.json new file mode 100644 index 00000000..7ffb17cb --- /dev/null +++ b/integration-tests/models/__snapshots__/test_grammar_llama/test_flash_llama_grammar_single_load_instance.json @@ -0,0 +1,73 @@ +{ + "details": { + "best_of_sequences": null, + "finish_reason": "length", + "generated_tokens": 10, + "prefill": [], + "seed": null, + "tokens": [ + { + "id": 29896, + "logprob": -0.7685547, + "special": false, + "text": "1" + }, + { + "id": 29906, + "logprob": -0.33666992, + "special": false, + "text": "2" + }, + { + "id": 29941, + "logprob": -0.009979248, + "special": false, + "text": "3" + }, + { + "id": 29946, + "logprob": -0.64208984, + "special": false, + "text": "4" + }, + { + "id": 29945, + "logprob": -0.4970703, + "special": false, + "text": "5" + }, + { + "id": 29953, + "logprob": -0.46533203, + "special": false, + "text": "6" + }, + { + "id": 29992, + "logprob": -0.5336914, + "special": false, + "text": "@" + }, + { + "id": 21980, + "logprob": -0.53759766, + "special": false, + "text": "gmail" + }, + { + "id": 29889, + "logprob": -0.0008878708, + "special": false, + "text": "." 
+      },
+      {
+        "id": 510,
+        "logprob": -0.002275467,
+        "special": false,
+        "text": "com"
+      }
+    ],
+    "top_tokens": null
+  },
+  "generated_text": "123456@gmail.com"
+}
diff --git a/integration-tests/models/test_grammar_llama.py b/integration-tests/models/test_grammar_llama.py
new file mode 100644
index 00000000..f068496c
--- /dev/null
+++ b/integration-tests/models/test_grammar_llama.py
@@ -0,0 +1,151 @@
+import pytest
+import json
+
+from text_generation.types import GrammarType
+
+
+@pytest.fixture(scope="module")
+def flash_llama_grammar_handle(launcher):
+    with launcher(
+        "TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False
+    ) as handle:
+        yield handle
+
+
+@pytest.fixture(scope="module")
+async def flash_llama_grammar(flash_llama_grammar_handle):
+    await flash_llama_grammar_handle.health(300)
+    return flash_llama_grammar_handle.client
+
+
+@pytest.mark.asyncio
+@pytest.mark.private
+async def test_flash_llama_grammar(flash_llama_grammar, response_snapshot):
+    response = await flash_llama_grammar.generate(
+        "Test request", max_new_tokens=10, decoder_input_details=True
+    )
+
+    assert response.details.generated_tokens == 10
+    assert response == response_snapshot
+
+
+@pytest.mark.asyncio
+@pytest.mark.private
+async def test_flash_llama_grammar_regex(flash_llama_grammar, response_snapshot):
+    response = await flash_llama_grammar.generate(
+        "Whats Googles DNS",
+        max_new_tokens=10,
+        decoder_input_details=True,
+        seed=0,
+        grammar={
+            "type": GrammarType.Regex,  # "regex"
+            "value": "((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)",
+        },
+    )
+
+    assert response.details.generated_tokens == 10
+    assert response.generated_text == "42.1.1.101"
+    assert response == response_snapshot
+
+
+@pytest.mark.asyncio
+@pytest.mark.private
+async def test_flash_llama_grammar_json(flash_llama_grammar, response_snapshot):
+    response = await flash_llama_grammar.generate(
+        "info: david holtz like trees and has two cats. ",
+        max_new_tokens=100,
+        decoder_input_details=True,
+        seed=0,
+        grammar={
+            "type": GrammarType.Json,  # "json"
+            "value": json.dumps(
+                {
+                    "type": "object",
+                    "$id": "https://example.com/person.schema.json",
+                    "$schema": "https://json-schema.org/draft/2020-12/schema",
+                    "title": "Person",
+                    "properties": {
+                        "firstName": {
+                            "type": "string",
+                            "description": "The person's first name.",
+                        },
+                        "lastName": {
+                            "type": "string",
+                            "description": "The person's last name.",
+                        },
+                        "hobby": {
+                            "description": "The person's hobby.",
+                            "type": "string",
+                        },
+                        "numCats": {
+                            "description": "The number of cats the person has.",
+                            "type": "integer",
+                            "minimum": 0,
+                        },
+                    },
+                    "required": ["firstName", "lastName", "hobby", "numCats"],
+                }
+            ),
+        },
+    )
+
+    assert response.details.generated_tokens == 30
+    assert (
+        response.generated_text
+        == '{"firstName":"David","lastName":"Holtz","hobby":"Trees","numCats":2}'
+    )
+    assert response == response_snapshot
+
+
+@pytest.mark.asyncio
+@pytest.mark.private
+async def test_flash_llama_grammar_load(
+    flash_llama_grammar, generate_load, response_snapshot
+):
+    responses = await generate_load(
+        flash_llama_grammar,
email: ", + max_new_tokens=10, + n=4, + stop_sequences=[".com"], + seed=0, + grammar={ + "type": GrammarType.Regex, # "regex" + "value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex + }, + ) + + assert len(responses) == 4 + + expected = "123456@gmail.com" + + for response in responses: + assert response.generated_text == expected + + assert all([r.generated_text == responses[0].generated_text for r in responses]) + + assert responses == response_snapshot + + +# this is the same as the above test, but only fires off a single request +# this is only to ensure that the parallel and single inference produce the same result +@pytest.mark.asyncio +@pytest.mark.private +async def test_flash_llama_grammar_single_load_instance( + flash_llama_grammar, generate_load, response_snapshot +): + response = await flash_llama_grammar.generate( + "name: david. email: ", + max_new_tokens=10, + stop_sequences=[".com"], + seed=0, + grammar={ + "type": GrammarType.Regex, # "regex" + "value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex + }, + ) + + # assert response.details.generated_tokens == 30 + assert response.generated_text == "123456@gmail.com" + + assert response == response_snapshot diff --git a/launcher/src/main.rs b/launcher/src/main.rs index b5b476fa..35e23316 100644 --- a/launcher/src/main.rs +++ b/launcher/src/main.rs @@ -384,6 +384,11 @@ struct Args { #[clap(long, env)] tokenizer_config_path: Option, + /// Disable outlines grammar constrained generation. + /// This is a feature that allows you to generate text that follows a specific grammar. + #[clap(long, env)] + disable_grammar_support: bool, + /// Display a lot of information about your runtime environment #[clap(long, short, action)] env: bool, @@ -1061,6 +1066,11 @@ fn spawn_webserver( args.model_id, ]; + // Grammar support + if args.disable_grammar_support { + router_args.push("--disable-grammar-support".to_string()); + } + // Tokenizer config path if let Some(ref tokenizer_config_path) = args.tokenizer_config_path { router_args.push("--tokenizer-config-path".to_string()); diff --git a/proto/generate.proto b/proto/generate.proto index dde344f6..5bcf792d 100644 --- a/proto/generate.proto +++ b/proto/generate.proto @@ -51,6 +51,12 @@ message ClearCacheRequest { /// Empty response message ClearCacheResponse {} +enum GrammarType { + GRAMMAR_TYPE_NONE = 0; + GRAMMAR_TYPE_JSON = 1; + GRAMMAR_TYPE_REGEX = 2; +} + message NextTokenChooserParameters { /// exponential scaling output probability distribution float temperature = 1; @@ -70,6 +76,10 @@ message NextTokenChooserParameters { float frequency_penalty = 9; /// token watermarking using "A Watermark for Large Language Models" bool watermark = 8; + /// grammar (applied if not empty) + string grammar = 10; + /// grammar type + GrammarType grammar_type = 11; } message StoppingCriteriaParameters { diff --git a/router/client/src/client.rs b/router/client/src/client.rs index 592338fa..51b75a49 100644 --- a/router/client/src/client.rs +++ b/router/client/src/client.rs @@ -221,6 +221,8 @@ impl Client { repetition_penalty: 1.0, frequency_penalty: 0.0, watermark: false, + grammar: String::new(), + grammar_type: GrammarType::None as i32, }) } else { Some(NextTokenChooserParameters { @@ -233,6 +235,8 @@ impl Client { repetition_penalty: 1.2, frequency_penalty: 0.1, watermark: false, + grammar: String::new(), + grammar_type: GrammarType::None as i32, }) }; requests.push(Request { diff --git a/router/client/src/lib.rs b/router/client/src/lib.rs index c38b931b..6782d9ff 100644 --- a/router/client/src/lib.rs 
diff --git a/router/client/src/lib.rs b/router/client/src/lib.rs
index c38b931b..6782d9ff 100644
--- a/router/client/src/lib.rs
+++ b/router/client/src/lib.rs
@@ -9,8 +9,8 @@ pub use client::Client;
 pub use pb::generate::v2::HealthResponse;
 pub use pb::generate::v2::InfoResponse as ShardInfo;
 pub use pb::generate::v2::{
-    Batch, CachedBatch, FinishReason, GeneratedText, Generation, NextTokenChooserParameters,
-    Request, StoppingCriteriaParameters, Tokens,
+    Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType,
+    NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens,
 };
 pub use sharded_client::ShardedClient;
 use thiserror::Error;
diff --git a/router/src/health.rs b/router/src/health.rs
index e830a3c3..b05b3094 100644
--- a/router/src/health.rs
+++ b/router/src/health.rs
@@ -1,5 +1,6 @@
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
+use text_generation_client::GrammarType as ProtoGrammarType;
 use text_generation_client::{
     Batch, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters,
 };
@@ -45,6 +46,8 @@ impl Health {
                 repetition_penalty: 1.0,
                 frequency_penalty: 0.0,
                 watermark: false,
+                grammar: String::new(),
+                grammar_type: ProtoGrammarType::None as i32,
             }),
             stopping_parameters: Some(StoppingCriteriaParameters {
                 max_new_tokens: 1,
diff --git a/router/src/lib.rs b/router/src/lib.rs
index a9d783bb..87873821 100644
--- a/router/src/lib.rs
+++ b/router/src/lib.rs
@@ -45,6 +45,43 @@ impl HubTokenizerConfig {
     }
 }
 
+mod json_object_or_string_to_string {
+    use serde::{Deserialize, Deserializer};
+    use serde_json::Value;
+
+    // A custom deserializer that treats both strings and objects as strings.
+    // This provides flexibility with input formats for the 'grammar' field.
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<String, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let value = Value::deserialize(deserializer)?;
+
+        match value {
+            Value::String(s) => Ok(s),
+            // Safely handle serialization and return an error if it fails
+            Value::Object(o) => {
+                serde_json::to_string(&o).map_err(|e| serde::de::Error::custom(e.to_string()))
+            }
+            _ => Err(serde::de::Error::custom(
+                "expected string or object for grammar",
+            )),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Deserialize)]
+#[serde(tag = "type", content = "value")]
+pub(crate) enum GrammarType {
+    #[serde(
+        rename = "json",
+        deserialize_with = "json_object_or_string_to_string::deserialize"
+    )]
+    Json(String),
+    #[serde(rename = "regex")]
+    Regex(String),
+}
+
 mod token_serde {
     use super::*;
     use serde::de;
@@ -201,6 +238,8 @@ pub(crate) struct GenerateParameters {
     #[serde(default)]
     #[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
     pub top_n_tokens: Option<u32>,
+    #[serde(default)]
+    pub grammar: Option<GrammarType>,
 }
 
 fn default_max_new_tokens() -> Option<u32> {
@@ -226,6 +265,7 @@ fn default_parameters() -> GenerateParameters {
         decoder_input_details: false,
         seed: None,
         top_n_tokens: None,
+        grammar: None,
     }
 }
diff --git a/router/src/main.rs b/router/src/main.rs
index 1757e459..472e9f7f 100644
--- a/router/src/main.rs
+++ b/router/src/main.rs
@@ -78,6 +78,8 @@ struct Args {
     ngrok_edge: Option<String>,
     #[clap(long, env, default_value_t = false)]
     messages_api_enabled: bool,
+    #[clap(long, env, default_value_t = false)]
+    disable_grammar_support: bool,
 }
 
 #[tokio::main]
@@ -111,6 +113,7 @@ async fn main() -> Result<(), RouterError> {
         ngrok_authtoken,
         ngrok_edge,
         messages_api_enabled,
+        disable_grammar_support,
     } = args;
 
     // Launch Tokio runtime
@@ -388,6 +391,7 @@ async fn main() -> Result<(), RouterError> {
         ngrok_edge,
         tokenizer_config,
         messages_api_enabled,
+        disable_grammar_support,
     )
     .await?;
     Ok(())
 }
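Because `json_object_or_string_to_string` accepts either a JSON object or an already-serialized string for the `value` field, the same grammar can be sent in two shapes. A minimal sketch of both request bodies follows; the router address and the `requests` dependency are illustrative assumptions, while the `/generate` route and the `{"type": ..., "value": ...}` envelope come from the `GrammarType` serde attributes above:

```python
# Sketch: the two grammar encodings accepted by the router's deserializer.
# Assumes a router running on 127.0.0.1:3000; `requests` is illustrative.
import json

import requests

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}},
    "required": ["name"],
}

# Value::Object(o) and Value::String(s) branches above accept both forms.
for value in (schema, json.dumps(schema)):
    response = requests.post(
        "http://127.0.0.1:3000/generate",
        json={
            "inputs": "info: david. ",
            "parameters": {
                "max_new_tokens": 20,
                "grammar": {"type": "json", "value": value},
            },
        },
    )
    print(response.json()["generated_text"])
```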
diff --git a/router/src/queue.rs b/router/src/queue.rs
index 00021812..e98bc43d 100644
--- a/router/src/queue.rs
+++ b/router/src/queue.rs
@@ -472,7 +472,9 @@ enum QueueCommand {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
+    use text_generation_client::{
+        GrammarType as ProtoGrammarType, NextTokenChooserParameters, StoppingCriteriaParameters,
+    };
     use tracing::info_span;
 
     fn default_queue() -> Queue {
@@ -495,7 +497,7 @@ mod tests {
         let entry = Entry {
             request: ValidGenerateRequest {
-                inputs: String::new(),
+                inputs: String::new(),
                 input_length: 0,
                 truncate: 0,
                 decoder_input_details: false,
@@ -509,6 +511,8 @@ mod tests {
                 repetition_penalty: 0.0,
                 frequency_penalty: 0.0,
                 watermark: false,
+                grammar: String::new(),
+                grammar_type: ProtoGrammarType::None as i32,
             },
             stopping_parameters: StoppingCriteriaParameters {
                 ignore_eos_token: false,
diff --git a/router/src/server.rs b/router/src/server.rs
index 450494df..7d9c453c 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -616,6 +616,7 @@ async fn chat_completions(
             decoder_input_details: !stream,
             seed,
             top_n_tokens: None,
+            grammar: None,
         },
     };
 
@@ -781,6 +782,7 @@ pub async fn run(
     ngrok_edge: Option<String>,
     tokenizer_config: HubTokenizerConfig,
     messages_api_enabled: bool,
+    disable_grammar_support: bool,
 ) -> Result<(), axum::BoxError> {
     // OpenAPI documentation
     #[derive(OpenApi)]
@@ -842,6 +844,7 @@ pub async fn run(
         max_top_n_tokens,
         max_input_length,
         max_total_tokens,
+        disable_grammar_support,
     );
     let generation_health = Arc::new(AtomicBool::new(false));
     let health_ext = Health::new(client.clone(), generation_health.clone());
diff --git a/router/src/validation.rs b/router/src/validation.rs
index 5802391d..b3a4ef1b 100644
--- a/router/src/validation.rs
+++ b/router/src/validation.rs
@@ -2,10 +2,12 @@
 /// Payload validation logic
 use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
-use crate::{GenerateParameters, GenerateRequest};
+use crate::{GenerateParameters, GenerateRequest, GrammarType};
 use rand::{thread_rng, Rng};
 use std::env;
-use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
+use text_generation_client::{
+    GrammarType as ProtoGrammarType, NextTokenChooserParameters, StoppingCriteriaParameters,
+};
 use thiserror::Error;
 use tokenizers::tokenizer::Tokenizer;
 use tokenizers::TruncationDirection;
@@ -22,6 +24,7 @@ pub struct Validation {
     max_top_n_tokens: u32,
     max_input_length: usize,
     max_total_tokens: usize,
+    disable_grammar_support: bool,
     /// Channel to communicate with the background tokenization task
     sender: Option<flume::Sender<TokenizerRequest>>,
     skip_tokenizer_in_tgi: bool,
 }
 
 impl Validation {
@@ -36,6 +39,7 @@ impl Validation {
         max_top_n_tokens: u32,
         max_input_length: usize,
         max_total_tokens: usize,
+        disable_grammar_support: bool,
     ) -> Self {
         // If we have a fast tokenizer
         let sender = if let Some(tokenizer) = tokenizer {
@@ -74,6 +78,7 @@ impl Validation {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
             skip_tokenizer_in_tgi,
         }
     }
@@ -199,6 +204,7 @@ impl Validation {
             watermark,
             decoder_input_details,
             top_n_tokens,
+            grammar,
             ..
         } = request.parameters;
 
@@ -317,6 +323,28 @@
             .validate_input(request.inputs, truncate, max_new_tokens)
             .await?;
 
+        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
+        // NOTE: this is currently difficult because we need the tokenizer in Python to build
+        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
+        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
+        // compiler and use that to build the FSM here.
+
+        // Validate grammar and unpack the grammar and type for the proto message
+        let (grammar, grammar_type) = match grammar {
+            Some(grammar) => {
+                // Ensure that grammar is not set if it's not supported
+                if self.disable_grammar_support {
+                    return Err(ValidationError::Grammar);
+                }
+                match grammar {
+                    // currently both are handled the same way since compilation is done in Python
+                    GrammarType::Json(json) => (json, ProtoGrammarType::Json.into()),
+                    GrammarType::Regex(regex) => (regex, ProtoGrammarType::Regex.into()),
+                }
+            }
+            None => (String::new(), ProtoGrammarType::None.into()),
+        };
+
         let parameters = NextTokenChooserParameters {
             temperature,
             repetition_penalty,
@@ -327,6 +355,8 @@ impl Validation {
             do_sample,
             seed,
             watermark,
+            grammar,
+            grammar_type,
         };
         let stopping_parameters = StoppingCriteriaParameters {
             max_new_tokens,
@@ -478,6 +508,8 @@ pub enum ValidationError {
     StopSequence(usize, usize),
     #[error("tokenizer error {0}")]
     Tokenizer(String),
+    #[error("grammar is not supported")]
+    Grammar,
     #[error("`watermark` = true is not allowed with FP8 quantization.")]
     WatermarkWithQuantization,
 }
@@ -497,6 +529,7 @@ mod tests {
         let max_input_length = 5;
         let max_total_tokens = 6;
         let workers = 1;
+        let disable_grammar_support = true;
         let validation = Validation::new(
             workers,
             tokenizer,
@@ -505,6 +538,7 @@ mod tests {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
         );
 
         let max_new_tokens = 10;
@@ -525,6 +559,7 @@ mod tests {
         let max_top_n_tokens = 4;
         let max_input_length = 5;
         let max_total_tokens = 6;
+        let disable_grammar_support = true;
         let workers = 1;
         let validation = Validation::new(
             workers,
             tokenizer,
@@ -534,6 +569,7 @@ mod tests {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
         );
 
         let max_new_tokens = 10;
@@ -555,6 +591,7 @@ mod tests {
         let max_input_length = 5;
         let max_total_tokens = 6;
         let workers = 1;
+        let disable_grammar_support = true;
         let validation = Validation::new(
             workers,
             tokenizer,
@@ -563,6 +600,7 @@ mod tests {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
         );
         match validation
             .validate(GenerateRequest {
@@ -589,6 +627,7 @@ mod tests {
         let max_input_length = 5;
         let max_total_tokens = 106;
         let workers = 1;
+        let disable_grammar_support = true;
         let validation = Validation::new(
             workers,
             tokenizer,
@@ -597,6 +636,7 @@ mod tests {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
         );
         match validation
             .validate(GenerateRequest {
@@ -652,6 +692,7 @@ mod tests {
         let max_input_length = 5;
         let max_total_tokens = 106;
         let workers = 1;
+        let disable_grammar_support = true;
         let validation = Validation::new(
             workers,
             tokenizer,
@@ -660,6 +701,7 @@ mod tests {
             max_top_n_tokens,
             max_input_length,
             max_total_tokens,
+            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
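With `--disable-grammar-support` set, the `ValidationError::Grammar` arm above turns any grammar-bearing request into a "grammar is not supported" error before it reaches the model. A sketch of what a client would observe; the exact status code and error envelope are assumptions based on how the router reports other validation errors, not something this patch spells out:

```python
# Sketch: a grammar request against a router started with
# --disable-grammar-support should fail validation.
# The 4xx status and {"error": ...} shape are assumptions.
import requests

response = requests.post(
    "http://127.0.0.1:3000/generate",
    json={
        "inputs": "Test request",
        "parameters": {"grammar": {"type": "regex", "value": "\\d+"}},
    },
)
assert response.status_code >= 400
print(response.json())  # e.g. {"error": "grammar is not supported", ...}
```

diff --git a/server/poetry.lock b/server/poetry.lock
index f1445b1d..7afe275e 100644
--- a/server/poetry.lock
+++ b/server/poetry.lock
@@ -140,6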
+140,17 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + [[package]] name = "async-timeout" version = "4.0.3" @@ -305,6 +316,17 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} +[[package]] +name = "cloudpickle" +version = "3.0.0" +description = "Pickler class to extend the standard pickle.Pickler functionality" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, + {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, +] + [[package]] name = "colorama" version = "0.4.6" @@ -439,6 +461,17 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "diskcache" +version = "5.6.3" +description = "Disk Cache -- Disk and file backed persistent cache." +optional = false +python-versions = ">=3" +files = [ + {file = "diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19"}, + {file = "diskcache-5.6.3.tar.gz", hash = "sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc"}, +] + [[package]] name = "exceptiongroup" version = "1.2.1" @@ -945,6 +978,17 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "interegular" +version = "0.3.3" +description = "a regex intersection checker" +optional = false +python-versions = ">=3.7" +files = [ + {file = "interegular-0.3.3-py37-none-any.whl", hash = "sha256:b0c07007d48c89d6d19f7204972d369b2a77222722e126b6aa63aa721dc3b19c"}, + {file = "interegular-0.3.3.tar.gz", hash = "sha256:d9b697b21b34884711399ba0f0376914b81899ce670032486d0d048344a76600"}, +] + [[package]] name = "jinja2" version = "3.1.3" @@ -962,6 +1006,99 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "joblib" +version = "1.4.0" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "joblib-1.4.0-py3-none-any.whl", hash = "sha256:42942470d4062537be4d54c83511186da1fc14ba354961a2114da91efa9a4ed7"}, + {file = "joblib-1.4.0.tar.gz", hash = "sha256:1eb0dc091919cd384490de890cb5dfd538410a6d4b3b54eef09fb8c50b409b1c"}, +] + +[[package]] +name = "jsonschema" +version = "4.21.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", 
"rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "lark" +version = "1.1.9" +description = "a modern parsing library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "lark-1.1.9-py3-none-any.whl", hash = "sha256:a0dd3a87289f8ccbb325901e4222e723e7d745dbfc1803eaf5f3d2ace19cf2db"}, + {file = "lark-1.1.9.tar.gz", hash = "sha256:15fa5236490824c2c4aba0e22d2d6d823575dcaf4cdd1848e34b6ad836240fba"}, +] + +[package.extras] +atomic-cache = ["atomicwrites"] +interegular = ["interegular (>=0.3.1,<0.4.0)"] +nearley = ["js2py"] +regex = ["regex"] + +[[package]] +name = "llvmlite" +version = "0.42.0" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.9" +files = [ + {file = "llvmlite-0.42.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3366938e1bf63d26c34fbfb4c8e8d2ded57d11e0567d5bb243d89aab1eb56098"}, + {file = "llvmlite-0.42.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c35da49666a21185d21b551fc3caf46a935d54d66969d32d72af109b5e7d2b6f"}, + {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70f44ccc3c6220bd23e0ba698a63ec2a7d3205da0d848804807f37fc243e3f77"}, + {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f8d8717a9073b9e0246998de89929071d15b47f254c10eef2310b9aac033d"}, + {file = "llvmlite-0.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:8d90edf400b4ceb3a0e776b6c6e4656d05c7187c439587e06f86afceb66d2be5"}, + {file = "llvmlite-0.42.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ae511caed28beaf1252dbaf5f40e663f533b79ceb408c874c01754cafabb9cbf"}, + {file = "llvmlite-0.42.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81e674c2fe85576e6c4474e8c7e7aba7901ac0196e864fe7985492b737dbab65"}, + {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb3975787f13eb97629052edb5017f6c170eebc1c14a0433e8089e5db43bcce6"}, + {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5bece0cdf77f22379f19b1959ccd7aee518afa4afbd3656c6365865f84903f9"}, + {file = "llvmlite-0.42.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e0c4c11c8c2aa9b0701f91b799cb9134a6a6de51444eff5a9087fc7c1384275"}, + {file = "llvmlite-0.42.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:08fa9ab02b0d0179c688a4216b8939138266519aaa0aa94f1195a8542faedb56"}, + {file = "llvmlite-0.42.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b2fce7d355068494d1e42202c7aff25d50c462584233013eb4470c33b995e3ee"}, + {file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebe66a86dc44634b59a3bc860c7b20d26d9aaffcd30364ebe8ba79161a9121f4"}, + {file = 
"llvmlite-0.42.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d47494552559e00d81bfb836cf1c4d5a5062e54102cc5767d5aa1e77ccd2505c"}, + {file = "llvmlite-0.42.0-cp312-cp312-win_amd64.whl", hash = "sha256:05cb7e9b6ce69165ce4d1b994fbdedca0c62492e537b0cc86141b6e2c78d5888"}, + {file = "llvmlite-0.42.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdd3888544538a94d7ec99e7c62a0cdd8833609c85f0c23fcb6c5c591aec60ad"}, + {file = "llvmlite-0.42.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0936c2067a67fb8816c908d5457d63eba3e2b17e515c5fe00e5ee2bace06040"}, + {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a78ab89f1924fc11482209f6799a7a3fc74ddc80425a7a3e0e8174af0e9e2301"}, + {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7599b65c7af7abbc978dbf345712c60fd596aa5670496561cc10e8a71cebfb2"}, + {file = "llvmlite-0.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:43d65cc4e206c2e902c1004dd5418417c4efa6c1d04df05c6c5675a27e8ca90e"}, + {file = "llvmlite-0.42.0.tar.gz", hash = "sha256:f92b09243c0cc3f457da8b983f67bd8e1295d0f5b3746c7a1861d7a99403854a"}, +] + [[package]] name = "loguru" version = "0.6.0" @@ -1189,6 +1326,17 @@ files = [ [package.dependencies] dill = ">=0.3.8" +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + [[package]] name = "networkx" version = "3.2.1" @@ -1207,6 +1355,40 @@ doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9. 
extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +[[package]] +name = "numba" +version = "0.59.1" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numba-0.59.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97385a7f12212c4f4bc28f648720a92514bee79d7063e40ef66c2d30600fd18e"}, + {file = "numba-0.59.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b77aecf52040de2a1eb1d7e314497b9e56fba17466c80b457b971a25bb1576d"}, + {file = "numba-0.59.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3476a4f641bfd58f35ead42f4dcaf5f132569c4647c6f1360ccf18ee4cda3990"}, + {file = "numba-0.59.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:525ef3f820931bdae95ee5379c670d5c97289c6520726bc6937a4a7d4230ba24"}, + {file = "numba-0.59.1-cp310-cp310-win_amd64.whl", hash = "sha256:990e395e44d192a12105eca3083b61307db7da10e093972ca285c85bef0963d6"}, + {file = "numba-0.59.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:43727e7ad20b3ec23ee4fc642f5b61845c71f75dd2825b3c234390c6d8d64051"}, + {file = "numba-0.59.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:411df625372c77959570050e861981e9d196cc1da9aa62c3d6a836b5cc338966"}, + {file = "numba-0.59.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2801003caa263d1e8497fb84829a7ecfb61738a95f62bc05693fcf1733e978e4"}, + {file = "numba-0.59.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dd2842fac03be4e5324ebbbd4d2d0c8c0fc6e0df75c09477dd45b288a0777389"}, + {file = "numba-0.59.1-cp311-cp311-win_amd64.whl", hash = "sha256:0594b3dfb369fada1f8bb2e3045cd6c61a564c62e50cf1f86b4666bc721b3450"}, + {file = "numba-0.59.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1cce206a3b92836cdf26ef39d3a3242fec25e07f020cc4feec4c4a865e340569"}, + {file = "numba-0.59.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8c8b4477763cb1fbd86a3be7050500229417bf60867c93e131fd2626edb02238"}, + {file = "numba-0.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d80bce4ef7e65bf895c29e3889ca75a29ee01da80266a01d34815918e365835"}, + {file = "numba-0.59.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f7ad1d217773e89a9845886401eaaab0a156a90aa2f179fdc125261fd1105096"}, + {file = "numba-0.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:5bf68f4d69dd3a9f26a9b23548fa23e3bcb9042e2935257b471d2a8d3c424b7f"}, + {file = "numba-0.59.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4e0318ae729de6e5dbe64c75ead1a95eb01fabfe0e2ebed81ebf0344d32db0ae"}, + {file = "numba-0.59.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f68589740a8c38bb7dc1b938b55d1145244c8353078eea23895d4f82c8b9ec1"}, + {file = "numba-0.59.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:649913a3758891c77c32e2d2a3bcbedf4a69f5fea276d11f9119677c45a422e8"}, + {file = "numba-0.59.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9712808e4545270291d76b9a264839ac878c5eb7d8b6e02c970dc0ac29bc8187"}, + {file = "numba-0.59.1-cp39-cp39-win_amd64.whl", hash = "sha256:8d51ccd7008a83105ad6a0082b6a2b70f1142dc7cfd76deb8c5a862367eb8c86"}, + {file = "numba-0.59.1.tar.gz", hash = "sha256:76f69132b96028d2774ed20415e8c528a34e3299a40581bae178f0994a2f370b"}, +] + +[package.dependencies] +llvmlite = "==0.42.*" +numpy = ">=1.22,<1.27" + [[package]] name = "numpy" version = 
"1.26.4" @@ -1613,6 +1795,39 @@ transformers = ">=4.37.0,<4.38.0" quality = ["hf-doc-builder", "ruff"] tests = ["GitPython", "datasets", "optuna", "parameterized", "psutil", "pytest", "safetensors", "sentencepiece"] +[[package]] +name = "outlines" +version = "0.0.27" +description = "Probabilistic Generative Model Programming" +optional = false +python-versions = ">=3.8" +files = [ + {file = "outlines-0.0.27-py3-none-any.whl", hash = "sha256:dd614f49760ff8870a5d491fad4a372d7b7d4da5c1646f1b42f12a9d5e34db4b"}, + {file = "outlines-0.0.27.tar.gz", hash = "sha256:debc49f0db4d968eea05a4a6134516b3e49c6c8607106aa097410a4b53d5af6c"}, +] + +[package.dependencies] +cloudpickle = "*" +diskcache = "*" +interegular = "*" +jinja2 = "*" +joblib = "*" +jsonschema = "*" +lark = "*" +nest-asyncio = "*" +numba = "*" +numpy = "*" +pydantic = ">=2.0" +referencing = "*" +requests = "*" +scipy = "*" +torch = ">=2.1.0" +transformers = "*" + +[package.extras] +serve = ["fastapi", "pydantic (>=2.0)", "ray (==2.9.0)", "uvicorn", "vllm (>=0.3.0)"] +test = ["accelerate", "beartype (<0.16.0)", "coverage[toml] (>=5.1)", "datasets", "diff-cover", "huggingface-hub", "llama-cpp-python", "pre-commit", "pytest", "pytest-benchmark", "pytest-cov", "pytest-mock", "responses", "transformers"] + [[package]] name = "packaging" version = "24.0" @@ -1945,6 +2160,116 @@ files = [ {file = "pyarrow_hotfix-0.6.tar.gz", hash = "sha256:79d3e030f7ff890d408a100ac16d6f00b14d44a502d7897cd9fc3e3a534e9945"}, ] +[[package]] +name = "pydantic" +version = "2.7.1" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.18.2" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file 
= "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + [[package]] name = "pyreadline3" version = "3.4.1" @@ -2063,6 +2388,21 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "referencing" +version = "0.35.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.0-py3-none-any.whl", hash = "sha256:8080727b30e364e5783152903672df9b6b091c926a146a759080b62ca3126cd6"}, + {file = "referencing-0.35.0.tar.gz", hash = "sha256:191e936b0c696d0af17ad7430a3dc68e88bc11be6514f4757dc890f04ab05889"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "regex" version = "2024.4.16" @@ -2186,6 +2526,114 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rpds-py" +version = "0.18.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, + {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, + {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, + {file = 
"rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, + {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, + {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = 
"rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = 
"rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = 
"sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, +] + [[package]] name = "safetensors" version = "0.4.3" @@ -2308,6 +2756,48 @@ tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] torch = ["safetensors[numpy]", "torch (>=1.10)"] +[[package]] +name = "scipy" +version = "1.13.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scipy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba419578ab343a4e0a77c0ef82f088238a93eef141b2b8017e46149776dfad4d"}, + {file = "scipy-1.13.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:22789b56a999265431c417d462e5b7f2b487e831ca7bef5edeb56efe4c93f86e"}, + {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f1432ba070e90d42d7fd836462c50bf98bd08bed0aa616c359eed8a04e3922"}, + {file = "scipy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8434f6f3fa49f631fae84afee424e2483289dfc30a47755b4b4e6b07b2633a4"}, + {file = "scipy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dcbb9ea49b0167de4167c40eeee6e167caeef11effb0670b554d10b1e693a8b9"}, + {file = "scipy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:1d2f7bb14c178f8b13ebae93f67e42b0a6b0fc50eba1cd8021c9b6e08e8fb1cd"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fbcf8abaf5aa2dc8d6400566c1a727aed338b5fe880cde64907596a89d576fa"}, + {file = "scipy-1.13.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5e4a756355522eb60fcd61f8372ac2549073c8788f6114449b37e9e8104f15a5"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5acd8e1dbd8dbe38d0004b1497019b2dbbc3d70691e65d69615f8a7292865d7"}, + {file = "scipy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ff7dad5d24a8045d836671e082a490848e8639cabb3dbdacb29f943a678683d"}, + {file = "scipy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4dca18c3ffee287ddd3bc8f1dabaf45f5305c5afc9f8ab9cbfab855e70b2df5c"}, + {file = "scipy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:a2f471de4d01200718b2b8927f7d76b5d9bde18047ea0fa8bd15c5ba3f26a1d6"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0de696f589681c2802f9090fff730c218f7c51ff49bf252b6a97ec4a5d19e8b"}, + {file = "scipy-1.13.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:b2a3ff461ec4756b7e8e42e1c681077349a038f0686132d623fa404c0bee2551"}, + {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf9fe63e7a4bf01d3645b13ff2aa6dea023d38993f42aaac81a18b1bda7a82a"}, + {file = "scipy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e7626dfd91cdea5714f343ce1176b6c4745155d234f1033584154f60ef1ff42"}, + {file = "scipy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:109d391d720fcebf2fbe008621952b08e52907cf4c8c7efc7376822151820820"}, + {file = "scipy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:8930ae3ea371d6b91c203b1032b9600d69c568e537b7988a3073dfe4d4774f21"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5407708195cb38d70fd2d6bb04b1b9dd5c92297d86e9f9daae1576bd9e06f602"}, + {file = "scipy-1.13.0-cp39-cp39-macosx_12_0_arm64.whl", hash = 
"sha256:ac38c4c92951ac0f729c4c48c9e13eb3675d9986cc0c83943784d7390d540c78"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c74543c4fbeb67af6ce457f6a6a28e5d3739a87f62412e4a16e46f164f0ae5"}, + {file = "scipy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28e286bf9ac422d6beb559bc61312c348ca9b0f0dae0d7c5afde7f722d6ea13d"}, + {file = "scipy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33fde20efc380bd23a78a4d26d59fc8704e9b5fd9b08841693eb46716ba13d86"}, + {file = "scipy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:45c08bec71d3546d606989ba6e7daa6f0992918171e2a6f7fbedfa7361c2de1e"}, + {file = "scipy-1.13.0.tar.gz", hash = "sha256:58569af537ea29d3f78e5abd18398459f195546bb3be23d16677fb26616cc11e"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "sentencepiece" version = "0.1.99" @@ -3102,4 +3592,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "5f39b7146fc36cc846070a3e12db863f2748b49a24ab55878129be3480c89685" +content-hash = "8a74ea25e989cdb332816e4bc0ad661c6230d60d5ae8e712fa56297aae4083b3" diff --git a/server/pyproject.toml b/server/pyproject.toml index 517ef42f..ee1a9d8a 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -25,6 +25,7 @@ peft = "^0.8.2" optimum-habana = "1.10.4" transformers = "4.37.2" accelerate = "0.27.2" +outlines="^0.0.27" [tool.poetry.group.dev.dependencies] grpcio-tools = "*" diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py index 771c7694..2dcdaa34 100644 --- a/server/text_generation_server/models/causal_lm.py +++ b/server/text_generation_server/models/causal_lm.py @@ -407,6 +407,7 @@ class CausalLMBatch(Batch): parameters, batches[dst_batch_idx].next_token_chooser.dtype, batches[dst_batch_idx].next_token_chooser.device, + batches[dst_batch_idx].next_token_chooser.tokenizer, hq_env.is_quantization_enabled ) @@ -462,7 +463,7 @@ class CausalLMBatch(Batch): parameters.append(parameters[0]) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( - parameters, dtype, device, hq_env.is_quantization_enabled + parameters, dtype, device, tokenizer, hq_env.is_quantization_enabled ) tokenized_inputs = tokenizer( [r.data.inputs for r in requests] + dummy_inputs, @@ -1040,7 +1041,7 @@ class CausalLM(Model): if top_n_tokens > 0: all_top_tokens = [] - for (top_token_ids, top_token_logprobs) in zip( + for top_token_ids, top_token_logprobs in zip( top_token_ids, top_token_logprobs ): toptoken_texts = self.tokenizer.batch_decode( diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py index 886fe486..7ec8c2fc 100644 --- a/server/text_generation_server/models/flash_causal_lm.py +++ b/server/text_generation_server/models/flash_causal_lm.py @@ 
-237,7 +237,7 @@ class FlashCausalLMBatch(Batch): ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( - next_token_chooser_parameters, dtype, device + next_token_chooser_parameters, dtype, device, tokenizer ) start_slots = torch.tensor(start_slots, dtype=torch.int64) @@ -593,6 +593,7 @@ class FlashCausalLMBatch(Batch): next_token_chooser_parameters, dtype=batches[0].next_token_chooser.dtype, device=batches[0].next_token_chooser.device, + tokenizer=batches[0].next_token_chooser.tokenizer, ) speculative_ids = ( @@ -869,7 +870,11 @@ class FlashCausalLM(Model): # Try to find an associated cuda graph cuda_graph = self.cuda_graphs.get(padded_bs, None) - if cu_seqlen_prefill is not None or cuda_graph is None or batch.speculative_ids is not None: + if ( + cu_seqlen_prefill is not None + or cuda_graph is None + or batch.speculative_ids is not None + ): return self.model.forward( input_ids=input_ids, position_ids=position_ids, @@ -1013,9 +1018,9 @@ class FlashCausalLM(Model): # Copy batch.input_ids to prefill_token_indices if prefill_logprobs: if len(batch) > 1: - prefill_tokens_indices[ - out_start_index : out_end_index - 1 - ] = batch.input_ids[start_index + 1 : start_index + out_length] + prefill_tokens_indices[out_start_index : out_end_index - 1] = ( + batch.input_ids[start_index + 1 : start_index + out_length] + ) else: # Set prefill_tokens_indices to the correct slice prefill_tokens_indices = batch.input_ids[ @@ -1028,6 +1033,7 @@ class FlashCausalLM(Model): cumulative_length += input_length + # Update values batch.input_ids = next_input_ids[accepted_ids.cumsum(dim=-1) - 1] batch.speculative_ids = speculative_ids batch.position_ids = next_position_ids + accepted_ids @@ -1166,7 +1172,7 @@ class FlashCausalLM(Model): if top_n_tokens > 0: all_top_tokens = [] - for (top_token_ids, top_token_logprobs) in zip( + for top_token_ids, top_token_logprobs in zip( top_token_ids, top_token_logprobs ): toptoken_texts = self.tokenizer.batch_decode( @@ -1204,6 +1210,12 @@ class FlashCausalLM(Model): generations.append(generation) + # accept each new token for this specific request since we may + # have more than one new token per request with speculative decoding + for next_token_id in _next_token_ids: + batch.next_token_chooser = batch.next_token_chooser.advance_grammar_single(i, next_token_id) + + # Update values batch.input_lengths[i] = input_length + n_accepted_ids if batch.input_lengths[i] > batch.max_seqlen: diff --git a/server/text_generation_server/models/flash_mistral.py b/server/text_generation_server/models/flash_mistral.py index 34a50194..70669c8d 100644 --- a/server/text_generation_server/models/flash_mistral.py +++ b/server/text_generation_server/models/flash_mistral.py @@ -192,7 +192,7 @@ class FlashMistralBatch(FlashCausalLMBatch): ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( - next_token_chooser_parameters, dtype, device + next_token_chooser_parameters, dtype, device, tokenizer ) start_slots = torch.tensor(start_slots, dtype=torch.int64) diff --git a/server/text_generation_server/models/galactica.py b/server/text_generation_server/models/galactica.py index 42ff1c80..a2c30255 100644 --- a/server/text_generation_server/models/galactica.py +++ b/server/text_generation_server/models/galactica.py @@ -92,7 +92,7 @@ class GalacticaCausalLMBatch(CausalLMBatch): requests_idx_mapping[r.id] = i # Add escape_custom_split_sequence to the CausalLMBatch logic inputs.append(escape_custom_split_sequence(r.inputs)) - next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, 
device)) + next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) diff --git a/server/text_generation_server/models/idefics_causal_lm.py b/server/text_generation_server/models/idefics_causal_lm.py index 2f28688d..5ea2db87 100644 --- a/server/text_generation_server/models/idefics_causal_lm.py +++ b/server/text_generation_server/models/idefics_causal_lm.py @@ -114,7 +114,7 @@ class IdeficsCausalLMBatch(Batch): for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.inputs) - next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device)) + next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) @@ -815,6 +815,9 @@ class IdeficsCausalLM(Model): generations.append(generation) # Update values + batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar( + next_token_id_squeezed.item() + ) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length diff --git a/server/text_generation_server/models/mamba.py b/server/text_generation_server/models/mamba.py index 868db6aa..4585f4b9 100644 --- a/server/text_generation_server/models/mamba.py +++ b/server/text_generation_server/models/mamba.py @@ -124,7 +124,7 @@ class MambaBatch(Batch): for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.inputs) - next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device)) + next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) @@ -694,6 +694,9 @@ class Mamba(Model): generations.append(generation) # Update values + batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar( + next_token_id_squeezed.item() + ) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py index 25042a32..459f4256 100644 --- a/server/text_generation_server/models/seq2seq_lm.py +++ b/server/text_generation_server/models/seq2seq_lm.py @@ -96,7 +96,7 @@ class Seq2SeqLMBatch(Batch): inputs.append(r.inputs) requests_idx_mapping[r.id] = i decoder_input_lengths.append(1) - next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device)) + next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device, tokenizer)) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) @@ -789,6 +789,9 @@ class Seq2SeqLM(Model): generations.append(generation) # Update values + batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar( + next_token_id_squeezed.item() + ) batch.decoder_input_ids[i] = next_token_id batch.all_decoder_input_ids[i] = all_decoder_input_ids batch.input_lengths[i] = input_length diff --git a/server/text_generation_server/utils/logits_process.py b/server/text_generation_server/utils/logits_process.py index 9013d200..64f74433 100644 --- a/server/text_generation_server/utils/logits_process.py +++ b/server/text_generation_server/utils/logits_process.py @@ -2,8 +2,17 @@ import math import torch import habana_frameworks.torch.core as htcore +import json +from loguru import logger from functools import 
lru_cache from typing import Optional, List, Dict, Union +from text_generation_server.pb.generate_pb2 import GrammarType + +from outlines.fsm.fsm import RegexFSM +from outlines.fsm.json_schema import build_regex_from_object +from functools import lru_cache +from typing import List, Optional, DefaultDict +import time from transformers import ( LogitsWarper, @@ -124,9 +133,7 @@ class FrequencyPenaltyLogitsProcessor(LogitsProcessor): ) -> torch.FloatTensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then penalty has to be multiplied to reduce the previous token probability - score = -torch.where( - score < 0, score * self.penalty, score / self.penalty - ) + score = -torch.where(score < 0, score * self.penalty, score / self.penalty) return scores.scatter_add_(1, input_ids, score) @@ -435,3 +442,132 @@ class HeterogeneousProcessorWrapper(LogitsProcessor): self.processors = new_processors return self return None + + +class GrammarLogitProcessor(LogitsProcessor): + fsm_state: DefaultDict[int, int] + fsm: RegexFSM + + def __init__(self, tokenizer, device, grammar, grammar_type): + self.device = device + self.tokenizer = GrammarLogitProcessor._cached_adapt_tokenizer(tokenizer) + self.fsm = GrammarLogitProcessor._cached_compile_fsm( + grammar_type, grammar, self.tokenizer + ) + + def __call__( + self, + logits: torch.Tensor, + fsm_grammar_state: int, + ): + if fsm_grammar_state == -1 or self.fsm is None: + return logits + allowed_tokens = self.fsm.allowed_token_ids(fsm_grammar_state) + mask = torch.full((logits.shape[-1],), -math.inf, device=self.device) + mask[allowed_tokens] = 0 + biased_scores = logits + mask + return biased_scores + + def advance(self, next_token_id, fsm_grammar_state): + return GrammarLogitProcessor._advance( + next_token_id, fsm_grammar_state, self.fsm + ) + + @staticmethod + def _advance(next_token_id, fsm_grammar_state, fsm): + if fsm_grammar_state == -1: + return fsm_grammar_state + return fsm.next_state(fsm_grammar_state, next_token_id) + + # TODO: move grammar compilation into the router + @staticmethod + @lru_cache(maxsize=32, typed=True) + def _cached_compile_fsm(grammar_type, schema, tokenizer): + start_time = time.time() + if grammar_type == GrammarType.GRAMMAR_TYPE_JSON: + schema = build_regex_from_object(schema) + elif grammar_type == GrammarType.GRAMMAR_TYPE_REGEX: + pass # schema is already a regex just here for clarity + fsm = RegexFSM(schema, tokenizer) + logger.debug(f"Compiled FSM in {time.time() - start_time:.2f}s") + return fsm + + @staticmethod + @lru_cache(maxsize=32, typed=True) + def _cached_adapt_tokenizer(tokenizer): + """Adapt tokenizer to work with the FSM. + + The API of Outlines tokenizers is slightly different to that of + `transformers`. In addition we need to handle the missing spaces to + Llama's tokenizer to be able to compile FSMs for this model. 
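+        Concretely, this attaches `tokenizer.vocabulary`, `tokenizer.special_tokens`
+        and a `convert_token_to_string` method to the tokenizer in place, which is
+        the interface the FSM construction below relies on.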
+ + """ + start_time = time.time() + tokenizer.vocabulary = tokenizer.get_vocab() + tokenizer.special_tokens = set(tokenizer.all_special_tokens) + + def convert_token_to_string(token: str) -> str: + from transformers.file_utils import SPIECE_UNDERLINE + + string = tokenizer.convert_tokens_to_string([token]) + + # A hack to handle missing spaces to HF's Llama tokenizers + if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": + return " " + string + + return string + + tokenizer.convert_token_to_string = convert_token_to_string + logger.debug(f"Adapted tokenizer in {time.time() - start_time:.2f}s") + return tokenizer + + def filter(self, indices): + new_fsms = [] + for i in indices: + new_fsms.append(self.fsms[i]) + self.fsms = new_fsms + return self + + +class HeterogeneousGrammarLogitProcessor(LogitsProcessor): + def __init__(self, tokenizer, device, grammars, grammar_type): + self.device = device + self.tokenizer = GrammarLogitProcessor._cached_adapt_tokenizer(tokenizer) + self.fsms = [] + for i in range(len(grammars)): + fsm = GrammarLogitProcessor._cached_compile_fsm( + grammar_type[i], grammars[i], self.tokenizer + ) + self.fsms.append(fsm) + + def __call__( + self, + logits: torch.Tensor, + fsm_grammar_states: List[int], + mask: torch.Tensor, + ): + mask = torch.full_like(logits, -math.inf) + for i in range(logits.shape[0]): + fsm = self.fsms[i] + if fsm_grammar_states[i] == -1 or fsm is None: + continue + allowed_tokens = fsm.allowed_token_ids(fsm_grammar_states[i]) + mask[i, allowed_tokens] = 0 + logits += mask + return logits + + def advance_batch(self, next_token_ids, fsm_grammar_states, grammars): + return [ + GrammarLogitProcessor._advance( + next_token_ids[i], fsm_grammar_states[i], self.fsms[i] + ) + for i in range(len(next_token_ids)) + ] + + def advance_at_index(self, next_token_id, fsm_grammar_state, index): + return GrammarLogitProcessor._advance( + next_token_id, fsm_grammar_state, self.fsms[index] + ) + + def filter(self, indices): + return GrammarLogitProcessor.filter(self, indices) diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py index 9d918ac5..ff0d22e2 100644 --- a/server/text_generation_server/utils/tokens.py +++ b/server/text_generation_server/utils/tokens.py @@ -3,11 +3,13 @@ import re from typing import List, Optional, Tuple +import math import torch from text_generation_server.pb import generate_pb2 -from text_generation_server.pb.generate_pb2 import FinishReason +from text_generation_server.pb.generate_pb2 import FinishReason, GrammarType from text_generation_server.utils.logits_process import ( FrequencyPenaltyLogitsProcessor, + GrammarLogitProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, HeterogeneousFrequencyPenaltyLogitsProcessor, @@ -15,6 +17,7 @@ from text_generation_server.utils.logits_process import ( HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, HeterogeneousTypicalLogitsWarper, + HeterogeneousGrammarLogitProcessor, static_warper, ) from text_generation_server.utils.watermark import WatermarkLogitsProcessor @@ -35,8 +38,14 @@ class NextTokenChooser: do_sample=False, seed=0, device="cpu", + tokenizer: Optional[PreTrainedTokenizerBase] = None, + grammar: str = "", + grammar_type: GrammarType = GrammarType.GRAMMAR_TYPE_NONE, + fsm_grammar_state: int = 0, ): - self.watermark_processor = WatermarkLogitsProcessor(device=device) if watermark else None + self.watermark_processor = ( + WatermarkLogitsProcessor(device=device) if watermark else 
None + ) self.repetition_processor = ( RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty and repetition_penalty != 1.0 @@ -47,6 +56,12 @@ class NextTokenChooser: if frequency_penalty and frequency_penalty != 0.0 else None ) + self.grammar_processor = ( + GrammarLogitProcessor(tokenizer, device, grammar, grammar_type) + if grammar != "" + else None + ) + self.tokenizer = tokenizer has_warpers = ( (temperature is not None and temperature != 1.0) @@ -60,7 +75,10 @@ class NextTokenChooser: self.static_warper = None sampling = do_sample or has_warpers + self.choice = Sampling(seed, device) if sampling else Greedy() + self.fsm_grammar_state = fsm_grammar_state + self.grammar = grammar def __call__(self, input_ids, scores): if self.watermark_processor is not None: @@ -69,6 +87,8 @@ class NextTokenChooser: scores = self.repetition_processor(input_ids, scores) if self.frequency_processor is not None: scores = self.frequency_processor(input_ids, scores) + if self.grammar_processor is not None: + scores = self.grammar_processor(scores, self.fsm_grammar_state) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) @@ -79,11 +99,19 @@ class NextTokenChooser: return next_id, next_logprob + def advance_grammar(self, next_id: int): + if self.grammar_processor is not None: + self.fsm_grammar_state = self.grammar_processor.advance( + next_id, self.fsm_grammar_state + ) + return self + @classmethod def from_pb( cls, pb: generate_pb2.NextTokenChooserParameters, device: torch.device, + tokenizer: PreTrainedTokenizerBase, ) -> "NextTokenChooser": return NextTokenChooser( watermark=pb.watermark, @@ -96,6 +124,9 @@ class NextTokenChooser: do_sample=pb.do_sample, seed=pb.seed, device=device, + tokenizer=tokenizer, + grammar=pb.grammar, + grammar_type=pb.grammar_type, ) @@ -198,6 +229,10 @@ class HeterogeneousNextTokenChooser: typical_p: List[float], do_sample: List[bool], seeds: List[int], + tokenizer: PreTrainedTokenizerBase, + grammars: List[str], + grammar_types: List[int], + fsm_grammar_states: List[int], quantization_enabled: bool, ): warpers = [] @@ -231,6 +266,14 @@ class HeterogeneousNextTokenChooser: else None ) + self.grammar_processor = ( + HeterogeneousGrammarLogitProcessor( + tokenizer, device, grammars, grammar_types + ) + if any([grammar != "" for grammar in grammars]) + else None + ) + if any([x != 1.0 for x in temperature]): do_sample = [ sample or x != 1.0 for x, sample in zip(temperature, do_sample) @@ -262,6 +305,10 @@ class HeterogeneousNextTokenChooser: self.do_sample = do_sample self.dtype = dtype self.device = device + self.tokenizer = tokenizer + self.fsm_grammar_states = fsm_grammar_states + self.grammars = grammars + self.grammar_types = grammar_types def __call__( self, @@ -282,6 +329,8 @@ class HeterogeneousNextTokenChooser: scores = scores.view(B, S, -1) next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long) + mask = torch.full((scores.shape[-1],), -math.inf, device=self.device) + for j in range(S): _scores = scores[:, j] if self.watermark_processor is not None: @@ -290,10 +339,10 @@ class HeterogeneousNextTokenChooser: _scores = self.repetition_processor(input_ids, _scores) if self.frequency_processor is not None: _scores = self.frequency_processor(input_ids, _scores) - for warper in self.warpers: _scores = warper(input_ids, _scores) - + if self.grammar_processor is not None: + _scores = self.grammar_processor(_scores, self.fsm_grammar_states, mask) _next_ids = self.choice(_scores) scores[:, j] = _scores 
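            # A minimal sketch of the masking step the grammar processor performs
            # for each sequence (illustrative only; `fsm` stands for the Outlines
            # RegexFSM compiled above, `state` for that sequence's current FSM
            # state, and `vocab_size`/`device` match the scores tensor):
            #
            #     mask = torch.full((vocab_size,), -math.inf, device=device)
            #     mask[fsm.allowed_token_ids(state)] = 0
            #     _scores = _scores + mask  # non-allowed tokens drop to ~0 probability
            #
            # Once a token is accepted, `advance_grammar` / `advance_grammar_single`
            # advance the FSM with `fsm.next_state(state, next_token_id)`, so the
            # next decoding step is constrained by the new state.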
next_ids[:, j] = _next_ids @@ -351,6 +400,21 @@ class HeterogeneousNextTokenChooser: return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids + def advance_grammar(self, next_ids: List[int]): + if self.grammar_processor is not None: + other_new_states = self.grammar_processor.advance_batch( + next_ids, self.fsm_grammar_states, self.grammars + ) + self.fsm_grammar_states = other_new_states + return self + + def advance_grammar_single(self, grammar_state_index: int, next_id: int): + if self.grammar_processor is not None: + self.fsm_grammar_states[grammar_state_index] = self.grammar_processor.advance_at_index( + next_id, self.fsm_grammar_states[grammar_state_index], grammar_state_index + ) + return self + def filter(self, indices): if self.watermark_processor is not None: self.watermark_processor = self.watermark_processor.filter(indices) @@ -361,6 +425,9 @@ class HeterogeneousNextTokenChooser: if self.frequency_processor is not None: self.frequency_processor = self.frequency_processor.filter(indices) + if self.grammar_processor is not None: + self.grammar_processor = self.grammar_processor.filter(indices) + filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) @@ -371,6 +438,18 @@ class HeterogeneousNextTokenChooser: self.seeds = [self.seeds[i] for i in indices] self.do_sample = [self.do_sample[i] for i in indices] + new_grammars = [] + new_fsm_grammar_states = [] + new_grammar_types = [] + for i in indices: + new_grammars.append(self.grammars[i]) + new_fsm_grammar_states.append(self.fsm_grammar_states[i]) + new_grammar_types.append(self.grammar_types[i]) + + self.grammars = new_grammars + self.fsm_grammar_states = new_fsm_grammar_states + self.grammar_types = new_grammar_types + if any(self.do_sample): self.choice.filter(indices) else: @@ -384,6 +463,7 @@ class HeterogeneousNextTokenChooser: pb: List[generate_pb2.NextTokenChooserParameters], dtype: torch.dtype, device: torch.device, + tokenizer: PreTrainedTokenizerBase, quantization_enabled: bool, ) -> "HeterogeneousNextTokenChooser": return HeterogeneousNextTokenChooser( @@ -398,6 +478,10 @@ class HeterogeneousNextTokenChooser: seeds=[pb_.seed for pb_ in pb], device=device, dtype=dtype, + tokenizer=tokenizer, + grammars=[pb_.grammar for pb_ in pb], + grammar_types=[pb_.grammar_type for pb_ in pb], + fsm_grammar_states=[0] * len(pb), quantization_enabled=quantization_enabled, ) From cfccdf3d43bfa97504c6059025e36ef4156652c4 Mon Sep 17 00:00:00 2001 From: Aaron Mihalik Date: Thu, 15 Feb 2024 13:30:31 -0500 Subject: [PATCH 083/153] Added `name` field to OpenAI compatible API Messages (#1563) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What does this PR do? Literally just adds the name field to the Message class. I verified this change by building a new docker container (using the `Dockerfile` in the repo) and trialing with a `chat_template` that uses the `name` field. Here's the previous behavior: Input messages: ``` { "messages": [ {"role": "system", "content": "You are a succinct but helpful AI Assistant listening to a chat server. Address everyone by @"}, {"role": "user", "name": "Aaron", "content": "Hello There!"}, {"role": "assistant", "content": " Hello @Aaron! How can I assist you today?"}, {"role": "user", "name": "Sally", "content": "Hiya everyone. Is @Aaron is this room?"} ], "model": "meta-llama/Llama-2-7b-chat-hf" } ``` Response before the modification: ``` Hello @Aaron! Yes, you are in the chat room. How can I assist you today? 
😊 Hiya everyone! *waves* It's great to see you all here. Is there something on your mind that you'd like to talk about or ask? I'm here to listen and help in any way I can. 🤖 ``` Response after my modification: ``` Hello @Sally! Yes, @Aaron is currently in the chat room. How may I assist you today? ``` Fixes #1558 ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section? - [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case. - [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation). - [ ] Did you write any new necessary tests? ## Who can review? @Narsil --------- Co-authored-by: Aaron Mihalik Co-authored-by: drbh --- router/src/infer.rs | 17 +++++++++++++++++ router/src/lib.rs | 4 ++++ 2 files changed, 21 insertions(+) diff --git a/router/src/infer.rs b/router/src/infer.rs index 48369de9..372ea117 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -811,18 +811,22 @@ mod tests { Message { role: "user".to_string(), content: "Hi!".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "Hello how can I help?".to_string(), + name: None, }, Message { role: "user".to_string(), content: "What is Deep Learning?".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "magic!".to_string(), + name: None, }, ], bos_token: Some("[BOS]"), @@ -872,22 +876,27 @@ mod tests { Message { role: "user".to_string(), content: "Hi!".to_string(), + name: None, }, Message { role: "user".to_string(), content: "Hi again!".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "Hello how can I help?".to_string(), + name: None, }, Message { role: "user".to_string(), content: "What is Deep Learning?".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "magic!".to_string(), + name: None, }, ], bos_token: Some("[BOS]"), @@ -942,18 +951,22 @@ mod tests { Message { role: "user".to_string(), content: "Hi!".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "Hello how can I help?".to_string(), + name: None, }, Message { role: "user".to_string(), content: "What is Deep Learning?".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "magic!".to_string(), + name: None, }, ], bos_token: Some("[BOS]"), @@ -992,18 +1005,22 @@ mod tests { Message { role: "user".to_string(), content: "Hi!".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "Hello how can I help?".to_string(), + name: None, }, Message { role: "user".to_string(), content: "What is Deep Learning?".to_string(), + name: None, }, Message { role: "assistant".to_string(), content: "magic!".to_string(), + name: None, }, ], bos_token: Some("[BOS]"), diff --git a/router/src/lib.rs b/router/src/lib.rs index 87873821..8c7ca74b 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -378,6 +378,7 @@ impl ChatCompletion { message: Message { role: "assistant".into(), content: output, + name: None, }, logprobs: 
return_logprobs .then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))), @@ -453,6 +454,7 @@ fn default_request_messages() -> Vec { vec![Message { role: "user".to_string(), content: "My name is David and I".to_string(), + name: None, }] } @@ -547,6 +549,8 @@ pub(crate) struct Message { pub role: String, #[schema(example = "My name is David and I")] pub content: String, + #[schema(example = "\"David\"")] + pub name: Option, } #[derive(Clone, Debug, Deserialize, ToSchema)] From 69a2eadc528cd55d37cbdc010a67d10acebe9e6b Mon Sep 17 00:00:00 2001 From: Aaron Mihalik Date: Fri, 16 Feb 2024 05:44:04 -0500 Subject: [PATCH 084/153] Bugfix: eos and bos tokens positions are inconsistent (#1567) --- router/src/infer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/router/src/infer.rs b/router/src/infer.rs index 372ea117..f299875b 100644 --- a/router/src/infer.rs +++ b/router/src/infer.rs @@ -124,7 +124,7 @@ impl Infer { queue, shared, limit_concurrent_requests: semaphore, - template: (template, eos_token, bos_token), + template: (template, bos_token, eos_token), } } From 31b5e37f496071dfdb220f8624c9aabe9eec8a93 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Fri, 16 Feb 2024 11:58:58 +0100 Subject: [PATCH 085/153] chore: add pre-commit (#1569) --- .github/ISSUE_TEMPLATE/bug-report.yml | 10 +- .github/ISSUE_TEMPLATE/feature-request.yml | 2 +- .github/workflows/autodocs.yml | 6 +- .github/workflows/build_pr_documentation.yml | 2 +- .github/workflows/tests.yaml | 9 +- .github/workflows/upload_pr_documentation.yml | 2 +- .gitignore | 1 - .pre-commit-config.yaml | 18 +++ benchmark/Cargo.toml | 1 - benchmark/README.md | 8 +- clients/python/.gitignore | 2 +- clients/python/Makefile | 2 +- clients/python/README.md | 8 +- clients/python/text_generation/types.py | 1 + docs/index.html | 2 +- docs/openapi.json | 2 +- docs/source/basic_tutorials/consuming_tgi.md | 24 +-- .../source/basic_tutorials/non_core_models.md | 6 +- .../source/basic_tutorials/preparing_model.md | 6 +- docs/source/basic_tutorials/using_cli.md | 12 +- docs/source/conceptual/flash_attention.md | 5 +- docs/source/conceptual/quantization.md | 10 +- docs/source/conceptual/safetensors.md | 6 +- docs/source/conceptual/streaming.md | 18 +-- docs/source/conceptual/tensor_parallelism.md | 2 +- docs/source/installation.md | 4 +- docs/source/messages_api.md | 2 +- docs/source/quicktour.md | 2 +- integration-tests/models/test_mamba.py | 4 +- integration-tests/pytest.ini | 2 +- load_tests/common.js | 2 +- load_tests/starcoder_load.js | 2 +- load_tests/tgi.js | 2 +- load_tests/vllm.js | 2 +- router/README.md | 4 +- router/client/src/pb/.gitignore | 2 +- router/src/validation.rs | 1 + rust-toolchain.toml | 2 +- server/Makefile-awq | 2 +- server/Makefile-flash-att | 2 +- server/Makefile-selective-scan | 6 +- server/README.md | 2 +- .../fused_bloom_attention_cuda.cu | 2 +- .../cuda_func/column_remap.cuh | 2 +- .../exllama_kernels/cuda_func/q4_matrix.cuh | 2 +- .../exllama_kernels/hip_compat.cuh | 2 +- .../exllamav2_kernels/cuda/matrix_view.cuh | 2 +- .../exllamav2_kernels/cuda/q_gemm.cuh | 2 +- .../exllamav2_kernels/cuda/quant/qdq_2.cuh | 2 +- .../exllamav2_kernels/cuda/quant/qdq_4.cuh | 2 +- .../exllamav2_kernels/cuda/quant/qdq_5.cuh | 2 +- .../exllamav2_kernels/cuda/quant/qdq_6.cuh | 2 - .../exllamav2_kernels/cuda/quant/qdq_8.cuh | 2 +- .../exllamav2_kernels/cuda/util.cuh | 2 +- .../custom_modeling/flash_llama_modeling.py | 6 +- .../custom_modeling/flash_mistral_modeling.py | 6 +- 
.../custom_modeling/flash_mixtral_modeling.py | 12 +- .../custom_modeling/flash_neox_modeling.py | 6 +- .../custom_modeling/flash_phi_modeling.py | 6 +- .../flash_santacoder_modeling.py | 13 +- .../models/custom_modeling/idefics_config.py | 3 + .../custom_modeling/idefics_modeling.py | 8 +- .../custom_modeling/idefics_processing.py | 1 + .../models/custom_modeling/mamba_modeling.py | 21 ++- .../models/custom_modeling/mpt_modeling.py | 1 + .../models/flash_causal_lm.py | 6 +- .../models/galactica.py | 4 +- .../models/idefics_causal_lm.py | 42 ++--- server/text_generation_server/models/mamba.py | 143 ++++++++++++------ server/text_generation_server/models/rw.py | 8 +- .../models/seq2seq_lm.py | 20 ++- server/text_generation_server/models/types.py | 20 +-- server/text_generation_server/pb/.gitignore | 2 +- .../utils/gptq/quant_linear.py | 2 +- server/text_generation_server/utils/layers.py | 4 +- .../utils/logits_process.py | 2 +- server/text_generation_server/utils/tokens.py | 8 +- 77 files changed, 344 insertions(+), 232 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 12c93b9e..24ac3cbe 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -5,14 +5,14 @@ body: id: system-info attributes: label: System Info - description: | + description: | Please share your system info with us (`text-generation-launcher --env` if installed locally). - The full command line used that causes issues: + The full command line used that causes issues: OS version: Rust version (if self-compiling, `cargo version`): Model being used (`curl 127.0.0.1:8080/info | jq`): If local model please explicit the kind of model and/or equivalents. - Hardware used (GPUs, how many, on which cloud) (`nvidia-smi`): + Hardware used (GPUs, how many, on which cloud) (`nvidia-smi`): Deployment specificities (Kubernetes, EKS, AKS, any particular deployments): The current version being used: @@ -52,11 +52,11 @@ body: placeholder: | Steps to reproduce the behavior: - + 1. 2. 3. - + - type: textarea id: expected-behavior diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 5abc1565..f1a9135c 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -19,7 +19,7 @@ body: label: Motivation description: | Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too. 
- + - type: textarea id: contribution diff --git a/.github/workflows/autodocs.yml b/.github/workflows/autodocs.yml index a981c09c..7c5c6eca 100644 --- a/.github/workflows/autodocs.yml +++ b/.github/workflows/autodocs.yml @@ -6,15 +6,15 @@ on: jobs: update_docs: runs-on: ubuntu-latest - + steps: - name: Checkout code uses: actions/checkout@v2 - + - name: Install Launcher id: install-launcher run: cargo install --git https://github.com/${{ github.repository }} --branch ${{ github.head_ref }} text-generation-launcher - + - name: Check launcher Docs are up-to-date run: | echo text-generation-launcher --help diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml index b46216ec..a5ce39a5 100644 --- a/.github/workflows/build_pr_documentation.yml +++ b/.github/workflows/build_pr_documentation.yml @@ -16,4 +16,4 @@ jobs: commit_sha: ${{ github.event.pull_request.head.sha }} pr_number: ${{ github.event.number }} package: text-generation-inference - additional_args: --not_python_module + additional_args: --not_python_module diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index ecc8eb4d..5b19eb8c 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -71,12 +71,11 @@ jobs: pip install pytest export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }} pytest -s -vv server/tests - - name: Run Rust fmt + - name: Pre-commit checks run: | - cargo fmt --check - - name: Run Rust clippy - run: | - cargo clippy + pip install pre-commit + pre-commit install + pre-commit run --all-files - name: Run Rust tests run: | cargo test diff --git a/.github/workflows/upload_pr_documentation.yml b/.github/workflows/upload_pr_documentation.yml index b984ead2..ae00bb51 100644 --- a/.github/workflows/upload_pr_documentation.yml +++ b/.github/workflows/upload_pr_documentation.yml @@ -13,4 +13,4 @@ jobs: package_name: text-generation-inference secrets: hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} - comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} \ No newline at end of file + comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} diff --git a/.gitignore b/.gitignore index 1f9ba162..b3ca772b 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,3 @@ server/exllama_kernels/exllama_kernels/hip_func/ *_hip.cuh server/exllama_kernels/exllama_kernels/hip_buffers.cuh server/exllama_kernels/exllama_kernels/exllama_ext_hip.cpp - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..45bc07a5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,18 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + exclude: docs/source/basic_tutorials/launcher.md +- repo: https://github.com/psf/black + rev: 24.2.0 + hooks: + - id: black +- repo: https://github.com/doublify/pre-commit-rust + rev: v1.0 + hooks: + - id: fmt + - id: cargo-check + - id: clippy diff --git a/benchmark/Cargo.toml b/benchmark/Cargo.toml index 2dd2e64d..40738c4d 100644 --- a/benchmark/Cargo.toml +++ b/benchmark/Cargo.toml @@ -29,4 +29,3 @@ tui = {package = "ratatui", version = "0.23", default-features = false, features tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } hf-hub = "0.3.1" - diff --git a/benchmark/README.md b/benchmark/README.md index 7f51a731..17a02a30 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -6,12 +6,12 @@ -A lightweight benchmarking tool 
based inspired by [oha](https://github.com/hatoo/oha) +A lightweight benchmarking tool based inspired by [oha](https://github.com/hatoo/oha) and powered by [tui](https://github.com/tui-rs-revival/ratatui). -## Install +## Install -```shell +```shell make install-benchmark ``` @@ -27,4 +27,4 @@ Then run the benchmarking tool: ```shell text-generation-benchmark --tokenizer-name bigscience/bloom-560m -``` \ No newline at end of file +``` diff --git a/clients/python/.gitignore b/clients/python/.gitignore index 5758ba92..5a8ecaa7 100644 --- a/clients/python/.gitignore +++ b/clients/python/.gitignore @@ -155,4 +155,4 @@ dmypy.json cython_debug/ transformers -safetensors \ No newline at end of file +safetensors diff --git a/clients/python/Makefile b/clients/python/Makefile index 8b4334bd..42720875 100644 --- a/clients/python/Makefile +++ b/clients/python/Makefile @@ -3,4 +3,4 @@ unit-tests: install: pip install pip --upgrade - pip install -e . \ No newline at end of file + pip install -e . diff --git a/clients/python/README.md b/clients/python/README.md index 82f3ee0c..20243f4a 100644 --- a/clients/python/README.md +++ b/clients/python/README.md @@ -141,7 +141,7 @@ class Parameters: # Get decoder input token logprobs and ids decoder_input_details: bool # Return the N most likely tokens at each step - top_n_tokens: Optional[int] + top_n_tokens: Optional[int] # Decoder input tokens class InputToken: @@ -192,7 +192,7 @@ class BestOfSequence: # Generated tokens tokens: List[Token] # Most likely tokens - top_tokens: Optional[List[List[Token]]] + top_tokens: Optional[List[List[Token]]] # `generate` details @@ -236,7 +236,7 @@ class StreamResponse: # Generated token token: Token # Most likely tokens - top_tokens: Optional[List[Token]] + top_tokens: Optional[List[Token]] # Complete generated text # Only available when the generation is finished generated_text: Optional[str] @@ -248,4 +248,4 @@ class StreamResponse: class DeployedModel: model_id: str sha: str -``` \ No newline at end of file +``` diff --git a/clients/python/text_generation/types.py b/clients/python/text_generation/types.py index 3426411b..911114ee 100644 --- a/clients/python/text_generation/types.py +++ b/clients/python/text_generation/types.py @@ -134,6 +134,7 @@ class Parameters(BaseModel): raise ValidationError("`value` cannot be empty for `json` grammar") return v + class Request(BaseModel): # Prompt inputs: str diff --git a/docs/index.html b/docs/index.html index 16d143d8..f582d3ce 100644 --- a/docs/index.html +++ b/docs/index.html @@ -27,4 +27,4 @@ } - \ No newline at end of file + diff --git a/docs/openapi.json b/docs/openapi.json index da3969df..fd7cca7b 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -1290,4 +1290,4 @@ "description": "Hugging Face Text Generation Inference API" } ] -} \ No newline at end of file +} diff --git a/docs/source/basic_tutorials/consuming_tgi.md b/docs/source/basic_tutorials/consuming_tgi.md index 540f4b13..4829ec7c 100644 --- a/docs/source/basic_tutorials/consuming_tgi.md +++ b/docs/source/basic_tutorials/consuming_tgi.md @@ -23,7 +23,7 @@ You can simply install `huggingface-hub` package with pip. pip install huggingface-hub ``` -Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. You can then call `text_generation()` to hit the endpoint through Python. +Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. 
You can then call `text_generation()` to hit the endpoint through Python. ```python from huggingface_hub import InferenceClient @@ -83,8 +83,8 @@ Gradio is a Python library that helps you build web applications for your machin pip install huggingface-hub gradio ``` -Assume you are serving your model on port 8080, we will query through [InferenceClient](consuming_tgi#inference-client). - +Assume you are serving your model on port 8080, we will query through [InferenceClient](consuming_tgi#inference-client). + ```python import gradio as gr from huggingface_hub import InferenceClient @@ -110,30 +110,30 @@ gr.ChatInterface( ).queue().launch() ``` -The UI looks like this 👇 +The UI looks like this 👇
-You can try the demo directly here 👇 +You can try the demo directly here 👇