From 2772ae92f33f8942ef57d75354f1d1cd5f84ec5c Mon Sep 17 00:00:00 2001
From: xxl <505279206@qq.com>
Date: Fri, 22 Nov 2024 15:08:15 +0800
Subject: [PATCH] first commit

---
 LOGO.jpg                         |  Bin 0 -> 123604 bytes
 MODEL_LICENSE.md                 |   48 ++
 README.md                        |  317 +++++++-
 config.json                      |   49 ++
 configuration.json               |    1 +
 configuration_chatglm.py         |   59 ++
 generation_config.json           |    5 +
 modeling_chatglm.py              | 1197 ++++++++++++++++++++++++++++++
 pytorch_model-00001-of-00002.bin |    3 +
 pytorch_model-00002-of-00002.bin |    3 +
 pytorch_model.bin.index.json     |  207 ++++++
 quantization.py                  |  188 +++++
 requirements.txt                 |   14 +
 special_tokens_map.json          |    1 +
 tokenization_chatglm.py          |  264 +++++++
 tokenizer.model                  |    3 +
 tokenizer_config.json            |   15 +
 17 files changed, 2372 insertions(+), 2 deletions(-)
 create mode 100644 LOGO.jpg
 create mode 100644 MODEL_LICENSE.md
 create mode 100644 config.json
 create mode 100644 configuration.json
 create mode 100644 configuration_chatglm.py
 create mode 100644 generation_config.json
 create mode 100644 modeling_chatglm.py
 create mode 100644 pytorch_model-00001-of-00002.bin
 create mode 100644 pytorch_model-00002-of-00002.bin
 create mode 100644 pytorch_model.bin.index.json
 create mode 100644 quantization.py
 create mode 100644 requirements.txt
 create mode 100644 special_tokens_map.json
 create mode 100644 tokenization_chatglm.py
 create mode 100644 tokenizer.model
 create mode 100644 tokenizer_config.json

diff --git a/LOGO.jpg b/LOGO.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..39644468d36bcab315f1119a4b3b4a7d8896661d
GIT binary patch
literal 123604
[base85-encoded binary payload for LOGO.jpg (123604 bytes) omitted]
zx}3>><`OvH@80Q2hYQuN(|4;;p~W>Rr58D8gGHuaRGU6?8-P}iumH^vLGHqv3(h$% zFMN=Mi6Hv**fZu4B>YfZj`n;pwd?k)N=?jaeD&;VX6&6P6=wI+o~Ij2Nxf<0^XP9F z?-WE<(mq^Vk>NH>GO}AJ)y5$2+KEgGY?XJYwTSywyo8=sH9kl9Yw=L9m_lwO>n?C- zV&hhgvx2k?A`v#`)|c(oBuOOpAIR1WRtZ_c5|M!CT+~eB7^xt3H6@ky+;M4G%!!gf z(g{sJikRciHQA3mBdk^PNT&5;W)x$!!^9e{pBCbCvCIi`m&QZTg1l=3hB)d}MD2No z34d?7TQzyP&&B`%aU}((B4Nn4$w=M22}iJDH{P*qYzGGVubG;dn4V67z=Ppxnxdg- z^j(L~g+)0YdBV*XszETx2>#vBJ?U@(o)xVY21=A!mt8Nz5yF~SS_WdZBk4-?yS3I; zd6Oy0b^~W|1%Z`)9RW{CgUC-8QDju;Pd7)Feo2vy)3jY}U<^E1^(5{ltTAbxRD*(0 z*b`Qru9eoUD5G#>F!Z-U#Yev|;ceE~wt(3FPE!R8e8a{Tem%PUdQrUL*nFJ<1{F{pwhRu9`Hc+>-BrE4^as#^P z;l)*+SAx|{VX$b+(WoDP{gQ(b-Afg9hNVeMA?<|m>KwIPxiZbaC!;1u@->-8yO zoa3Sgi8LLpYUIxt+;xJ>PSu>pvPVGK(<^cru~no9(h5M#Oa-R#3kg9!B=<|H>OG#r z9L=?u?oj~0TlPMuzqxJIyqNdkD;M%q%NdhK5F7_PtjgWuuwM*)Gt77t^wSIf&!{LtE{EW|yl+W!{35 z0rBf^#%!Sa_8)Cf#cx=*FxcsAl%poJNOjx1(<>BPFKS@VtI3)@`w<9QR%cMk`@ zRQdAoxxD|^tsJ5olHDG3G<3<;sCbC*Tk_35ic+4n>QmToMW&%C&Gi92xS;sqitm%| zfbyhC9;70ANBddXDncZTxSADuZ9Wmw!||TeHKW!AcV-_3cT(D>a)r7{K7+cd)$U}R z><@T=fI1ORzN3SwFcx84s^u+bU=b4ImzUvu5>Fx_bB~U%Gs3NrfxS!FPaXZEbnHEg ziUB!)@ztDcAaR~OtdbH}HpsY`T}I8THicYEpE1MA(_(v-6ZckwJZrEkGq}D1)K#|o z1$n@=q}dkzeho81!t};{Z0gWp8CswMiedoOs=U_(l7o9q94oz|t(>r5rv7+GzBxp= zvZpfFMgyo{buU;L_qji)NR86DS1(*M)p27xD6PPFzd%9*UZx+9P`mE5eZ9w+nC)|*=E{bp`bO2puyHv(W~fqKTXEcq>W*P|1c#@n zL!ZU@tm^E(p1AZqLzH_d8>wG6YJ%`9B|rP6fcUbIK&6sAB9F_5qOqnLeYzf!4s7_v ziB7xrVX`WL8XFQSDGC6t1kVI}!2N<`g<8A@8)Vtvc=)fq@s?|w^+CU5I5!mziVK_HQ~7jp&R%8p5+#KJNZJPs{y8vVgb-$z2gMr}Dz0X=P=vB-~ZayM<$gPckg0$YH^Pz)XepE zZ3}e1Pv9|oJ8XI_6u4($%Tkx6cbr-YL287FvK^u{X|Kv8@gzZ_cMs>M;6E0`h*vPz zENZR7uop@^q%#Nr%}Ra3692)qrWRHe)|Lsg=>mtlObkLG#@Oy%ecS>bRV3NVvH&hz$xP*%W%-7mG zX)%WGVOMBKKS4DybezC&;sdrgClKTxGDC_8C4q%cGYE7(-;DFf2(`l~;Zf@`iifni zt#f*(eS(hO4mM^q(KQF>6YrhJ)A2DZtIBCmpzYFrr*fD~<&2;%$sjOh9nduH_zhc- z55)IYTZ`ixZJdWIMy%+0Uc+`hcS35YIB6ThrVgPywT6u!Zy^}~$^XL$&DX9zt+Z+O zA~iNE#gKKy@crk!{+nE@mTH_iB!oZ|)}WhKjE!qgFV@Wut&_91<#9nNOs`KuZv^n8 zqS~4Yi}J3nDl(jclv;hs)icwh!0fh{Xv9?jb>3H|LA`=p_SZuC+M&d-{$e=Bf4EY76KSzTY3j?BazOM zzr(F;QZsz@rKac05cve(KBzNgghg>`4Z4+7T{H*U+7U=(ZFGn{sas=K`BR|Rc9r;L zFx+{IrF8;=b)5w{QI31SjoK|M^5B#aM8UO=Vz*+ym^v_=s;#f=(UM+NsyR9tsZ?ap60Dl2cXF!SYy5b>BXf zU5|!Z{gl4F5xSDK^aM+H_UA4cUGt!{cwRz_HR&_ge9yF$2?pcj%EN+Et_VpykVU?# zc(DMlDH=~&bup6g3qBW%JNnD(R1F1@4*T=xpDb3u9SL8s?%S^ZL`4MuhWnaUkCm#G zuIgLGY?nYK^hkWy)siL~%WmH=e#6Nw65(!0Z$p&cnfnLV?&dDY{_!PaG*anw%;9^T z`o(6)Paw@yGo+qLHi(DsyxuZd6l(+lHNB_P-J_rH7>u1j9N2Y4AYEo#Ru_Aw;Nh$K z)_YOw27S<9KC4$9I-K1(0{W=fs!gOJK9}t?Dt6f}VTm>oFKeKi&s!rbR_aq99sC&_ z$&RDXun)Z>R!a^ci18`*`66Gw1F28vpX>V0fVnDu5-wXPrO&~UndkpG4^#fE6R)Cd zj2Bm?)Bu9b!q%%k&lMv6<-Xmt2Lh&P?IMm85fH#O9zM5*JwxOtuSFi-_o6wbcdQQF z%&dXg2F!p)T2mHwL=SfZxUdzchEFt9w_|F;ot(aG&HJTzLxPU~Xuyf++bwKYuSInf zfSx-yt2Sa$+r?}hg$s4ur*DRltyj^@#shRB+hzutV6KZ;;`C8LK%&5NPuU72@z)^t z2PL1MEL9cUzdMrUemM`dQqg<(%y@KAWl*2tst5Rg8fUST&u3&00m-@cBI0A8+HIV zr?^&Ysz{p`<+Z8qcRksPi;)eNR1sXv3<%+on@f0mc!n;i5y+^VudV9CHE3+LH z0G0*9)=?d^3{!s$*IXjiVJ=$kRpSa~J)8WjFu-XC5&1k)!#^duQ_;!t(jk+RDwmkDiZe;MpcVNo`y8 z#)rmykum!LAfpNbQ^T|QQx@7rH4^jnKUVi0-&=H?3vJSNWF)v^x{9b?c-80xZ-fR5 z;^()4s;sLVOZFTVvlm6rH8K2JPOGl^;EuRonT1>~^P4JPhjO^r8E*SW){-R=}3n7g=Nwh89$BD?3^4hr=0j_lKk~?O1X-usE8_O;@y)@dHEuY?yzr5 z{Ky?*5~TXpK69a#^$S3aEpetL>m(M#UpHzN|0Gyw)aOboqT&>+#a770#P|IKPH9T=UTLvSaAo%bY;5^dApORqu=I#D{41APi(+cXc!F(h zH>UB|&ep#dTRiZZfZ@$Ide%MPuh)sZ%!p5tBR&J#E~IMZWk?06wkM#8MB?%dh5fpg zJcYNB;i5}bwg*D5F|6XW5C!=q^_4-YEk`q9${&b)tc0AiB70Y#Nk*7L&LqI;ajcSh3v`iv?^iD4d~#dmAsKOj 
zsf}#ii*mU5j2D})P~jS-SAO=|RGb9ieI<60XIO0{DBfkq@%+H&1A)7D;q43j_cHCp3K}K8aw$0;{wkO{hIr_~lOVww{UpJm`!{m{eqN%MW3Z+tO$%YK zQ`Tz^_h>JF3_LxV1PBGmBuW%iusX+0n%}Ec{q;^1QV(*Xw{v{o)UMX~dIU{Gscr|G|$ z{FdTo@!I5On-2R~pCQF&eiKo3{LY_$Z`%Lj!;w!Jq*GDoEBeS7k0IJq)n<&PA*E~@ z%h0W@@_>jx>rQ5unY9AN?8SMKhp}DmO8RUyFVyAQ;8yK$KV?a*Hna0K%^Wt)sR$qk zwaw6Zi6bySUl`@W|*)9Zs za7)F05RrW{J*3#j%%6qzBC6Ng{pYhKtEyL%=9_7}BB!i*I_Ed216%W(qpd+tpXb5b zjCogL^+rXC5)57 zz#XTQ6cOW8XW!i|Seoz(QZm^lDYiigQK4vUfI98s)L$YsZ>(-vUpD=iw&d zP+#|v4-LqU;IIS|wq^-531BOPumwya2?U5>2ol03YET57%S=sA)x6BpJakp=(|tJg zzo&j*-CK3fch3LQzQhd?WUf-}Suh zvqQ}nP3<3z1AZDrGPz6wH2N*&08VyVPk<6u)ACNs?&LCRvwqi^j?j33`O`2P`jBq` zY=dEMJbLDEmL9GU^ zl{XJTk#=s7isFHB-z`d|0nf}VaM#Oul#dG1Tgfda66@WHT~n`+OKf29fKov*Fvjvf zYhqRHxUpSlx+*9xsof25<(r$Gtz8bsVVkY}W7mK0MRsPSev`*I-4vsVj=eqj5FRc2 zgG-z&y^pYck$rmALKXs9tbJ`vxG&fPv`qZ>rO8-Sm}(37Vu8?8fRf&Q!;C<>J42wA zEc3?@f9*`gX;Ti;xz?jKL4{k!4U5so=%oP+LNLR|UB@X-gHcSPhPrAwZLn;C(;oYm zJKg04`ZG#IlO@D*gc5qY;2Oja3U~Eup2h~QFZ#(5s*&uvQUBsNN4QZiSUZ@_ZJb~_ zEz>eoq;so$S$wOcurPkpf^t?h)k*GlABHY+2YZhV305z6?ijp!#$ns0+Fr>+H!y^y z0quLxBqm_0JVilX7;K(SQNc9h6N8c);%ab3O_&n&nmt>4O!U%B2p^(18dK+=WCWc! z1If5<{%&hOk(V47U>#Cpopku|omm}Etpk;0(_~E!*?Sswul$!(eCt(LFrFC6hW;r) zsXNxi=6KoNwA522{9GCdR9+Gy%Z~TUV?vjEJ{_YH7Z?>V?QZ!76R0b~Y{Mf*jnD9EeM~btFiF zbkFp0Y8xYrpQ`?FQ?**s6X4E8rv;%hWtL#)Ij{#_a_#OUky`ogkxZ69g{NibCLJCl zr%_d@!vo2^wQuqSrp;lYq^*w~#efqE!Hn4I6@U*LOj{k-(j^kVn3Q`ks}=b~U09;F z%fwX0#eq02oq>r7S%{DB2@yDhWSIf%bBmY_2zzF2iKbSzM$6J}HvzqfK?MG>DAT(7 zcXl*>h54Ji`MiQ@6AOQCnJ47Yi|y{no!HwnIQbJ}baIxVILde8u7>Bh~21`lnwz%e% zzaAF%xFg0d-(e|Ix2#&xwi=RCM&;$W%^$r|R@;5d(XjwyW<{5X$@7ls(3-PSEd8ew zUt)3jZLDBsy+rWVCenrk6sN6-Oivs;wsH+3!E|Q!^H)cqgcbUHS|@)ysOTeP=)KkF zb!|}&XLY;aJq$zGQR9mxE~FrXdu%+CTyWmgD>BqE&l)AU|Kp}^YrE`%f^y3|*U7yh zOg?lzWeS}kx9nSAcfDbD*(%Mhw2m72;^t<8UDFT!gtIW?%0s5c_5fx~V9
I}7QmuDB9sOb)5*VL(6nULsa)Ztia!&Xdt>ld| z${P8kqTFwnzRs?nP`$x0p~j6f5G7d9v-pPD!8#$wx@&0lJU7kW6?1Mktd zB44|mUak9hGtuJHXWj~sEK|90rN^)_907_FC7ory@15P*D~cy41iv-)nEmuwcUn?? zbk?&GGyfb!s~A!N>I?zp`fkte>=Sj8w}2qe_3xz|o4At(+Y#M~paev&`24?v=0Hy$ z5c9`qV%-z9P4Fbn? ztl8xRwu(U&cDs+0Yy(OS#C-0qQuy9(r8^KMrTHgJDk+K_7e8Fe%Ply5BhkMU^mpGe zlX75*hoqVx#%s2E9RTycHRJL-GC{4)6%pnexTANrNjNBeE88&BMgq#5(TM-WpB)9DG?8^K#>Ae!^_D^CsMXBQdE| zf9rUC)Z`oClHcGH|Ac6OXt0Yc6bPz;hi@7FM~Uz+rR$L7ex-FX`;QR%(Vfc$CyJE? z&&lZfZg?-!aJAj^2V_tC7cJGKm3AqO@Ju{F=ZCPIap-x3<)wOmN8L^VjZP!Pksu!P z8}W&4 + +

-CodeFuse-CodeGeeX2-6B
\ No newline at end of file
+[[中文]](#chinese) [[English]](#english)
+
+#### Clone with HTTP
+```bash
+ git clone https://www.modelscope.cn/codefuse-ai/CodeFuse-CodeGeeX2-6B.git
+```
+
+
+
+## Model Description
+
+CodeFuse-CodeGeeX2-6B is a 6B Code-LLM fine-tuned with LoRA on multiple code tasks, built on top of the base model CodeGeeX2.
+
+<br>
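+
+*Background (not part of the original release notes):* LoRA fine-tuning freezes the pretrained weights and learns a small low-rank update next to each adapted linear layer. The sketch below is a minimal, illustrative PyTorch module (the name `LoRALinear` and the hyperparameters are our own; the actual training was done with the MFTCoder framework linked in the Code Community section below):
+
+```python
+import torch.nn as nn
+
+class LoRALinear(nn.Module):
+    """Illustrative LoRA wrapper: y = base(x) + (alpha / r) * B(A(x))."""
+    def __init__(self, base: nn.Linear, r: int = 8, alpha: int = 16):
+        super().__init__()
+        self.base = base
+        for p in self.base.parameters():
+            p.requires_grad_(False)                 # freeze the pretrained weights
+        self.lora_A = nn.Linear(base.in_features, r, bias=False)   # rank-r down-projection
+        self.lora_B = nn.Linear(r, base.out_features, bias=False)  # rank-r up-projection
+        nn.init.zeros_(self.lora_B.weight)          # the update starts as a no-op
+        self.scaling = alpha / r
+
+    def forward(self, x):
+        return self.base(x) + self.scaling * self.lora_B(self.lora_A(x))
+```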
+
+## News and Updates
+
+ 🔥🔥 2023-11-10 CodeFuse-CodeGeeX2-6B has been released, achieving a pass@1 (greedy decoding) score of 45.12% on HumanEval, a 9.22-percentage-point increase over CodeGeeX2's 35.9%.
+
+ 🔥🔥 2023-10-20 The CodeFuse-QWen-14B technical documentation has been released. For details, please refer to the CodeFuse article on our WeChat official account: https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw
+
+ 🔥🔥 2023-10-16 CodeFuse-QWen-14B has been released, achieving a pass@1 (greedy decoding) score of 48.78% on HumanEval, a 16-percentage-point increase over Qwen-14b's 32.3%.
+
+ 🔥🔥 2023-09-27 CodeFuse-StarCoder-15B has been released, achieving a pass@1 (greedy decoding) score of 54.9% on HumanEval, a 21-percentage-point increase over StarCoder's 33.6%.
+
+🔥🔥🔥 2023-09-26 We are pleased to announce the release of the [4-bit quantized version](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits/summary) of [CodeFuse-CodeLlama-34B](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B/summary). Despite quantization, the model still achieves a remarkable 73.8% accuracy (greedy decoding) on the HumanEval pass@1 metric.
+
+🔥🔥🔥 2023-09-11 [CodeFuse-CodeLlama-34B](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B/summary) has achieved 74.4% pass@1 (greedy decoding) on HumanEval, which is the SOTA result among open-sourced LLMs at present.
+
+<br>
+ +## Code Community + +**Homepage**: 🏡 https://github.com/codefuse-ai (**Please give us your support with a Star🌟 + Fork🚀 + Watch👀**) + ++ If you wish to fine-tune the model yourself, you can visit ✨[MFTCoder](https://github.com/codefuse-ai/MFTCoder)✨✨ + ++ If you wish to deploy the model yourself, you can visit ✨[FasterTransformer4CodeFuse](https://github.com/codefuse-ai/FasterTransformer4CodeFuse)✨✨ + ++ If you wish to see a demo of the model, you can visit ✨[CodeFuse Demo](https://github.com/codefuse-ai/codefuse)✨✨ + +
+ +## Performance + + +| Model | HumanEval(pass@1) | Date | +|:----------------------------|:-----------------:|:-------:| +| **CodeFuse-CodeLlama-34B** | **74.4%** | 2023.9 | +|**CodeFuse-CodeLlama-34B-4bits** | **73.8%** | 2023.9 | +| WizardCoder-Python-34B-V1.0 | 73.2% | 2023.8 | +| GPT-4(zero-shot) | 67.0% | 2023.3 | +| PanGu-Coder2 15B | 61.6% | 2023.8 | +| CodeLlama-34b-Python | 53.7% | 2023.8 | +| CodeLlama-34b | 48.8% | 2023.8 | +| GPT-3.5(zero-shot) | 48.1% | 2022.11 | +| OctoCoder | 46.2% | 2023.8 | +| StarCoder-15B | 33.6% | 2023.5 | +| Qwen-14b | 32.3% | 2023.10 | +| **CodeFuse-StarCoder-15B** | **54.9%** | 2023.9 | +| **CodeFuse-QWen-14B** | **48.78%** | 2023.10 | +| **CodeFuse-CodeGeeX2-6B** | **45.12%** | 2023.11 | + + +
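+
+*Note on the metric (illustrative, not part of this repository):* pass@1 with greedy decoding is the fraction of HumanEval tasks whose single greedy completion passes all unit tests. For sampled evaluation, the general unbiased pass@k estimator from the HumanEval paper can be computed as below (`pass_at_k` is a hypothetical helper name):
+
+```python
+import math
+
+def pass_at_k(n: int, c: int, k: int) -> float:
+    """Unbiased pass@k estimator: n samples per task, c of them correct.
+    pass@k = 1 - C(n - c, k) / C(n, k), then averaged over all tasks."""
+    if n - c < k:
+        return 1.0  # every size-k subset contains at least one correct sample
+    return 1.0 - math.comb(n - c, k) / math.comb(n, k)
+
+# Greedy decoding corresponds to n = 1, k = 1: each task scores 1.0 or 0.0.
+```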
+ +## Requirements + +* python>=3.8 +* pytorch>=2.0.0 +* transformers==4.33.2 +* Sentencepiece +* CUDA 11.4 +
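+
+Optionally, the pinned versions above can be sanity-checked before loading the model (a hypothetical snippet, not shipped with this repo):
+
+```python
+import sys
+import torch
+import transformers
+from packaging import version  # available as a transformers dependency
+
+assert sys.version_info >= (3, 8), "python>=3.8 is required"
+assert version.parse(torch.__version__.split("+")[0]) >= version.parse("2.0.0"), "pytorch>=2.0.0 is required"
+assert transformers.__version__ == "4.33.2", "transformers==4.33.2 is required"
+print("CUDA available:", torch.cuda.is_available())  # a CUDA 11.4 toolchain is expected
+```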
+
+## Inference String Format
+
+The inference string is a concatenated string formed by combining the conversation data (system, human, and bot contents) in the training data format. It is used as the input during inference.
+Here is an example format of the concatenated string:
+
+```python
+"""
+system
+System instruction
+human
+Human 1st round input
+bot
+Bot 1st round output<|endoftext|>
+human
+Human 2nd round input
+bot
+Bot 2nd round output<|endoftext|>
+...
+...
+...
+human
+Human nth round input
+bot
+{Bot output to be generated}<|endoftext|>
+"""
+```
+
+When running inference, always make sure your input string ends with "\bot\n" to prompt the model to generate an answer.
+
+
+## Quickstart
+
+
+```bash
+pip install transformers modelscope cpm_kernels -U
+pip install -r requirements.txt
+```
+
+```python
+import torch
+from modelscope import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    snapshot_download
+)
+model_dir = snapshot_download('codefuse-ai/CodeFuse-CodeGeeX2-6B', revision='v1.0.0')
+tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+tokenizer.padding_side = "left"
+tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("")
+tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("")
+tokenizer.pad_token = ""
+tokenizer.eos_token = ""
+# If CUDA memory is not enough, try 4-bit loading by setting load_in_4bit=True
+model = AutoModelForCausalLM.from_pretrained(model_dir,
+                                             trust_remote_code=True,
+                                             load_in_4bit=False,
+                                             device_map="auto",
+                                             torch_dtype=torch.bfloat16)
+model.eval()
+
+HUMAN_ROLE_START_TAG = "human\n"
+BOT_ROLE_START_TAG = "bot\n"
+
+text = f"{HUMAN_ROLE_START_TAG}write a python function of quick sort.\n{BOT_ROLE_START_TAG}"
+inputs = tokenizer(text, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
+outputs = model.generate(
+    inputs=inputs["input_ids"],
+    attention_mask=inputs["attention_mask"],
+    max_new_tokens=512,
+    top_p=0.95,
+    temperature=0.1,
+    do_sample=True,
+    eos_token_id=tokenizer.eos_token_id,
+    pad_token_id=tokenizer.pad_token_id
+)
+gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+print(gen_text)
+```
+
+
+
+
+
+
+
+
+
+
+## 模型简介
+
+CodeFuse-CodeGeeX2-6B 是一个通过LoRA对基座模型CodeGeeX2进行多代码任务微调的代码大模型。
+<br>
+
+## 新闻
+
+ 🔥🔥 2023-11-10 开源了CodeFuse-CodeGeeX2-6B模型,在HumanEval pass@1(greedy decoding)上可以达到45.12%, 比CodeGeeX2提高了9.22%的代码能力(HumanEval)
+
+ 🔥🔥 2023-10-20 公布了CodeFuse-QWen-14B技术文档,感兴趣详见微信公众号CodeFuse文章:https://mp.weixin.qq.com/s/PCQPkvbvfxSPzsqjOILCDw
+
+ 🔥🔥 2023-10-16 开源了CodeFuse-QWen-14B模型,在HumanEval pass@1(greedy decoding)上可以达到48.78%, 比Qwen-14b提高了16%的代码能力(HumanEval)
+
+ 🔥🔥 2023-09-27 开源了CodeFuse-StarCoder-15B模型,在HumanEval pass@1(greedy decoding)上可以达到54.9%, 比StarCoder提高了21%的代码能力(HumanEval)
+
+🔥🔥🔥 2023-09-26 [CodeFuse-CodeLlama-34B 4bits](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B-4bits/summary)量化版本发布,量化后模型在HumanEval pass@1指标为73.8% (贪婪解码)。
+
+🔥🔥🔥 2023-09-11 [CodeFuse-CodeLlama-34B](https://modelscope.cn/models/codefuse-ai/CodeFuse-CodeLlama-34B/summary)发布,HumanEval pass@1指标达到74.4% (贪婪解码), 为当前开源SOTA。
+
+<br>
+ +## 代码社区 +**大本营**: 🏡 https://github.com/codefuse-ai (**请支持我们的项目Star🌟 + Fork🚀 + Watch👀**) + ++ 如果您想自己微调该模型,可以访问 ✨[MFTCoder](https://github.com/codefuse-ai/MFTCoder)✨✨ + ++ 如果您想自己部署该模型,可以访问 ✨[FasterTransformer4CodeFuse](https://github.com/codefuse-ai/FasterTransformer4CodeFuse)✨✨ + ++ 如果您想观看该模型示例,可以访问 ✨[CodeFuse Demo](https://github.com/codefuse-ai/codefuse)✨✨ + +
+
+
+## 评测表现
+
+### 代码
+
+
+| 模型 | HumanEval(pass@1) | 日期 |
+|:----------------------------|:-----------------:|:-------:|
+| **CodeFuse-CodeLlama-34B** | **74.4%** | 2023.9 |
+|**CodeFuse-CodeLlama-34B-4bits** | **73.8%** | 2023.9 |
+| WizardCoder-Python-34B-V1.0 | 73.2% | 2023.8 |
+| GPT-4(zero-shot) | 67.0% | 2023.3 |
+| PanGu-Coder2 15B | 61.6% | 2023.8 |
+| CodeLlama-34b-Python | 53.7% | 2023.8 |
+| CodeLlama-34b | 48.8% | 2023.8 |
+| GPT-3.5(zero-shot) | 48.1% | 2022.11 |
+| OctoCoder | 46.2% | 2023.8 |
+| StarCoder-15B | 33.6% | 2023.5 |
+| Qwen-14b | 32.3% | 2023.10 |
+| **CodeFuse-StarCoder-15B** | **54.9%** | 2023.9 |
+| **CodeFuse-QWen-14B** | **48.78%** | 2023.10 |
+| **CodeFuse-CodeGeeX2-6B** | **45.12%** | 2023.11 |
+
+
+## Requirements
+
+* python>=3.8
+* pytorch>=2.0.0
+* transformers==4.33.2
+* Sentencepiece
+* CUDA 11.4
+<br>
+ +## 推理数据格式 + +推理数据为模型在训练数据格式下拼接的字符串形式,它也是推理时输入prompt拼接的方式: + +```python +""" +system +这是System指令 +human +这是第1轮用户输入的问题 +bot +这是第1轮模型生成的内容<|endoftext|> +human +这是第2轮用户输入的问题 +bot +这是第2轮模型生成的内容<|endoftext|> +... +... +... +human +这是第n轮用户输入的问题 +bot +{模型现在要生成的内容}<|endoftext|> +""" +``` + +推理时,请确保拼接的prompt字符串以"\bot\n"结尾,引导模型生成回答。 + +## 快速使用 + + +```bash +pip install transformers modelscope cpm_kernels -U +pip install -r requirements.txt +``` + +```python +import torch +from modelscope import ( + AutoTokenizer, + AutoModelForCausalLM, + snapshot_download +) +model_dir = snapshot_download('codefuse-ai/CodeFuse-CodeGeeX2-6B',revision = 'v1.0.0') +tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True) +tokenizer.padding_side = "left" +tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("") +tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("") +tokenizer.pad_token = "" +tokenizer.eos_token = "" +# try 4bit loading if cuda memory not enough +model = AutoModelForCausalLM.from_pretrained(model_dir, + trust_remote_code=True, + load_in_4bit=False, + device_map="auto", + torch_dtype=torch.bfloat16) +model.eval() + +HUMAN_ROLE_START_TAG = "human\n" +BOT_ROLE_START_TAG = "bot\n" + +text = f"{HUMAN_ROLE_START_TAG}write a python function of quick sort.\n{BOT_ROLE_START_TAG}" +inputs = tokenizer(text, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda") +outputs = model.generate( + inputs=inputs["input_ids"], + attention_mask=inputs["attention_mask"], + max_new_tokens=512, + top_p=0.95, + temperature=0.1, + do_sample=True, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + ) +gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True) +print(gen_text) +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..2cef5bf --- /dev/null +++ b/config.json @@ -0,0 +1,49 @@ +{ + "_name_or_path": "/mnt/user/qumu/download_models/codegeex2-6b", + "add_bias_linear": false, + "add_qkv_bias": true, + "apply_query_key_layer_scaling": true, + "apply_residual_connection_post_layernorm": false, + "architectures": [ + "ChatGLMForConditionalGeneration" + ], + "attention_dropout": 0.0, + "attention_softmax_in_fp32": true, + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration" + }, + "bias_dropout_fusion": true, + "eos_token": "", + "eos_token_id": 2, + "ffn_hidden_size": 13696, + "fp32_residual_connection": false, + "hidden_dropout": 0.0, + "hidden_size": 4096, + "interleaved_qkv": false, + "kv_channels": 128, + "layernorm_epsilon": 1e-05, + "model_type": "chatglm", + "multi_query_attention": true, + "multi_query_group_num": 2, + "num_attention_heads": 32, + "num_layers": 28, + "original_rope": true, + "pad_token": "", + "pad_token_id": 0, + "padded_vocab_size": 65024, + "post_layer_norm": true, + "pre_seq_len": null, + "prefix_projection": false, + "quantization_bit": 0, + "rmsnorm": true, + "rotary_percent": 0.5, + "seq_length": 8192, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.33.2", + "use_cache": true, + "vocab_size": 65024 +} diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..f9291c3 --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ 
+{"framework":"Pytorch","task":"text-generation"} \ No newline at end of file diff --git a/configuration_chatglm.py b/configuration_chatglm.py new file mode 100644 index 0000000..3730b4e --- /dev/null +++ b/configuration_chatglm.py @@ -0,0 +1,59 @@ +from transformers import PretrainedConfig + + +class ChatGLMConfig(PretrainedConfig): + model_type = "chatglm" + def __init__( + self, + num_layers=28, + padded_vocab_size=65024, + hidden_size=4096, + ffn_hidden_size=13696, + kv_channels=128, + num_attention_heads=32, + seq_length=2048, + hidden_dropout=0.0, + attention_dropout=0.0, + layernorm_epsilon=1e-5, + rmsnorm=True, + apply_residual_connection_post_layernorm=False, + post_layer_norm=True, + add_bias_linear=False, + add_qkv_bias=False, + bias_dropout_fusion=True, + multi_query_attention=False, + multi_query_group_num=1, + apply_query_key_layer_scaling=True, + attention_softmax_in_fp32=True, + fp32_residual_connection=False, + quantization_bit=0, + pre_seq_len=None, + prefix_projection=False, + **kwargs + ): + self.num_layers = num_layers + self.vocab_size = padded_vocab_size + self.padded_vocab_size = padded_vocab_size + self.hidden_size = hidden_size + self.ffn_hidden_size = ffn_hidden_size + self.kv_channels = kv_channels + self.num_attention_heads = num_attention_heads + self.seq_length = seq_length + self.hidden_dropout = hidden_dropout + self.attention_dropout = attention_dropout + self.layernorm_epsilon = layernorm_epsilon + self.rmsnorm = rmsnorm + self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm + self.post_layer_norm = post_layer_norm + self.add_bias_linear = add_bias_linear + self.add_qkv_bias = add_qkv_bias + self.bias_dropout_fusion = bias_dropout_fusion + self.multi_query_attention = multi_query_attention + self.multi_query_group_num = multi_query_group_num + self.apply_query_key_layer_scaling = apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = attention_softmax_in_fp32 + self.fp32_residual_connection = fp32_residual_connection + self.quantization_bit = quantization_bit + self.pre_seq_len = pre_seq_len + self.prefix_projection = prefix_projection + super().__init__(**kwargs) \ No newline at end of file diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..3264de0 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,5 @@ +{ + "_from_model_config": true, + "eos_token_id": 2, + "transformers_version": "4.33.2" +} diff --git a/modeling_chatglm.py b/modeling_chatglm.py new file mode 100644 index 0000000..bfa6a55 --- /dev/null +++ b/modeling_chatglm.py @@ -0,0 +1,1197 @@ +""" PyTorch ChatGLM model. 
""" + +import math +import copy +import warnings +import sys +import torch +import torch.utils.checkpoint +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss, LayerNorm +from torch.nn.utils import skip_init +from typing import Optional, Tuple, Union, List, Callable, Dict, Any + +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) + +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging +from transformers.generation.logits_process import LogitsProcessor +from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput + +from .configuration_chatglm import ChatGLMConfig + +# flags required to enable jit fusion kernels + +if sys.platform != 'darwin': + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM2-6B" +_CONFIG_FOR_DOC = "ChatGLM6BConfig" + +CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "THUDM/chatglm2-6b", + # See all ChatGLM models at https://huggingface.co/models?filter=chatglm +] + + +def default_init(cls, *args, **kwargs): + return cls(*args, **kwargs) + + +class InvalidScoreLogitsProcessor(LogitsProcessor): + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + if torch.isnan(scores).any() or torch.isinf(scores).any(): + scores.zero_() + scores[..., 5] = 5e4 + return scores + + +class PrefixEncoder(torch.nn.Module): + """ + The torch.nn model to encode the prefix + Input shape: (batch-size, prefix-length) + Output shape: (batch-size, prefix-length, 2*layers*hidden) + """ + + def __init__(self, config: ChatGLMConfig): + super().__init__() + self.prefix_projection = config.prefix_projection + if self.prefix_projection: + # Use a two-layer MLP to encode the prefix + kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2 + self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size) + self.trans = torch.nn.Sequential( + torch.nn.Linear(kv_size, config.hidden_size), + torch.nn.Tanh(), + torch.nn.Linear(config.hidden_size, kv_size) + ) + else: + self.embedding = torch.nn.Embedding(config.pre_seq_len, + config.num_layers * config.kv_channels * config.multi_query_group_num * 2) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.trans(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values + + +def split_tensor_along_last_dim( + tensor: torch.Tensor, + num_partitions: int, + contiguous_split_chunks: bool = False, +) -> List[torch.Tensor]: + """Split a tensor along its last dimension. + + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + + Returns: + A list of Tensors + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. 
+ if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim, original_impl=False, device=None, dtype=None): + super().__init__() + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) + self.register_buffer("inv_freq", inv_freq) + self.dim = dim + self.original_impl = original_impl + + def forward_impl( + self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000 + ): + """Enhanced Transformer with Rotary Position Embedding. + + Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ + transformers/rope/__init__.py. MIT License: + https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. + """ + # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ + theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem)) + + # Create position indexes `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, dtype=dtype, device=device) + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.outer(seq_idx, theta).float() + + cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) + + # this is to mimic the behaviour of complex32, else we will get different results + if dtype in (torch.float16, torch.bfloat16, torch.int8): + cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() + return cache + + def forward(self, max_seq_len, offset=0): + return self.forward_impl( + max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device + ) + + +@torch.jit.script +def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: + # x: [sq, b, np, hn] + sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3) + rot_dim = rope_cache.shape[-2] * 2 + x, x_pass = x[..., :rot_dim], x[..., rot_dim:] + # truncate to support variable sizes + rope_cache = rope_cache[:sq] + xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2) + rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2) + x_out2 = torch.stack( + [ + xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], + xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], + ], + -1, + ) + x_out2 = x_out2.flatten(3) + return torch.cat((x_out2, x_pass), dim=-1) + + +class RMSNorm(torch.nn.Module): + def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): + super().__init__() + self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) + self.eps = eps + + def forward(self, hidden_states: torch.Tensor): + if hidden_states.dtype == torch.bfloat16: + norm_x = torch.mean(hidden_states * hidden_states, dim=-1, keepdim=True) + x_normed = hidden_states * torch.rsqrt(norm_x + self.eps) + return self.weight * x_normed + else: + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + + return (self.weight * hidden_states).to(input_dtype) + + +class CoreAttention(torch.nn.Module): + def __init__(self, config: ChatGLMConfig, layer_number): + super(CoreAttention, self).__init__() + + self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 + if 
self.apply_query_key_layer_scaling: + self.attention_softmax_in_fp32 = True + self.layer_number = max(1, layer_number) + + projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. + self.hidden_size_per_partition = projection_size + self.hidden_size_per_attention_head = projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + self.coeff = coeff + + self.attention_dropout = torch.nn.Dropout(config.attention_dropout) + + def forward(self, query_layer, key_layer, value_layer, attention_mask): + pytorch_major_version = int(torch.__version__.split('.')[0]) + if pytorch_major_version >= 2: + query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]] + if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: + context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, + is_causal=True) + else: + if attention_mask is not None: + attention_mask = ~attention_mask + context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, + attention_mask) + context_layer = context_layer.permute(2, 0, 1, 3) + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.reshape(*new_context_layer_shape) + else: + # Raw attention scores + + # [b, np, sq, sk] + output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = torch.empty( + output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype, + device=query_layer.device + ) + + # Raw attention scores. 
[b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # attention scores and attention mask [b, np, sq, sk] + if self.attention_softmax_in_fp32: + attention_scores = attention_scores.float() + if self.coeff is not None: + attention_scores = attention_scores * self.coeff + if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: + attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3], + device=attention_scores.device, dtype=torch.bool) + attention_mask.tril_() + attention_mask = ~attention_mask + if attention_mask is not None: + attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) + attention_probs = F.softmax(attention_scores, dim=-1) + attention_probs = attention_probs.type_as(value_layer) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.attention_dropout(attention_probs) + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. + # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) + # change view [sk, b * np, hn] + value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.view(*new_context_layer_shape) + + return context_layer + + +class SelfAttention(torch.nn.Module): + """Parallel self-attention layer abstract class. + + Self-attention layer takes input with size [s, b, h] + and returns output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(SelfAttention, self).__init__() + self.layer_number = max(1, layer_number) + + self.projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. 
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + self.multi_query_attention = config.multi_query_attention + self.qkv_hidden_size = 3 * self.projection_size + if self.multi_query_attention: + self.num_multi_query_groups_per_partition = config.multi_query_group_num + self.qkv_hidden_size = ( + self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num + ) + self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size, + bias=config.add_bias_linear or config.add_qkv_bias, + device=device, **_config_to_kwargs(config) + ) + + self.core_attention = CoreAttention(config, self.layer_number) + + # Output. + self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, + device=device, **_config_to_kwargs(config) + ) + + def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): + if self.multi_query_attention: + num_attention_heads = self.num_multi_query_groups_per_partition + else: + num_attention_heads = self.num_attention_heads_per_partition + return torch.empty( + inference_max_sequence_len, + batch_size, + num_attention_heads, + self.hidden_size_per_attention_head, + dtype=dtype, + device=device, + ) + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True + ): + # hidden_states: [sq, b, h] + + # ================================================= + # Pre-allocate memory for key-values for inference. + # ================================================= + # ===================== + # Query, Key, and Value + # ===================== + + # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] + mixed_x_layer = self.query_key_value(hidden_states) + + if self.multi_query_attention: + (query_layer, key_layer, value_layer) = mixed_x_layer.split( + [ + self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + ], + dim=-1, + ) + query_layer = query_layer.view( + query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + key_layer = key_layer.view( + key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + value_layer = value_layer.view( + value_layer.size()[:-1] + + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + else: + new_tensor_shape = mixed_x_layer.size()[:-1] + \ + (self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] + (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) + + # apply relative positional encoding (rotary embedding) + if rotary_pos_emb is not None: + query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) + key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) + + # adjust key and value for inference + if kv_cache is not None: + cache_k, cache_v = kv_cache + key_layer = torch.cat((cache_k, key_layer), dim=0) + value_layer = torch.cat((cache_v, value_layer), dim=0) + if use_cache: + kv_cache = (key_layer, value_layer) + else: + kv_cache = None + + if self.multi_query_attention: + key_layer = 
key_layer.unsqueeze(-2) + key_layer = key_layer.expand( + -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 + ) + key_layer = key_layer.contiguous().view( + key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + value_layer = value_layer.unsqueeze(-2) + value_layer = value_layer.expand( + -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 + ) + value_layer = value_layer.contiguous().view( + value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + + # ================================== + # core attention computation + # ================================== + + context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) + + # ================= + # Output. [sq, b, h] + # ================= + + output = self.dense(context_layer) + + return output, kv_cache + + +def _config_to_kwargs(args): + common_kwargs = { + "dtype": args.torch_dtype, + } + return common_kwargs + + +class MLP(torch.nn.Module): + """MLP. + + MLP will take the input with h hidden state, project it to 4*h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. + """ + + def __init__(self, config: ChatGLMConfig, device=None): + super(MLP, self).__init__() + + self.add_bias = config.add_bias_linear + + # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + self.dense_h_to_4h = nn.Linear( + config.hidden_size, + config.ffn_hidden_size * 2, + bias=self.add_bias, + device=device, + **_config_to_kwargs(config) + ) + + def swiglu(x): + x = torch.chunk(x, 2, dim=-1) + return F.silu(x[0]) * x[1] + + self.activation_func = swiglu + + # Project back to h. + self.dense_4h_to_h = nn.Linear( + config.ffn_hidden_size, + config.hidden_size, + bias=self.add_bias, + device=device, + **_config_to_kwargs(config) + ) + + def forward(self, hidden_states): + # [s, b, 4hp] + intermediate_parallel = self.dense_h_to_4h(hidden_states) + intermediate_parallel = self.activation_func(intermediate_parallel) + # [s, b, h] + output = self.dense_4h_to_h(intermediate_parallel) + return output + + +class GLMBlock(torch.nn.Module): + """A single transformer layer. + + Transformer layer takes input with size [s, b, h] and returns an + output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(GLMBlock, self).__init__() + self.layer_number = layer_number + + self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm + + self.fp32_residual_connection = config.fp32_residual_connection + + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Layernorm on the input data. + self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + # Self attention. 
+ self.self_attention = SelfAttention(config, layer_number, device=device) + self.hidden_dropout = config.hidden_dropout + + # Layernorm on the attention output + self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + # MLP + self.mlp = MLP(config, device=device) + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, + ): + # hidden_states: [s, b, h] + + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Self attention. + attention_output, kv_cache = self.self_attention( + layernorm_output, + attention_mask, + rotary_pos_emb, + kv_cache=kv_cache, + use_cache=use_cache + ) + + # Residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) + layernorm_input = residual + layernorm_input + + # Layer norm post the self attention. + layernorm_output = self.post_attention_layernorm(layernorm_input) + + # MLP. + mlp_output = self.mlp(layernorm_output) + + # Second residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) + output = residual + output + + return output, kv_cache + + +class GLMTransformer(torch.nn.Module): + """Transformer class.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(GLMTransformer, self).__init__() + + self.fp32_residual_connection = config.fp32_residual_connection + self.post_layer_norm = config.post_layer_norm + + # Number of layers. + self.num_layers = config.num_layers + + # Transformer layers. + def build_layer(layer_number): + return GLMBlock(config, layer_number, device=device) + + self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) + + if self.post_layer_norm: + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Final layer norm before output. + self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, + dtype=config.torch_dtype) + + self.gradient_checkpointing = False + + def _get_layer(self, layer_number): + return self.layers[layer_number] + + def forward( + self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, + use_cache: Optional[bool] = True, + output_hidden_states: Optional[bool] = False, + ): + if not kv_caches: + kv_caches = [None for _ in range(self.num_layers)] + presents = () if use_cache else None + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + all_self_attentions = None + all_hidden_states = () if output_hidden_states else None + for index in range(self.num_layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer = self._get_layer(index) + if self.gradient_checkpointing and self.training: + layer_ret = torch.utils.checkpoint.checkpoint( + layer, + hidden_states, + attention_mask, + rotary_pos_emb, + kv_caches[index], + use_cache + ) + else: + layer_ret = layer( + hidden_states, + attention_mask, + rotary_pos_emb, + kv_cache=kv_caches[index], + use_cache=use_cache + ) + hidden_states, kv_cache = layer_ret + if use_cache: + presents = presents + (kv_cache,) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # Final layer norm. + if self.post_layer_norm: + hidden_states = self.final_layernorm(hidden_states) + + return hidden_states, presents, all_hidden_states, all_self_attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. + """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, past_key_values, padding_mask=None): + batch_size, seq_length = input_ids.shape + full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) + full_attention_mask.tril_() + past_length = 0 + if past_key_values: + past_length = past_key_values[0][0].shape[0] + if past_length: + full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, + device=input_ids.device), full_attention_mask), dim=-1) + if padding_mask is not None: + full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) + if not past_length and padding_mask is not None: + full_attention_mask -= padding_mask.unsqueeze(-1) - 1 + full_attention_mask = (full_attention_mask < 0.5).bool() + full_attention_mask.unsqueeze_(1) + return full_attention_mask + + def get_position_ids(self, input_ids, device): + batch_size, seq_length = input_ids.shape + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + return position_ids + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, GLMTransformer): + module.gradient_checkpointing = value + + +class Embedding(torch.nn.Module): + """Language model embeddings.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(Embedding, self).__init__() + + self.hidden_size = config.hidden_size + # Word embeddings (parallel). + self.word_embeddings = nn.Embedding( + config.padded_vocab_size, + self.hidden_size, + dtype=config.torch_dtype, + device=device + ) + self.fp32_residual_connection = config.fp32_residual_connection + + def forward(self, input_ids): + # Embeddings. + words_embeddings = self.word_embeddings(input_ids) + embeddings = words_embeddings + # Data format change to avoid explicit tranposes : [b s h] --> [s b h]. + embeddings = embeddings.transpose(0, 1).contiguous() + # If the input flag for fp32 residual connection is set, convert for float. 
+ if self.fp32_residual_connection: + embeddings = embeddings.float() + return embeddings + + +class ChatGLMModel(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, device=None, empty_init=True): + super().__init__(config) + if empty_init: + init_method = skip_init + else: + init_method = default_init + init_kwargs = {} + if device is not None: + init_kwargs["device"] = device + self.embedding = init_method(Embedding, config, **init_kwargs) + self.num_layers = config.num_layers + self.multi_query_group_num = config.multi_query_group_num + self.kv_channels = config.kv_channels + + # Rotary positional embeddings + self.seq_length = config.seq_length + rotary_dim = ( + config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels + ) + + self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device, + dtype=config.torch_dtype) + self.encoder = init_method(GLMTransformer, config, **init_kwargs) + self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False, + dtype=config.torch_dtype, **init_kwargs) + self.pre_seq_len = config.pre_seq_len + self.prefix_projection = config.prefix_projection + if self.pre_seq_len is not None: + for param in self.parameters(): + param.requires_grad = False + self.prefix_tokens = torch.arange(self.pre_seq_len).long() + self.prefix_encoder = PrefixEncoder(config) + self.dropout = torch.nn.Dropout(0.1) + + def get_input_embeddings(self): + return self.embedding.word_embeddings + + def get_prompt(self, batch_size, device, dtype=torch.half): + prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) + past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) + past_key_values = past_key_values.view( + batch_size, + self.pre_seq_len, + self.num_layers * 2, + self.multi_query_group_num, + self.kv_channels + ) + # seq_len, b, nh, hidden_size + past_key_values = self.dropout(past_key_values) + past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) + return past_key_values + + def forward( + self, + input_ids, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + full_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, seq_length = input_ids.shape + + if inputs_embeds is None: + inputs_embeds = self.embedding(input_ids) + + if self.pre_seq_len is not None: + if past_key_values is None: + past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device, + dtype=inputs_embeds.dtype) + if attention_mask is not None: + attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)), + attention_mask], dim=-1) + + if full_attention_mask is None: + if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): + full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) + + # Rotary 
positional embeddings + rotary_pos_emb = self.rotary_pos_emb(self.seq_length) + if position_ids is not None: + rotary_pos_emb = rotary_pos_emb[position_ids] + else: + rotary_pos_emb = rotary_pos_emb[None, :seq_length] + rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() + + # Run encoder. + hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( + inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, + kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states + ) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + def quantize(self, weight_bit_width: int): + from .quantization import quantize + quantize(self.encoder, weight_bit_width) + return self + + +class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, empty_init=True, device=None): + super().__init__(config) + + self.max_sequence_length = config.max_length + self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device) + self.config = config + self.quantized = False + + if self.config.quantization_bit: + self.quantize(self.config.quantization_bit, empty_init=True) + + def _update_model_kwargs_for_generation( + self, + outputs: ModelOutput, + model_kwargs: Dict[str, Any], + is_encoder_decoder: bool = False, + standardize_cache_format: bool = False, + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output( + outputs, standardize_cache_format=standardize_cache_format + ) + + # update attention mask + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = torch.cat( + [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 + ) + + # update position ids + if "position_ids" in model_kwargs: + position_ids = model_kwargs["position_ids"] + new_position_id = position_ids[..., -1:].clone() + new_position_id += 1 + model_kwargs["position_ids"] = torch.cat( + [position_ids, new_position_id], dim=-1 + ) + + model_kwargs["is_first_forward"] = False + return model_kwargs + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + past_key_values: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + is_first_forward: bool = True, + **kwargs + ) -> dict: + # only last token for input_ids if past is not None + if position_ids is None: + position_ids = self.get_position_ids(input_ids, device=input_ids.device) + if not is_first_forward: + position_ids = position_ids[..., -1:] + input_ids = input_ids[:, -1:] + return { + "input_ids": input_ids, + "past_key_values": past_key_values, + "position_ids": position_ids, + "attention_mask": attention_mask, + "return_last_logit": True + } + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = 
None, + return_dict: Optional[bool] = None, + return_last_logit: Optional[bool] = False, + ): + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + if return_last_logit: + hidden_states = hidden_states[-1:] + lm_logits = self.transformer.output_layer(hidden_states) + lm_logits = lm_logits.transpose(0, 1).contiguous() + + loss = None + if labels is not None: + lm_logits = lm_logits.to(torch.float32) + + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(ignore_index=-100) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + lm_logits = lm_logits.to(hidden_states.dtype) + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + + Output shares the same memory storage as `past`. 
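+ Note: this model's key/value cache is laid out sequence-first, [seq_len, batch, num_heads, head_dim], so the beam dimension is dim 1; that is why `index_select` below operates on dimension 1 rather than the dimension 0 used by most batch-first models.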
+ """ + return tuple( + ( + layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)), + layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)), + ) + for layer_past in past + ) + + def process_response(self, response): + response = response.strip() + response = response.replace("[[训练时间]]", "2023年") + return response + + def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None): + prompt = tokenizer.build_prompt(query, history=history) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + return inputs + + def build_stream_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None): + if history: + prompt = "\n\n[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query) + input_ids = tokenizer.encode(prompt, add_special_tokens=False) + input_ids = input_ids[1:] + inputs = tokenizer.batch_encode_plus([(input_ids, None)], return_tensors="pt", add_special_tokens=False) + else: + prompt = "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + return inputs + + @torch.inference_mode() + def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 8192, num_beams=1, + do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs): + if history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + inputs = self.build_inputs(tokenizer, query, history=history) + outputs = self.generate(**inputs, **gen_kwargs) + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = tokenizer.decode(outputs) + response = self.process_response(response) + history = history + [(query, response)] + return response, history + + @torch.inference_mode() + def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None, + max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, + return_past_key_values=False, **kwargs): + if history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + if past_key_values is None and not return_past_key_values: + inputs = self.build_inputs(tokenizer, query, history=history) + else: + inputs = self.build_stream_inputs(tokenizer, query, history=history) + if past_key_values is not None: + past_length = past_key_values[0][0].shape[0] + if self.transformer.pre_seq_len is not None: + past_length -= self.transformer.pre_seq_len + inputs.position_ids += past_length + attention_mask = inputs.attention_mask + attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1) + inputs['attention_mask'] = attention_mask + for outputs in self.stream_generate(**inputs, past_key_values=past_key_values, + return_past_key_values=return_past_key_values, **gen_kwargs): + if return_past_key_values: + outputs, past_key_values = outputs + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = 
tokenizer.decode(outputs) + if response and response[-1] != "�": + response = self.process_response(response) + new_history = history + [(query, response)] + if return_past_key_values: + yield response, new_history, past_key_values + else: + yield response, new_history + + @torch.inference_mode() + def stream_generate( + self, + input_ids, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, + return_past_key_values=False, + **kwargs, + ): + batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] + + if generation_config is None: + generation_config = self.generation_config + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) + bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None: + warnings.warn( + f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. " + "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" + " recommend using `max_new_tokens` to control the maximum length of the generation.", + UserWarning, + ) + elif generation_config.max_new_tokens is not None: + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + if not has_default_max_length: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + + if input_ids_seq_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + logger.warning( + f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." + ) + + # 2. 
Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=input_ids, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + ) + + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + logits_warper = self._get_logits_warper(generation_config) + + unfinished_sequences = torch.ones(input_ids.shape[0], device=input_ids.device, dtype=input_ids.dtype) + scores = None + while True: + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=False, + output_hidden_states=False, + ) + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + next_token_scores = logits_warper(input_ids, next_token_scores) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + if generation_config.do_sample: + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(probs, dim=-1) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long()) + if return_past_key_values: + yield input_ids, outputs.past_key_values + else: + yield input_ids + # stop when each sentence is finished, or if we exceed the maximum length + if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): + break + + def quantize(self, bits: int, empty_init=False, device=None, **kwargs): + if bits == 0: + return + + from .quantization import quantize + + if self.quantized: + logger.info("Already quantized.") + return self + + self.quantized = True + + self.config.quantization_bit = bits + + self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device, + **kwargs) + return self diff --git a/pytorch_model-00001-of-00002.bin b/pytorch_model-00001-of-00002.bin new file mode 100644 index 0000000..cf060ab --- /dev/null +++ b/pytorch_model-00001-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445eee34a6866c25accc42b2438a4f55d9b13fbe37ac514ddaf73c6b93af2df0 +size 9986266947 diff --git a/pytorch_model-00002-of-00002.bin b/pytorch_model-00002-of-00002.bin new file mode 100644 index 0000000..ff4f939 --- /dev/null +++ b/pytorch_model-00002-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d68be1e18f6edc3d9be31977569c407d7c5c15757f49a27e642d83be02d11762 +size 2500976015 diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json new file mode 100644 index 0000000..ab25cbc --- /dev/null +++ b/pytorch_model.bin.index.json @@ -0,0 +1,207 @@ +{ + "metadata": { + "total_size": 12487168064 + }, + "weight_map": { + "transformer.embedding.word_embeddings.weight": 
"pytorch_model-00001-of-00002.bin", + "transformer.encoder.final_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.10.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.11.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.12.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + 
"transformer.encoder.layers.12.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.13.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.14.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.15.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.16.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.17.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + 
"transformer.encoder.layers.17.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.18.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.19.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.20.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.21.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + 
"transformer.encoder.layers.21.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.22.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.23.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.23.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.23.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.24.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.25.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.26.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin", + 
"transformer.encoder.layers.26.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.27.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin", + "transformer.encoder.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.4.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.5.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.6.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + 
"transformer.encoder.layers.6.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.7.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.8.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin", + "transformer.encoder.layers.9.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin", + "transformer.output_layer.weight": "pytorch_model-00002-of-00002.bin", + "transformer.rotary_pos_emb.inv_freq": "pytorch_model-00001-of-00002.bin" + } +} diff --git a/quantization.py b/quantization.py new file mode 100644 index 0000000..cb95bfe --- /dev/null +++ b/quantization.py @@ -0,0 +1,188 @@ +from torch.nn import Linear +from torch.nn.parameter import Parameter + +import bz2 +import torch +import base64 +import ctypes +from transformers.utils import logging + +from typing import List +from functools import partial + +logger = logging.get_logger(__name__) + +try: + from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up + + class Kernel: + def __init__(self, code: bytes, function_names: List[str]): + self.code = code + self._function_names = function_names + self._cmodule = LazyKernelCModule(self.code) + + for name in self._function_names: + setattr(self, name, KernelFunction(self._cmodule, name)) + + quantization_code = 
"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqy
sTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2euno
r+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ" + + kernels = Kernel( + bz2.decompress(base64.b64decode(quantization_code)), + [ + "int4WeightCompression", + "int4WeightExtractionFloat", + "int4WeightExtractionHalf", + "int8WeightExtractionFloat", + "int8WeightExtractionHalf", + ], + ) +except Exception as exception: + kernels = None + logger.warning("Failed to load cpm_kernels:" + str(exception)) + + +class W8A16Linear(torch.autograd.Function): + @staticmethod + def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width): + ctx.inp_shape = inp.size() + ctx.weight_bit_width = weight_bit_width + out_features = quant_w.size(0) + inp = inp.contiguous().view(-1, inp.size(-1)) + weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width) + ctx.weight_shape = weight.size() + output = inp.mm(weight.t()) + ctx.save_for_backward(inp, quant_w, scale_w) + return output.view(*(ctx.inp_shape[:-1] + (out_features,))) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + inp, quant_w, scale_w = ctx.saved_tensors + weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width) + grad_output = grad_output.contiguous().view(-1, weight.size(0)) + grad_input = grad_output.mm(weight) + grad_weight = grad_output.t().mm(inp) + return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None + + +def compress_int4_weight(weight: torch.Tensor): # (n, m) + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + assert m % 2 == 0 + m = m // 2 + out = torch.empty(n, m, dtype=torch.int8, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + kernels.int4WeightCompression( + gridDim, + blockDim, + 0, + stream, + [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)], + ) + return out + + +def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int): + assert scale_list.dtype in [torch.half, torch.bfloat16] + assert weight.dtype in [torch.int8] + if source_bit_width == 8: + return weight.to(scale_list.dtype) * scale_list[:, None] + elif source_bit_width == 4: + func = ( + kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16 + ) + else: + assert False, "Unsupported bit-width" + + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + func( + gridDim, + blockDim, + 0, + stream, + [ + ctypes.c_void_p(weight.data_ptr()), + ctypes.c_void_p(scale_list.data_ptr()), + ctypes.c_void_p(out.data_ptr()), + ctypes.c_int32(n), + ctypes.c_int32(m), + ], + ) + return out + + +class QuantizedLinear(torch.nn.Module): + def __init__(self, weight_bit_width: int, weight, bias=None, device="cpu", dtype=None, empty_init=False, *args, + **kwargs): + super().__init__() + self.weight_bit_width = weight_bit_width + + shape = weight.shape + + if weight is None or empty_init: + self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device) + self.weight_scale = 
torch.empty(shape[0], dtype=dtype, device=device) + else: + self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1) + self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8) + if weight_bit_width == 4: + self.weight = compress_int4_weight(self.weight) + + self.weight = Parameter(self.weight.to(device), requires_grad=False) + self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False) + self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None + + def forward(self, input): + output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width) + if self.bias is not None: + output = output + self.bias + return output + + +def quantize(model, weight_bit_width, empty_init=False, device=None): + """Replace fp16 linear with quantized linear""" + for layer in model.layers: + layer.self_attention.query_key_value = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()), + bias=layer.self_attention.query_key_value.bias, + dtype=layer.self_attention.query_key_value.weight.dtype, + device=layer.self_attention.query_key_value.weight.device if device is None else device, + empty_init=empty_init + ) + layer.self_attention.dense = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()), + bias=layer.self_attention.dense.bias, + dtype=layer.self_attention.dense.weight.dtype, + device=layer.self_attention.dense.weight.device if device is None else device, + empty_init=empty_init + ) + layer.mlp.dense_h_to_4h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()), + bias=layer.mlp.dense_h_to_4h.bias, + dtype=layer.mlp.dense_h_to_4h.weight.dtype, + device=layer.mlp.dense_h_to_4h.weight.device if device is None else device, + empty_init=empty_init + ) + layer.mlp.dense_4h_to_h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()), + bias=layer.mlp.dense_4h_to_h.bias, + dtype=layer.mlp.dense_4h_to_h.weight.dtype, + device=layer.mlp.dense_4h_to_h.weight.device if device is None else device, + empty_init=empty_init + ) + + return model diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8a096d1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +numpy +pandas +einops +sentencepiece +deepspeed==0.9.3 +transformers==4.33.2 +accelerate==0.21.0 +peft==0.4.0 +BitsAndBytes==0.40.2 +xformers==0.0.21 +ujson +jsonlines +tiktoken +transformers_stream_generator diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1 @@ +{} diff --git a/tokenization_chatglm.py b/tokenization_chatglm.py new file mode 100644 index 0000000..6928fe8 --- /dev/null +++ b/tokenization_chatglm.py @@ -0,0 +1,264 @@ +import os +import torch +from typing import List, Optional, Union, Dict +from sentencepiece import SentencePieceProcessor +from transformers import PreTrainedTokenizer +from transformers.utils import logging, PaddingStrategy +from transformers.tokenization_utils_base import EncodedInput, BatchEncoding + + +class SPTokenizer: + def __init__(self, model_path: str): + # reload tokenizer + assert os.path.isfile(model_path), model_path + self.sp_model = 
SentencePieceProcessor(model_file=model_path) + + # BOS / EOS token IDs + self.n_words: int = self.sp_model.vocab_size() + self.bos_id: int = self.sp_model.bos_id() + self.eos_id: int = self.sp_model.eos_id() + self.pad_id: int = self.sp_model.unk_id() + assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() + + special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + self.special_tokens = {} + self.index_special_tokens = {} + for token in special_tokens: + self.special_tokens[token] = self.n_words + self.index_special_tokens[self.n_words] = token + self.n_words += 1 + + def tokenize(self, s: str): + return self.sp_model.EncodeAsPieces(s) + + def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: + assert type(s) is str + t = self.sp_model.encode(s) + if bos: + t = [self.bos_id] + t + if eos: + t = t + [self.eos_id] + return t + + def decode(self, t: List[int]) -> str: + return self.sp_model.decode(t) + + def decode_tokens(self, tokens: List[str]) -> str: + text = self.sp_model.DecodePieces(tokens) + return text + + def convert_token_to_id(self, token): + """ Converts a token (str) to an id using the vocab. """ + if token in self.special_tokens: + return self.special_tokens[token] + return self.sp_model.PieceToId(token) + + def convert_id_to_token(self, index): + """Converts an index (integer) to a token (str) using the vocab.""" + if index in self.index_special_tokens or index in [self.eos_id, self.bos_id, self.pad_id] or index < 0: + return "" + return self.sp_model.IdToPiece(index) + + +class ChatGLMTokenizer(PreTrainedTokenizer): + vocab_files_names = {"vocab_file": "tokenizer.model"} + + model_input_names = ["input_ids", "attention_mask", "position_ids"] + + def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, **kwargs): + super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) + self.name = "GLMTokenizer" + + self.vocab_file = vocab_file + self.tokenizer = SPTokenizer(vocab_file) + self.special_tokens = { + "<bos>": self.tokenizer.bos_id, + "<eos>": self.tokenizer.eos_id, + "<pad>": self.tokenizer.pad_id + } + + def get_command(self, token): + if token in self.special_tokens: + return self.special_tokens[token] + assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" + return self.tokenizer.special_tokens[token] + + @property + def unk_token(self) -> str: + return "<unk>" + + @property + def pad_token(self) -> str: + return "<unk>" + + @property + def pad_token_id(self): + return self.get_command("<pad>") + + @property + def eos_token(self) -> str: + return "</s>" + + @property + def eos_token_id(self): + return self.get_command("<eos>") + + @property + def vocab_size(self): + return self.tokenizer.n_words + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text, **kwargs): + return self.tokenizer.tokenize(text) + + def _convert_token_to_id(self, token): + """ Converts a token (str) to an id using the vocab. 
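+ Special tokens such as [gMASK] or sop are resolved through SPTokenizer.special_tokens, which assigns them ids directly above the SentencePiece vocabulary (see SPTokenizer.__init__ above).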
""" + return self.tokenizer.convert_token_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.tokenizer.convert_id_to_token(index) + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + return self.tokenizer.decode_tokens(tokens) + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, self.vocab_files_names["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def get_prefix_tokens(self): + prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] + return prefix_tokens + + def build_prompt(self, query, history=None): + if history is None: + history = [] + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n\n问:{}\n\n答:{}\n\n".format(i + 1, old_query, response) + prompt += "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query) + return prompt + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + prefix_tokens = self.get_prefix_tokens() + token_ids_0 = prefix_tokens + token_ids_0 + if token_ids_1 is not None: + token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] + return token_ids_0 + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. 
+ + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + # assert self.padding_side == "left" + + required_input = encoded_inputs[self.model_input_names[0]] + seq_length = len(required_input) + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * seq_length + + if "position_ids" not in encoded_inputs: + encoded_inputs["position_ids"] = list(range(seq_length)) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if self.padding_side == "left": + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + else: + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = encoded_inputs["position_ids"] + [0] * difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference + + return encoded_inputs diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000..8fa03ec --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57d9fdbdfaa7cd8c0a3a38d7e8de2e6c31374b5dbc4dc4568d85585fe745812f +size 1018370 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..7fa8f74 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,15 @@ +{ + "auto_map": { + "AutoTokenizer": [ + "tokenization_chatglm.ChatGLMTokenizer", + null + ] + }, + "clean_up_tokenization_spaces": false, + "do_lower_case": false, + "legacy": false, + "model_max_length": 1000000000000000019884624838656, + "padding_side": "left", + "remove_space": false, + "tokenizer_class": "ChatGLMTokenizer" +}
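A minimal usage sketch for the checkpoint assembled by this patch, assuming the files above are cloned into a local directory; the directory name, query, and quantization choice are illustrative and not part of the commit:

import torch
from transformers import AutoModel, AutoTokenizer

model_dir = "./chatglm2-6b"  # hypothetical local clone of the files added above
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)  # resolves tokenization_chatglm.ChatGLMTokenizer via auto_map
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda().eval()  # loads the custom modeling_chatglm.py classes

# chat() builds the "[Round N]\n\n问:...\n\n答:" prompt, samples with top_p/temperature,
# and returns the decoded response together with the updated history.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)

# To use the INT4 path from quantization.py instead (requires CUDA and cpm_kernels):
# model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).quantize(4).half().cuda().eval()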