From 160556d4db0074261958bb2d95acd54367c099df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Hru=C5=A1ka?= Date: Sun, 1 Feb 2026 19:47:15 +0100 Subject: [PATCH] Code import --- .gitignore | 7 + 26-02-01_20-36-38_673842166.jpg | Bin 0 -> 25308 bytes README.md | 332 +++++++++++++++ app.py | 78 ++++ convert_diffusers_to_safetensors.py | 53 +++ models.py | 65 +++ poc.py | 57 +++ requirements.txt | 7 + run_poc.sh | 5 + run_web_example.sh | 28 ++ sd_pipeline.py | 278 +++++++++++++ static/style.css | 386 +++++++++++++++++ templates/index.html | 625 ++++++++++++++++++++++++++++ 13 files changed, 1921 insertions(+) create mode 100644 .gitignore create mode 100644 26-02-01_20-36-38_673842166.jpg create mode 100644 README.md create mode 100644 app.py create mode 100755 convert_diffusers_to_safetensors.py create mode 100644 models.py create mode 100644 poc.py create mode 100644 requirements.txt create mode 100755 run_poc.sh create mode 100755 run_web_example.sh create mode 100644 sd_pipeline.py create mode 100644 static/style.css create mode 100644 templates/index.html diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d4de9e0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +/models/ +/lora/ +/.venv/ +.idea +/out +__pycache__ +run_web.sh diff --git a/26-02-01_20-36-38_673842166.jpg b/26-02-01_20-36-38_673842166.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3475e83106bd0924d7ef21e6fa644cb7de5f676 GIT binary patch literal 25308 zcmbSxWmpv9`|eT_BB2Y?oeLC`xxMB`ICf zjfjWe|I~GzPv@LxuK6%C?=>^`%=12R-|xTaf4=}Ux|%wg00II4fZ%om{F?=+0m$y$ zA-zLFMoLObPEJNa3A{&n_bw&l{Rh-QRwi~fRwfn}4xUH+99+WOEGz<2g2JL=U@(}S zUq(S%T>g;+So}XXAs{Cwr@TwaaPJ<2I429I`2Vy0YXi`d5dsJSLQ-qzpgQJtP3)0Km$JfvQbwI@1$an9fqESi7DXD4c8JXyU!XivDwxqPI z_ETMbLt|5O%a8Vs&aUpB-o7#X_{8MY^v{{4<(1X7^^MJ~?Zczvlhd>Fi_5G3;3Bxi z`9JNy1N%R4(ca=BBqk;zCjAdC0z$vrorsp0gj4L!eH9Z@8;=KE;$dV!)x?iA-^sZp zAP01|o}(1>Jm4kX!~a10FJ%9Bz{3B(ko_-U{~Ol~fRc#d_U94N0+awpQBxSJD7g7Q 
z0I2yPz$|;{1QFw1E&yVftjOQ=FLC&|39@JLE-w26>yi1-Q7fp!Wm7OQ%+%Ps;-_=s zLySeVLYcr6y7|8Ma&59CiZ5CgK8?qy-M>9!>XxR%2KRHi@Gf z{mtR|=k=mjZql9DYE6)6W{~OM0U;i(I1M{ZC4bjkjG~rZagMc*$7TjA$e`{-bui3y zrIBGhn`5EF(8b-U*513U_9GNM=jzi7Y4TLTOMfgy za&6V)!Dv9u6+v&M6y&0&=;vq5mj}9Szg|Aiy!nRlkDNc9UyT=;n2Q_kcZlZSfqUlM zSLmK7uVoO}=kwB|zv)v5y|!x-582m-M3j?%`YVL!Uhd5PqlkAuNS?}>SQ_PikZwu+ z@*lv;dA40(jZjpvkpC2CO_jXqSOZmzsy<9h4C=eT2FwLGcE0$Kfw4cDV~L4jt3UoC zs+X4&p8WTujjpliBI#V!+srqKwpaNl`@sWqSldr);TeXa`J#A_G#G4msui9nMV+-D zV3uqdWd7x8O1Ev6a@*W0y4B$jLQe6aM*gXM#xg`H*EMqqxo_uSojWDh{{e2(w_N@5 zci(CZ?e0t+c>n&EY;DMjG1Wi7{0qVB((};+-2_irDpw)tJtR7i1wI#)7^m@kuKSp2 zdw+?D=fE}ROZ-fzd=vv zN%M^}5sR~{cbsE33%`%cdE9w-xSJWxTmXgu| zMyZ1I13h%&#PrDyFGo?Yfp0@#WHv>eN_P_Cklh27XF%y4e&;gP(#&cXNdh zxs-(cF?zjnN?gl;_|ifWx6(F<*aR_z{3^W*pmH^Lj&a=UB<1F7dd7OKiI*254;5|t zW|P{exv{+vUZco!4U1RS8uND2@!?~5O)5ls(O4h!$IDA{up@a+_h?SLvnX*$M+q?r zo}2tkas9Oz8}-GY^a6b_hlD2O-|PXG9&9S%Y$eYAlIps=$dxH-yWt-6?gD{&`XFxe zp*#)1`$&J>QHbyCZ<%8Lni;u7(~tbz&b_BQraHbeWZiPg=XQw69m4oa=isoFlZqZY zyYSz@z~wqy?TgEEDZ`>SprX)Gy{P5*mx)ir&LbRatIqV8<;6Yk=r+$rQea(1pxO9+4Kg5q!OaXc_5fp0#!RZh!7B1na@pwh!W zu^YJRnL@=xw}p1Px-4rlwc2Fkt|U3H25NKfxhVV8j-^+-rz>k*ldVi$+Nx(2e=9V_ zG^R(Tr%XJ?@guM=hSbBj`mA&=a9jkzMaJM|QeBEJaQ~)Tr$Y-IQ}D;4-Ha*s4Zz1+ z+8W~KKLoK0xf;CUnzm?_Qo$zdD`u3KVdo>ox;@UUK$C{N2$b01Lz)4IZPE8aV28SC z0xI2&lhv7!#Y0;|*AxxI(=xK9jDb`IKL;4!8Hamvj+J(!KR;Ble6g)DUAAh6fuSln ze!*%-#VK+~)N?@gyH9IJ<=^Pmv)iF6@(`cc5rb&1Rn;mD{6b0lmYMC=Qwepe_?j)# zvC=N&xD#ELa9uX65Tjg-fok8Kl=4IvvPpdZ6)`ep^s?mpt3r{Q&!v^R)U9Up^;n4U zbQexw7+QAx;Lnf3v?BpCp5v{B=2Jf+g7^)+>}>Sx4t0UdB%_c0l-XnNe*h_>>XWj` z{aUYhL`aBxy_qkMu!1Wo-~7)}hN6nti$?&p+=;b+0H7jY4x7|yor}u11ly;uTUQ{( z|E*dx3wpg`e=vkn7cRbfZc2CZ58(V)Xv1YaY5g^St{P9Rn3Jyz4|>1-`=Hft57Rp9 zteDq~^F@gtS0>*yOQX0)0wFke5nIc0wX6IdKUT|X^wYui^^l_VX_XftN1p(6(|qDyFORsU zF{k|fFY5&s=?}7Asj;t_?9T+in^&e+y~5^DRf{FpE}USlhLyOl)1~lNDiD>kl532D zrb0JYk@9F_le-bZR=Ay0i{u^I4S4sIZp?h(5lkrcSgf6>J$}=bkobPgL7+SPBu>3n zSxsI0Za+Xa5Ov4?UJ7L 
zk~?g35ZhdrreR%MJ1Q5kLFq9lkDJsY@XDo_qf-D#;SVI!zxC~zPz4tGm>~4$16M#NMQQtks_o!pH-+&k>5YDu)%DY~PdN3#8G9^T*1c%l z7|TmGpG_Toy7fh^(kNi{H~$NPH<6^eUHpV8i9;?W;+_F!%?u^w+&w8-ezw1=6zdjo z(z*WNc08SWbY54^XQw}leaTpbEgGj}{3V{yADYl%pBs+Ts*@VfYuv0i*7gJ}<192C zI7#P+H<>sX;>To0cx2EOTPms$lI(JiYwKmHD@W3g0+ErLi96J z-Y*cM<vDR}=M;_bOZo=gi+HdBT=(x^BbShwkc*bpa zUs3G}HaZ(^3A}V zqj}{k9(i)oH#lTN+9>tHBRhT>*7%YWN$sSpp9-JJZRfHKmq}p?{{R$Q!q~X0;dM(( zt&ZsIa0Lww_rWJzDOEG1^FbaQTf;AM^U?$2^qgwa|wVIzkA6+K8eb3 zr?|teP<f(>ibjmaT0OD_I=Q};kjO)Zjxhbj06kg>5?$qH6U{r;Lc6C5`%Fm)Q#0YT12YW5fE6PjDbAsnIG2^ zKGK@3)JQ~?vRZsVt;+U~dZC)xagg`Ybe{47V-8$>8zmML3m-MV{J=@{mE|44QWkZoMTciecV*Hx$Y7f&kDli%qGx|a~Io|aKY z(q$#KVC|^-^~1DhSz?0I=^9D$;eYbPSg1C3%$Dk*N7)n86%#THtE(gM7{8j~XPGs_ zbebD#Pc>CnxLUq(xc9E-v1SOJ2e1h+u1hVjCW@=xq(+OpO}Ni$D0^%@>Akt8a&$xY zp~T;ZM^!f_4~h8rRAPYWew$@ZOvr8SasKIP=a^KrWk8jTIk{@YP zu@H=`RIN;n+~%Q1i5lBJ71y9ix3&=5-Q$2ac`p%;t23ByMd?y?$OeH5AgB}o`W8idz+LL8{9Ps+-Wa61#*WDy}~JN)_h!9_uPvz;IC zC(q2=4b%s}ph>CddX-{lGl>mKVB{6Lq-(DyTDNZ_>7pBUgFn32UOM2BC!!c$P0b@h z*|{p*o!t8|Fb{FIM8{C%f1EDSv=_mJ?jTr12djrTu57`gZR7H+1z7z;VC*UBJmnk2K zIfUCXJF%B+W{qFB25Y>YiVBjEta!IR&Dx$Tv@rA2fkEhU@(!+mG`b-sToxuhYt!Yj zIh>Q{SuQI8$Xn2qV>dAqt{4Akmm1CRqk6B^<02&W^YR<&UI$Nn2X@YkmB&#^xNz03 z=Vt2+%lJUrd`^np?z3FAjmJCID5VXh9F0OGzwXp(eZtMQ-O#5(7B6mvgMV!PUYq=V|-^>XCYN&9H``+JsUiTj<*5kL*^~#-f4H?6aE!X-O z)*LF3ICnRLE!%Pm8`f>8KKE%w>>XRQ%LX2ZpUI$9dgt<<;RKr?3pMnb%*a^$k(B+0 zQ)e=hL!cm?#h8;C`KqW}9)fzVUC)K5FEY_rYg>iZCUwI`CkipXrZr`T9n8Wf}d1^mvYtR#ACgIsKDBk*c;cy%ZUR#pO23*Z zn;Binl*^o3S0U~>jW~W59gwR|Pca|jdy~2e(WW!1_kWMt|2kh~sgqseR(?W3+iJ8yWNb_0@PpI4MRmJI@C4n6Fi0h_1=V%cRQ7Ftt@sED~VMDNBhq#xic8WvdbX z?9yk9#l({bb$Bfb&jDq+iCN983GROYGlrS3H~#?9>owqv1D+;@kmX<_=;a&P&58mL z=~2b{pi52M+j@OVXhXLFZ&lU z{KD5H5QCTHUhC4IV8fqXqhBJXTAc(=HLLS9gL=imBGq{rEFalYtye!}d2#%?=!jUH zYVKWESE@c2{gWE0FSMG_PV{^0dE}`uP zopKE>g3&QtG0>VFotbcT!W0w&Vatp%*%ZLAowNmvmw_bdKqy-_QeA)Ghj@fPTS^B2 zvK#|h&YNqRn=!16nB z_zvXp<)Hf4dage(FoGA_BMBPkDng{8)VBl2%aC-ETz_uErusHgc>MA&(( 
z=mGYq{E2IeH|fReeA-4QG7}cN0!sEOs1-@YQ{z`&W3|F5b{^ZV$ORZfFRKuvT#m7+ zCa%Q|5RzI*zG`YIfvpTi{+6K1WUJ>Z9;ZIRkuk?LRXE$T(3&k})kAEZ$DwTs+urit zJ1~gl3c3j!*NUvvh#pzSsH$vrnws|9q;jOcY$dQ$6;6^VKWOAQt59!&Lz6qr4Roci zs!7Mt_I2I@-A$#b??>IrmpKY9Tx~?Dsq?^!q$|`G-r;zW5-E+b?L@}--R*-;9a_L;E3C5 zY+vVeOK(SKAzb4eLzv7qUXby21H7bzdi4(=UCg-y!Kur^%Js?ViwHC7If^+0O}6y# zGf5fuBpK-hxAehek)Q!9F?hnUscj{9^=I-nI$uRGSi$aOWrs^tT?9rK#8dhn0r02e zI_uPwg?4#B`t%bGw6QYC`vufp6I{nsWZr+B zM6!7=Ai=VxX5HXecV#5)l^hf!AB^lsC=w?0>3S^*JLWtA;+xRb1fe8EmCo)dRH4S& zOV`}TR}(vrQ<)&L*;JqmRYTJ1YXi3#<@|2iKV>pkBl91^Khu${9A!RJt{0!#)|YsH z*6IGU-;G!qXe~GX4={7UxdAw1-I2=ykxH&^d$9b@TBH+3RDQe*7-(l-U_os@G>^(V zRo^%$DVh{vST?^3sB~HEu`GSj^WyZ+$FPLyQx>Le@7?E?&!zenhng>V@Oz5R6N&|O zI0)G)Wouv{>`5$FT$xb(GESQBSS-G;IRiT+>T3N%Q&rmMLD^_SmGbPEe?4kmmGJK8u6&G%(}Nue!$3q6t(^MYrlQg z+TAHU5BkTKL#C%f`-vQPN5z_ZK1z-7q?@X_6(AcQf9`puG%Jn!tMp^)#jmg5yJcRi z98@|4x!qM~@Bf-9^}IHx{K_=)eJr?{?5>}W8+SE@JwL-fY&^>FVXaf3OTH!7y`y*M zC)ciqv-&OXd=gpSv9fyPbx$2uSV>qnIn%5AV?RjG;Rb!Kf~T9~IcT(uwZMr6;14`{ zBALet!WpYX->cBq4%ErZdV}q2kKeIPoNQe#_nvG$lfSqj`3HF0wJc28)W$yr+_Q6| zR{UI_L^Sl2dfvUmbpJie->jFfkc^dRynb_aqWFGjg{E)Jk-)*$YRrMbQ~FPj=DHwj z2hui)aE~2CBZvFOb_Q`Rs|^M=@Ldkd1zW#)@y3FYpM10me}y+?u4K!)m_5eBv6@1;6DIYv2~p&n>3bC@Y|BE zOeqr&Ynh4(5jufMqd?i^*ZA`jED=Q9I1u$plID6)o=I42t!dEXtL)b!+YtS&nXzZ#3!Zdn_y%jnezu;l)lo#;n#eXQWKo;7$r{>oKuZ5Ak zOFEJ~bJaxEotoD+!GfHv9i;q&Y_C~4vuZ+aLa zKyv1(K9RceO3KzH^n+T`i#|P$G{NB4!d0}~98biXyEn{i@oszIhHv;i%roJj(V8y9 z0J|fcRU);x0Cce4kr%-28SYgVv6Xx8S-rZ}`=Lg52YuW394 znsJWy&$gtP9D1~b)>(E_@64~v1&p6H#=+k8ILM={LY9wVS+RX-k}FgGY}ikl=8K;oeK}29H-t$^^|nk`{TZ2Mn`N^r}>TF6Xfo zm$TDyu>ZCWXQn-FH~zxbRYJ!3X*y*L@D6BRnIAtgNm6o`tN2n)jv@7kR?W9}gKiQr z=v6i?QmyjRku-fxEs;kFx~Ska=&f5J<96Ey`y;+w<42a_!z)j42)WohXIeG)v)j7! 
zZoavDEVOE=i?_DCk13>XJuOE&=5jWC7?{SB5d74XqV4jyFs)Su+J~W6k$W5Yt4V8W zs=IMt11{1dmOs~03q@R+jZ%pge|5je5>OQW+HFg^1szZQBonN=vTH2VC{7Umrd2PW zMJN8OTqHZ(tnmoP3QQDZJ9U0H^zOpW<+qTyF%gZB?OIU(^Ocf{*P1f>PYsyVBU@dt zJBIQe1`4t;AQ*2Tn{eaZAq6zq(IBLngIqzpG5HA?AS7PbnVcSjh(z_{R8=))}| z)&M-sw;Z5f{>hdq0Tv@V{;m+*(UV;NG-d);PCnBV_v5z}=(kl_ie{z8Oo>LvFQ3x1 ze8W^x@H=^EBp4EpRAUod!HGh{in^jn?SHXynRT17jh zCK{FR(EA(2-P>Dg)X)BBf^ES=S&Ctybz0GD&9`SMDpyn6Z(Y5y?JyzZ^&F!F2l(w#dR z;Qlr7e}GSCjrZ>;OA~v7j50J${Gn5ln6JRZVreBv)>brS*H8+7h_3|Pl7@*En^YK4p5v*6Y0BV93E z7eT6V{9w9t>h@!IIediS^Q8Puy3`r05pV-9stf=nUzzoV@bT{1r<^AK{Dz3bVD>)c zxW97M()tI8Pu_ppy*C*bb=SHCN1HfqANBgm^hHmIN1Q7>`J8kIR!wEn68Ek^qhqi* z&koX;#W1dgvv-zSNEk!w+TKs*ONmG7UyX3E>smTpLa1EYx0E9eF~FSCHi{z7ZIDCB zdr1(U7KDF~1ABK$d+3uCq(FT0)pSax@iT>X`ufqb6B#$DPeB90NT_mJ?W*RNCKTYY4IXYDhp45y^_Nh!ECB#o?;?X|Ov zZ%fgD*mW~J*aCA?3nkQ8)|xh@fr4E+MfsF@Hj^2dU^OK%`P|IXCvGOF&>n*MSq0E9 z&|wf(QXCwaS!yp;w%q8pOetp0q(=D4h-1nG`Nd4WQO;#M_O5xfvDcvW-8xTy#Z;1( zFdEkF5tabk)IkVmqF48`u6(lQnE+JX3Yj@&o)ID}<=0BgNx{mR@*QTLIBB+1fu&c< zbOexxJipG0C34LaT<(1w=2(oFsdOnBGst~kQ+D@(_O?Z(YoJjmr*%kixZ!#GZ&R@$ z<;r3~6~g9Vf}rIxvYr*XaSeBDVTPi~AC=Thc z6)~A5;oN`7bEEZsKkr+L@>Q$nDfnhaMI_dxui%GZVM5@&iIvo#IZKx&_a=gbXT0k8!^ftQRcwzsHEGUVEZc` zF`N42XRs^V$!pJY3g~o6Ev7H`uLj&E%xty2I3Nl1fx!HeN=gcL$3RIJpr*64qo<>s z(OT*v7+jhpB2a_@c4#*bYS%OU1;LAn%fk#Ai&3uzMd@SE3R~0&VzH)SjOGM4Q*>#R zt#7&5)p=R(jw$3eY|d#`cF&Y_Ab-Q1HF|DcuwI369m`@&)G4=Pz^-fTLI{MuA43Z+ zC#wFvRbvcx_5KP{zPk2+OWyfAB$q^jUsG!$H!jRuculNs(O`%2;MlsvJ*~!~zfui`sOt zUyB9fc_v0Se)<~cenIVfAARDgJ&`xd{Mo;{p!qj!z4fuKj0&DE5{?_11J~6wQY794 zOvE+;ds0XkcA#vpWCKn4ZSAFp{IABvyB8CO-6#B* z2VP4PuWXrOH8)7(=HQMI+{DyD_y>~26k&Qa)W~o4s$VBLvQN({k$PDXG_fe9Lha_2 z#mPGFt+^*W+J^~JnolB5Nw1*eIaKF*O@S^G>7h!9ZQF74oFi!>cB3CnIB?xjq$y=c zt58AX$TE8Ck-Jg$Vf=h}2<+;yQ8&72@nfvDHKx_k#)A+<+2E;NK`hKdl!x=VZ$bSS zz?Pb*P|2`sNsWOeX7i*);AS}p-!P3u{aOFS$-rS-XTvJ<5!ml6Ju^!l3Rx@liNy#o8+Z zb_)In&`{NQFkO;DlP4r;sPZ1!%qIT@{f03xlBbRSL*4Vn>84PM1z$FN`@?@-anhU+e1LNkb#0P$ttO zVwad=l;s%1-n^?4Y;hF<#RtkPY<@~J0CF<>Ie!MR9dqF0*>u8Xa 
z&)EmEJimcE)Z=D3VT0be_rKB1*O%v|-Z%{7oJ_2kjnjqWwPG_-EGw*KM?zvfWunhZ zPIgV(QwaR&h5~rQn+XdYdcY^iuSzfpTv1ATj2p^7peX} z9xgG3a@Kgv^9gWA!UAEMd(1b<@*p=A?arZgbUgr?udL_&{65~D)E1N&(p%@v^qk4J z*o=p$Gl25mpXyuAxP6Cy%7dq$_8}1?YwU~Zi!Zn7J(Nb8#8A2%A;FaKy4Q-kp8wTy zNgq&xN&?P#_#_Ktvrfx?R-Q2x^z0?e_ZOym#iZ*;D$fhY(e9=)6O0RktR{EA4;~yZU+7pF;Kge5Rp&l^}Sp<{qt^GsjE;rc3thh1_<=3Y)rAfMS=iVzWZSCgy0GJQD*flhKAUxlX4D zzpgF8WVT@4HaXA-1B}2)LuuAZwWJ5t6}DAi#GzyBnc3c$K~YvTd9h=-Gfj+t$@D4q zfr(hYTH&s#-4)zU|nYNtgGrMlJtWVkK*(yA8P?Sxu9S4S> z)L3;DN(<7#lp!u&26xhV#sKmZ<*z=E#}8E`CK`YQhz3tTtI@URcGYq-2JF}WRT`}C zVSKUNWXFgyz4h0vx0P`&T?4Uk8{l^-e}DTH$)iUGo-O4E_c8B{{sRba67m6OUT^8| z!<7B7-Q4XU?e|nz4!l5-#Q3#B1s}$?l7al;`)6uaOmrSQ)?!85o?Cw9VEa^g*(=nk zhy(<6MvUeornNkd7gC^*lu6B9@1vR^!Q1#>BehqDp(cHwcO<;-oveP%J8$<6dEaXG zdBl29@8=+>E1;~V?ON!g_Ss449cJzrzw!^fa&p;0zEd`#7NvKnEs6dLIrbZUOHEDN zkbAFMcn!L4@l1cra4mSi`F4d>j_Fe|9oI+$6Yuqygyaqh!w{X;vmY&g`M*#4F}PIm zC5FyNZ~|7gP#e!%j^?tnMORI~=9ScW0y1UF3^=m0;#+=ur{&U(Ayg3))Ppf%{MgC* zHN0?sy{Hh=d#e5!r9GoenWq)?lcLpy4dFDxvC}U#k%Qi8Z@gg= zDwzbGM)X{k*&a_ZHZoQZ@4kf$C=-=dWrYY_e7d>+@_umIME~^9WEOii)?4cR8^-Sq zP~O{3gk2ra`wyU#;{is!nfSPSD!E(|{Q{Fka(D^v^byP2r1~*7>-=YTvfM(`-hq7q z8TXWO%zkyf-n&1j!Q)&L6h(g-)I_WFMH%P%gxnn!OyXe0E@!Mgl~5m?)emqYFb$jj zcGo>Scf!e-8TY>4q?$O?DB#;G*pKC6i-$m4nGH$CUMyR2MsPV-*MpOyUA={*FcSXb z{4usbJWb(3wh{*HgAFt$MEPOBA>gGQgZnZr|7nRjAp}3KO9mMVG9tk5(vlhOgfcDs zMe>z06(ETf9}+<<9%F?;P}Ex z9pGE8gx+LQ`i{2U=dt*%tXY(`>!Cc zn7EW)P95X&5{|R7meNMpJvQJHOyp zvU^XNENXuAl=0-JT9@bB>)z?{-X)>@{4XeKOOoP#td8@SaOJa#HJ-sGLyz(Ox?KqK zrkdu8!c-HE=I2K1>(q)Piyk&UQ2SWRppey5?YI?+c_LrQi0CQCMX!Qh*3wO{L9WpL zCKi0EHaDvvS1*9qzg+aE{D9(fg3}VaUol>YvloEj4TOmPs<0xn1m$H!Jo0DF!BlE} z!~=-=5M%faJ7Uv~h0-*do3T&3>Q1gLQLCkPHE5C8v21=`!}YF?P+3__l7Sqj`U_aXOMYLSQTr5Bj8^`rWt#$1( zru}xDS$hrh+xNR^?Kc+c67lEnk#%_S6rM(D|K9_Fdcpqyy3_G)n2&bnHDd+ZeL+V) zJA>j@=Q12Y;h9gE?ceqsKY3jmElyMTjV~j=j4XWPJytek*usT+u+p8`b{^0EtPk<~(!bbkECe^TYG2wVj>-#qh4hQ`fnS|j1`XcO zZK9pt(+m743RK<%RF)f#(gED^B;C55SnPNx3}oD0*vy^d9KdwWRHy6BZZCDr{p<;5 
zTDfkHn3?_jIAN4YM!H8LFFOm3#~YN3W;tcv>@ykof@3~sOd)Jyqhmto5Tbpv+A_G# zmUr$kbKWz&F>g zQumm}tM{k`%^x_qKWwJ6<9NM-#T*6vC3)@JxP0l; zJVYM%)2J_V^$&XERH)ZUQ$lyB!^aLiP#3qn^rJ17J3s)#{|ZZ-^K-1TxUvV3?+)v6qk58Qtd2GASnAH*{S;&blx~N1d)HCv=K>(}>x+zQo}n=2m`*;r=p}g+}8PqV_=%>JB!B=tQa58^+G^wySq<>4j>J6`Gdp zuL9Udrc%I~qqz&tGcw$A2q^}RKd38a-SP9Xn2E;^FSxop8;X}%)2<30_lmM8{Snz# zxQZXEZyjP(OWpPt5L;!#e>jW1$uE1N@zh$cv*_^rIB zpfh9PScwWsP4C7-J6M30*^ll43}+%00C<2iH=M8n{qS9HiW?gb_Ah@mA-%n73I8bm z+Y+A20yp38OTL`AuPq0OtD74eV`#=grQ5(u%H3Xiw{0R?Sww_CB<0&sD1AEG@B~8e zCJ(2C-_ggM=aa6Z_vpM9t51tW!g`mZp1Tyy%}aEWXiD96_2S-hzfQI?Ot{Z-=WgR^ zMboIAansf&NHaCX#!$|~jP-87ruWf~?~@2*!@2_9Yf1QNdGtHZI%%cF=l7m>gx}{) z2wMGRrdIU5HO!-T3;orxXYE!~(kr~cc1XO|S#5We<(Mwsv$f#Sb@EzMHDe=SPmbeQGGyGEH%6z$-mCT$vTyk1ul#UATPy9L} zxGVLs>|?AP&+>=3AD_~|PDIIrjNOwTeZiR;QCx9ae98>Icj_zjeOF&2fle3x72rH% zsXRPtZ;G23(p&+l*_8Dye3(Yg&0micK1;93sI)*2Hi&UK7I3o*g*33T5xpmxH#!;g zZa}mHnz@g0vj)!tR^frQoU#c4EWYvOwFYZFY%-+xSVGIGOC;yFT0);UfiHLK2CV>} z!=-G_pA|kHiPJEPSAJ3?iRraxr8hL%(9yt!!?_hs9H`m+71`-s-ni7mtxV_Nc+bm! 
z|J(jofqL=_g?28ZwG3^NI`32S=cO!O&x~g3YG`;MOwH76L5d%qp$yFQl+g){-Yn#Y z*IA3-0^FWKb0_76Q7&{sU8A451x=yhWUEXzY^fZgD8m!%mbO+&F_DWL{__~S;R#^I z@MLA*;4EJ?E)ZW@w=@}c*AO^rNSHwlJT0aOz^m7)Xx!Cgrasux@RXV)nAvHOfDKXC$77x%%ITh33npn(gX>21 z^hur2&yG8vq=Vndre)>|dTHBrbK@SfrHo!eHYP(^?g8i4W5)r%|BP*rE@W5$%OrY> z`cX|Mkl%MrV+89w;*sSv-7hf~Q4&u8%TZ~))!HkqMkJLh&3|BL4ej6Dk((-s-7AP9 zwRF{*22v_5!`unaq1CQ-;y*CxtENt^yKc_|)^pzawgls86hMCm2Xq)7DPK(qIt8z# z5+6>RreQG}%dEUOhct7YHQvaxT6U8gI+sjc+4&DXl~3Cf*9EypCe>V{y#!&;H9RAh zH1nNoyq1|WIfn^6$^W{(F*=aObIiK=h`Ham%%dOm=?b#EK^2hkRdG+uUNPb#=4 zTNatk`yD5%1OVBUS^=x%y(Nx`(}@;wF27K3p_lvGA3Mek2BmXdNWtXED(+5+cGlt^ z$uo_12WM!)Olv>)iy(^OT0$8hyY7_Jk5_M4#-7y3`dg@{1l=wHofUfYCZja@2a{*~ z{B|QMm_g``ZYl7$^ze!)WH)Rl85O`-&->|7`%+ZG&Hhun-y`vdIPD-MP>I+=Vcxli zdHC8pGI`V7oKjhBV7&r)(EOnGi^m^d+$TG|(0G^hKJH5SrdT_KA!fmAroF%ysZlfZO*bfO}ANq0-uX_dUUSLt= zJ;2XuDmO(>nrB>{D1DS%mc@o%s}vLN+v7ErQ0!SJJCNmbm4OB)VgKS~Od^w%z&8B7 zaXT?A|rMF`xJZyflU3yW`S!yC@ICDk!Ky$aYVfDaJ#u z398CB4E~m?){-KHwB;${%WOd-G9n!ZRlBkWZ`)DQkLBd>pDajH$Q#E4Z_)vQoxC9MO zQW1^ql31vUl`&h0GiB-N-&ZESEn*+PNTl5DDdsNO{4(0to>9eGS8GRfT+Y=?l>KrTXQd8=<(OiCZU>_$W@^dUTVJFbD`vm7K zqWZjEadM6zNz62gRn7-j$V@9pmQNJcwhyTW_*{(%T!nyRsJSYJR#@UT)=C17q)YW* z{EC4k&Sp~yFo8M-9EN1sPNz2Up9D;mBJEThlK%lFaajysif`2fKD5-A{C4b$N~G`E zFpf||`%*H!)b+`kNnNsbNzNTZ@nF){r$fo^-JFN2i@wP(McKEp8=ERs!ox47iZ6Cd zf01rOF_Dj3}sUndm0*_}uI8nye_N|pAQb|&dI{QjN! 
z%=XFNBeObqe2RI^<~0i}QLGI*`w`oxv-#v)Z0PfHqE)cU;Iq1n_l#SX^yxe!b29P8 zGOXpr^`C{8RYp#MVTIrDm4{(1ErEKmeKMqyKb#M&7oN|~HY{FlE`n~Hs+Q2Z!gcSp zzoR$PZhor)Linq;5MB43=nx531|lo+v_@vY>EBD1_7eX9Upgi_!fB+Vl|bJdhy37l zy(Atx1_p24p5L=Rvik?fb-jekZ4ztq#n`d?Q#6&+3i5g7avl`FFIVr{o4nPh7KliX ztE9#iY4il0#wy9I`#E~9cgEKieZ(sbXMTp6&dm9?m8Eb^7M1F}2^YrGm%1g)gqcp0)^mM1I;1 z>I%f`QT~}~+1cUd{A6jzEw)FGBF7HD4F9P8$2pLA-~CL$}Ic0F}D zze>rqip`!9meYK@vIALgVam;nZ`?YkZ#z#CC*2tPnt8CEh~1*sdFke!2#U-22av(_ ze-P5!WD<(vPJ6=O;6TiT)!|YoZ$A9XSoQ31@rTMw7IgZ1np8#?;j5Lu!v|ID9OZN% zHKN<>Wa=*B?!w6T=*?f4V-{L}Odw=i3Myg1{|_*sZ_2ya32toFYDqxbvLG@L$v)r) zXF_jD8^wAnkhf%)Fa3%FXbx@w8oy;eA3WBYslZw{m(?)UV&hzrbf6Z>=MrR7&DUjg zB-5an?32nuJ^bVh>tMWC%O5zer$voO9qmj9EvdpVP6pE^KfX?65a?}^E^nG9A?}{> zjawd`!9MWz!Bnd(B<6``A;T)V%3Z!0A%E~12!6UqyZFq$04Z|ns}a}5y2{Grg` z(+Jgo=QQe6d(&Y=QpZ#WswEPn$yAh59l4$yRg%42CJh&oMGG;FnfdX(lRpqb4sifI zP(uy+)ywq-moSf+VzNp7`X-LfJNj2J&d*M0w1A#y4^F>Zld-JB!#?hw9_3Ra)Mp@3 z94Lvmw#8!g2sBRrF%Q)UwwL6_yPja%T@Q?>}TioxWQ5WmWLNq6E^vJ(bmgH4kSa% zPn*I0Sv^7T8tRBS8JrZ1#!A)kxni?k%;sPIsB`_a6z3dfm9X5KFRtT>`7X!*;_##_ zoxE<3RfxYCw^zv_UF0iWEVO~6(pu3W?}=$3KgE{vWt3PRnl6Pt9vq+e?c~_&W)s_- z=ot`(pehv(u@Df7%P1RtG@83!Y_jOv=X24)vaO6Dsw&wWc?|yvU$pt2-RfL zf+_1g2qlr@*hu%lVJax-N3CYm+W7e;$NU3T|H$`?{4`6JA(Mgs13b`iau*pP^xP=D ziI}O!70q&GpBIRkH9x@dvq7use^E@lom9wjq*<8w2>eC4-a^H7by}Km2w!x~s95Nsr)-kLzKdzUob1mtG`zBfW~!_9L==xx$#pJu5|B z2Nb!kB@JLY<(s2r&w{tOSjgXe|CSSQ|LbdpLGw6=&2}K#NTnFxe8f`9BQ6i&=0RyY z*o0HJY8nJnuPnZ6_{tM`dQa|!YyxA?*5glsf2M}hC;eo7RvXfd>!2OJwW6y`>nX1d zqk6SJD4xvWb+!!n5`EcQ5*on+V-NJzq6ayZaC|NTv~Gd0&DSOL#k#9bPm!{XjAb4Z zCAT~)RMla6xC&9%EKl_{)lhM|vRoNY5C5ay9>0XUFX7&Pf>Y{$rf72fh74!0p@5vV zhzZIbr;PoJH%*NShff~U?q!WUyf9Vd~=gysgjO@d$f2|zhM2i!;^EVcXMLT69b9pkn znY+0+1LA)a7$f~}&PX6;d}o}pA~k^7o{`3L`^)s>Z6yRsPjGjLS6@(GHoNrN)sx{! 
zJis)vPOX~Z_WN=Cp@siWo>us5kzHrZc0Dnjj3wH#m|~*zEDe;`;P;QdG{WOAgM}pB z=6S_>8MUk_5_?KsIgi31)8;lZJvg$0t^XF3A#2`|O@b_AHFZ}4nnH2}N|bR?=M@Fb z;zJuVuvHwAX_77F+*Yu>(ze*qa!q-jt*FbYUbV!$DB?8!>ej}ushvr@pbwA_L;Oe9 zvLv)2a-5G&^>L3-SjmjG5K)sj9^r%GI2ruDex{$RCQvL zvLqvla1^-BJy5a0>*-sZ_XJjU3ZRls03GY1u(1}2z3^pW&$qQEo2QF)637RddJpc^ z=A|;E)pPl7(e9W z-n)Bigp%C~oa9nsCkjEQimI0mZXPy35PjAMtyx2nQZXisOA{gPD~p;J7Y>X$Q-fW> ztW8?jETV8YZX4@PEE$mudkC#)r4c|__7$6vx{Jy7T96MWtVJe&wcW_fdhk7|VKV9S z2&Va>ZvBlyk>U;8wy`-Ctpcv2VdcNyUjBxJHZp53EQzA~hmb{Fx4IHWBbaCFRrJf9 zuJ$(_LBP#emej`FbnIvlvRg8I(R|76)KdhVh^NOR$KQ&w408SC8TF(DcJ9jK9S>^r z&x`&FT~AE@+p&xNGT46bDCR`}01;lB98sN8oFdJu8{3J2R>->f=eI0*um(lSRV72YOw->EHuS z3q{3ZM5)kprXm`iIL{ncDb!0Cs}bg-ME0u@$4w$zE!srWOp&kg99MAwFhG$n01Q_z zpvDfP9!CIIY2>SnujN}LX4u&0Vcx8Xl&@^nQyCqpFn#KiA)(VueQK)`d(_1Vr4ax! zYK3C4P2H*qaHFnK*!d!SJI+^Jh)YT1z1%IGHIe% z?h$?I*8II!W-NHC)=-yWRFifG{dS#V4qfv5_sSTO2na^kLA{Eg%O1x7sz9GLE@Z*w-nqCC-BN$ps+`mK$_vfJK6DF+8@3W*~~$mbY55}4#%FyfbX zYkN?#it#cCSpfN3xycqwc?@!{7=FJ>=9Fx9LU$j#J?hjd0%y6ZYIh&4Uju4^=}wv^ zqtJ4xCb#sv0E#Xo8DsZHr@eE@du0{e5KB4-mBOm$l1+AsKu`xvR;}@UjuLUS-6on=90qjPY49&s@_`|EO>Ip;Nv8bSz%hX5*wkv8o#C6#y z1bfpZm^U*Cr1O{LkbbqTVXMa~2(EZx+|*Z^Y%>D(=V|n;cea)%qsO{A*G>$v^r*J=i-n8J6-=?>qzmm*gsf6ExWzy!bY7fNDE6t7j8j52v>5Y#ryT`!l1U}I`H9N+ z6^Y^H%6_QZ%}jCU18 z8)b3Yp^%&!iVbGg;B!w_4N$0QTLYz8SU|_E3&QOuFe8$~JyjfM)I^p18m4tb+FrD2?w zp`)7_ws^DY%7q<6BXB{iOT9g8vpS?`3o|jm1J=B6#(puAT6eJ1nWNLe z!#Lp0{tNu*O5uAIykp|wsL1+en&esoiDqB$VMMO z#ydCo*One?OG~-zF5|kka+0CX*Vy|~lx~L^S)T9a1{C;PEsWPSpzF}01jq3=V%9!)9)^Hsxd%&5Uq8JJx(}znZfYzHDHQ{c0g=E%zY2xd@h-|il@c(L8Vst_S& zR_o4dNJ2@?QU*-}HagVJ5OK{mD6xu}onsuD1VAm6cB>HE?>GaF=ARTTFU;R6`g+y9 z%NW3<1d#=aHuj}1Sm0CvjE1Zt;8Gb+SclHg?_%q^79XX2aVq(8$ejwYACa%LXHhM@ zso1F>rF?U&L-u`c{tz*;;m7K0Dw`u(Te5-cN~f9+UMUx@DdirFTzygUB@AkyBgT5M{O3{d!| z60R`Zih!R=ZvJ}HOidA7>2~w-2G;cznJumLo=g)KUql4TF z+(dKJBDQp^hq#5@CsyM%3fo6>Cf%dl)|zaA0wRC_z#f#)nL0BJleGPKs}RY!a~u6d zGS1nqTxTp#MXsLTNrD)|T#d&ZMmkoJjm+gFjLQ^9Wh&z*pl%IxvHt+73d%vMlGr5H 
z@8=YT9By_!K>GILvh>|mE)0uk87v_iQdtgKX3yTq=m({0m5koHoeIa!M{2Z~b=urj zJvv$Lt;N8Je6}ZVI3#DSXeP|zfkyOdWLUEddlv(ztwncrP(t!Znz9WhS}(WO4E?d@EcQH%<4Tv888-76Vh<$06Z zs}X}*F1v+Nl`u~fnkg}W`_yfULc~%tR9vJ+-jxySQZfALhpkCNdQ(k5wKkB;dRD1E zkA9ahoUcg}JUaSUtyx{eePTMvc5oGsJH=RyOCeF!7&`t3nqn@U$vGsG>7LbU(o-Dj^-Y_5vCUY7;f89E zp2wp?HrZRJq+|J4AF4oN)S5xu*JQuyqW=JJe=5b*tvt`06;`@3i?b?d_rluvs~_D> zfPIf+TZS`fsQHQhrsIb8ZpZPiIm02r?Okq-ez*2wDR5*#f)8=)LSvHZYevWMqVCQe zLe-#{e5auuLG=_W;MB>*41{vA>9X0*VvuJC49;sI|^!Uk+3Ql zQKfD=de)pV1sy2>u}6VVX6z_t1A$B(X$+A@RF7);AI7kYe;Vr%`dkzF*ViduDEvr| z%lLt-fyQJ9^sK6EX-nccLqMR=I`^zjl4RqhN;$}JrbgsbxLEX$4~%IMpHgc2vcD!cWh z1c9l_Y8A%88z*6w@TVX9t&V4nAh*@T;&|w9@x-N=fCnU#T(-O76EG5X!?n$-UQVS%w?r{~A~wLjrZz6)n%ZDS4t%G*HqJP+n-zLz3` zzO>utGI*NF&oM&ke;o5kGfQ+mCOB5w-Y8IEV^E{;rQ!0V0w{LQ8igEYnTZp*w_&v9 z3HztF(xIN~bisR67xI+4i~FD-&Yr4dUI^Se8fXiGbx;8~K9v6eG-A2=?H)&Fj#o*C zJ6PlzhAmE8cJl543@GIl&W+^&W~xnh9C5SA>Z%4y6MzMCI`@ohG+SRX;zH6m{oD!5 zzv1ccUMb@L02*rA^1KpD_Ioq`050*K5BtaXzn8UKVA!0x9gmCtDjTV0(tJNABEd;- zM^^O-{{Y2)UbW{q+gX}u{pqcF7Ymx&3dqh+Q3wTUA~0%b&UmKCzSTqw7&QdxkWhcz?&qZp`9G%=&0 z@VAPeMU+}ylJ*XG?qWWp+tgR7S(xq=PZKN0jmpY$4R}M0k6&u{JqJ(z%JA)-q2baY zzy0IM{Y_ytsk9!JBqR6Zo`S5!^6hUa5{>)Ud2%UKd2$plc*X`ODLe;w>9>Ur)fL2HGvxqCZ+%1+bNS6^p% zlb|`|(j}`Ghf2=XEj-A~@sFJcAa@m_4-|)*kU1kISht0xAf5$HHl5|)upjEr%%l2M z=;KrscLC7C5l3o~r0gmtlRQ!bngl!?ntIa(NVJAZy({Ivim~~B5cLE7;HUJjsTRIv z_`aKdAl#qtM2Gp-bv_d5d`AL!q!gzV3g5LByCps7b{&6bMlY2mrWMVfM+D6q>4 z{M5+Y)m1!HsCg8C^-UaOMz{X}cCL1+x={ZBiAA^jf~_94sxlbWUHa9B=~bjW(_kW+ zx2K_dy#D}n6I77)&1q>k6Ug4f`Ovo$JADIBREXTV05hP>xi_;zbtI0fb@fzCRZ?I_UlinxGEbKl= zk8%Ff56D-kX?LZ+bo?-{pS~pN14!}x`bME*xgzBNAK&Ev0OQ%MDeCM*v@(`i?`C_O zsh(-ymUlTP{{XF7nv!ECwJo@aX0x?EO?1gC8YbwET`K@OE09VaS zb}Czy5wO^)$lNNU97$HAU_}CDc?Q!=-Bu<{u5nb@;Ps@&hD(eZr63tKZQC_cQ(4Hg zKxqk~jUWV^R2C|XnzD(pa(dIChXW>wn;j~WwxYROM$1%G<~cOp^$eo+iEJ-5?LOR$ zhn+G#fyw;^dyMV!*O7P{5jTsi2^>VPC*-*Pwd+#y4P!mcptLI}=ZdtQIj9s6YQprW zG$d2SJ`?j0Y9*;6;y`JL>^vbp*{6V>90nhSbh4%maQZa=0A|!7lRRe!=xe=@ZcK{M 
z##@vmP;x6K_R}9SirH~YC>;u(^vOAbhOXIMvHTXup9yrnGGoNI}r5ssYOP$D6H9NrJiZP)MB2sLM2m*l}}2DcBuzmwIfHY=|}!07Qgk( zRh-pbEB-+CzxB*jl50~T81YqPt1r^3$<~GgSFKyJm&=^{tx{9XFPQPLbW+R@3Omz4 z&tcJAOQKsaIm!xyQH+!|Q^A*BZkqPYerVeu9>Dhe4Q$=RxepcCju|U6fxApfY67`h z*VL_c$WALTrVS>GOs|Y|pi`Z3OkQdY4!^?_;w_ozYp6IQHN|KTwqCvKvW>SWtro^I z+?7uh$)gU3G^2x3brghi*AW;lM2XnfoqTDq4*DjngYPTbx%`=b_G2RVm{j=M{8vDk&5ZLj+~WaaAO4 zYURM%O;B%`;<1sU8;qK@Zw5|CrojPr99ELY<*gBo!Lk?m7mA}ZWYx)=9qT?`O%e!^ zQqoe^Wu1S+n8R!L5+C5UZ`Fl*V{=}2@YsLE!%#kusQ&=RtInJ91+`d~Ebk)E}t3~6@2c8pZ*#aGbS+-R1vbvp%G zb;nU!84xOximfN>KoNt3S~j-Z7kiJG^!BJMVBFqgjui3kYezZnRFIo5Xu6B( znv8P{lqU>5M|$+{l6aOxA(6l!j^NjlS83qwU(80+rf->Y(G%(1e_#IqRc}r7IqFBL zYgDwzCl$=i4%X>i_2eF69xIorq1ikcyrg zMv<~>Rtr%W6$&l8jmaViEyQcmohJgVy6qSOs>nDsqgEj^I(nfeTG5zwsuBVTtR!m^ zF0@h7ijvOf;jjFC>L>o0r}H)FhP?0LvHt)`)PLNM`L%i|up+!iX znqaA7T$(H+O3ZRAe%C1K+*qG#KuhDjl;wbwL;x$D|gzvX|hTNXt0Rx zW!k-KCRq!bp6XJ^gH+D~n9z!Ud0KYFfmxArC23o!VgTw+DGi!dqsxuu?oI_+a3o#B ztwnK&QYgnk){$h`bDxybBv3i00YM?YW&CMCrZM8AT4K>8n?}+1jUXI{bcFU5^UucC z4dZyg?P|Zw*U+$oj1R`XbNI;nyn7=50CEH$&a~1R~!|ne75*ttY<|{#)-nLs9 zy~0anZi1>wW_uG>e9}A8iI$5X)M!H#QK;#(QU~U&NIc6`Vf-YDqD~Dv6LQ+fILWDD znoE0xo)+685XwhmR0H!<#`zS}29H+KH8}NsJttfT=;{5|?@?b`1Cj-B`W~+OMfj7G z1N^e6^!56R^pIn@fn-JujxwZo710@7=9FyCKJM25oK{Q2hv{8?^{(9UT>Q{s$i^wO z369f-$&lAa8N`)=Znz^JwZ5E$KBAbJpq!|ujw(3#T1SurNC_lAEpQ(b!i^?S=S zNI&na{{TwuC7X-`UTyJy>t=zcS_AV+s?tIT=~+rQG>nWggdEdxT9R8T zjN++EstFa7jg3^Ubl(r}Qr}g$wsHZT0bg>Y@%bOpy)+z(@OWmH+TuCkVvl$QtNrivAB7b$eio$EDydOhkb*Bo98eG&G5B^nZ*k!=)RRPUfv~aSnYLP76 zvqFq3O&?=%RbfEL7$L zMAqD~HM^z7zFp^{jQ%x+WM!Jlto589ucvP_rG!^ED)P=@{vg?^5oie!u_) z7!~sm#d&<+h?<8#by0sx`gXor_?r8^BI?-e>Ob+?%BR9>OX4{ndQd$nDOBeb&d#zy z?@p$UDc+f^*`g>>nr>*$YeO;nsdG%t0MTGQcf(J+!`6hH{FtpWYZt;#yTf+<@CdDj zHAsiJ)-?;YNXDVsUGO^4BVR^Sb8S3r&Ilr@UBF?GgIfL_3-;3h=+Tfr3ZGfA`@{;_ zM)A3mm85e$Y4NdE>qgS9MI$ezMT@y{92)C9C$1Y=2A^`@xpVubC+?q5p{^=7_p7ls z;a3-8WUh}~xP(a=u5#|%WQU5I!ur|0hwOJPnd2WTALC!~ty}9*>R1kIWX^W1bI~Rm 
zwynlvTF)_Uam8rc50;pVVBu<y7|^aoR+d1tlH33dXU#c1>o&_sS?Bbu_ifa!`KEQOWbSH}gstBSYez`c0!YMK z&xL&SQ`puWH1aV})y)&J?(w$u+$s64SHYUWz0uKSe9HmypFPK8`F|?vcq1dFVQU+w zaq4|)RDGmi)qLR7f#QHVy)N)PVOa7jMop}l7021!nB*MR+Ftyln$bI%N;YNmJoTy5 z?_#rKoEFVnid@O?okSZzP1kzdl_T^W$XatUdEtJI;@8RbZ=ECqQ z$81%3MjI6gjf~D|MK+XG2z7j^@fr90Le`I@*!=~4S6?Q0;r#C%Yj4;6Eyv}>WmjWb zKM~K;iYQ*aE1jKakW;^{G=DmEX3bHXN3|stsff9y6x?%4C3W1g<}Jkt8q#WCX*P1lZVtk!IB zO95Qeskb8(N+XP}Qd5JEdbtaG)hGF0r-8dxdm0*+^T};Ib3)~0Ly&9I^sPocTTe-s z0lGT>0Cd-uspFp2zu}EU*=kCKu@v7c4@212k!EvUPP%w7=AR1SVy#Y_hDTbiqXx9T zh9teJyu-a%?NCqBgbd_-d#ZnBcry9bu`SjuV*0mV{B#=dg&bg4qx@Xde9P@ON>lQ~ z8Mu@E<~SdqKb?8V{VS@Hvblubqp14QW{+ynfoMG`DQEzukb2XN7|j4IQf=dkv>zjy zqm8ZYPZD5tri95{<24a{eJQUkS9v<~Nw7$j=Bm<5_zGi5?@&AO^zBgOQbT2wnyBrX zl19x!b6H86q&K}CY51haH6;wBH1a99#UY!X5Wo8#wd6BmE2Y`=Z{hx<@vl;}nB!yi zuMf*3E1@BjN>hr2uCq z6-D2B8tMEoYtNcT0+CNdbf|(GbwSuwN!yCIGjUndHwKzAGwxp*s~R}{{_3N2+|~Uj zh7=Xh>M9wAc^#?yxTwkIp#vhN9cfr|kqE!?0A7j(2iLVhq5s)Wk+G`) literal 0 HcmV?d00001 diff --git a/README.md b/README.md new file mode 100644 index 0000000..e4b1484 --- /dev/null +++ b/README.md @@ -0,0 +1,332 @@ +# Stable Diffusion Python WebUI + +> This project is based on a script from https://github.com/imanslab/poc-uncensored-stable-diffusion/ + +A local web interface for generating images with Stable Diffusion, designed for fast iteration with the model kept loaded in memory. + +This is basically a very simplified reimplementation of Automatic1111 + +--- + +**Warning** A lot of this project was made with claude code because I just wanted to have a working SD webui and Automation1111 refused to work for me. The whole thing was made in two days. This is not aiming to be a maintainable project. It's best to run claude, tell it to read this README and have it make the changes you want. + +I have AMD RX 6600 XT, which is not supported by ROCm, but works fine with `HSA_OVERRIDE_GFX_VERSION=10.3.0` in env. 
That essentially lies about the GPU you have, and somehow a lie is sufficient here. Welcome to ML python. + +This will work best with NVidia, but you will need some adjustments as I optimized it for my AMD card. + +--- + +## Legal Disclaimer + +THIS GENERATOR IS FOR LOCAL USE ONLY AND MUST NOT BE EXPOSED TO THE INTERNET + +It has no NSFW checker enabled and even if you added it, it is still unsafe. + +- **No Responsibility**: The creators of this project bear no responsibility for how the software is used. +- **An uncensored model has no guardrails**. +- **You are responsible for anything you do with the tool and the model you downloaded**, just as you are responsible for anything you do with any dangerous object. +- **Publishing anything this model generates is the same as publishing it yourself**. +- Ensure compliance with all applicable laws and regulations in your jurisdiction. +- If you generate something illegal, delete it immediately and permanently (not to recycle bin) + - All pictures are saved in the `out/` folder + - Clear browser cache and other relevant caches + +## Prerequisites + +- **Python 3.11+** +- Compatible GPU (default config is for AMD with ROCm) - ROCm must be installed system-wide + +Optional: +- **Git** +- **Git LFS** (Large File Storage) - Required for downloading models. Install from [git-lfs.github.com](https://git-lfs.github.com) + +## Setup + +### Create directories + +- models +- lora (if using) +- out + +these are in gitignore. + +### Install Python dependencies + +```bash +# Create Virtual Environment +python -m venv .venv +source .venv/bin/activate + +# Install Dependencies +pip install -r requirements.txt +``` + +You may need to edit the requirements file to fit your particular setup. + +You also need system-wide install of ROCm and amdgpu.ids - it will complain if they are missing + +### Download a model + +Models go in the `models/` directory. 
+ +You can use any Stable Diffusion 1.5 or SDXL model in diffusers format, `.safetensors`, or `.ckpt` format. + +The simplest way is to download them from civitai.com (safetensors format) + +The project also supports diffusers format from Huggingface.co, example: + +```bash +mkdir -p models +cd models +git lfs install +git clone https://huggingface.co/stablediffusionapi/real-amateur-nudes +cd .. +``` + +## Configuration + +### Model Configuration + +Use environment variables to configure which model to load: + +| Variable | Values | Default | Description | +|----------|--------|---------|-------------| +| `SD_MODEL_PATH` | path | `./models/real-amateur-nudes` | Path to model file or directory | +| `SD_MODEL_TYPE` | `sd15`, `sdxl` | `sd15` | Model architecture type | +| `SD_LOW_VRAM` | `1`, `true`, `yes` | disabled | Enable for GPUs with <12GB VRAM | +| `SD_LORA_STACK` | see below | none | LoRA files to load with weights | + +```bash +# SD 1.5 safetensors file +SD_MODEL_PATH=./models/my_sd15_model.safetensors ./run_web.sh + +# SDXL safetensors file +SD_MODEL_TYPE=sdxl SD_MODEL_PATH=./models/my_sdxl_model.safetensors ./run_web.sh + +# SDXL on GPU with <12GB VRAM (slower but works) +SD_MODEL_TYPE=sdxl SD_MODEL_PATH=./models/my_sdxl_model.safetensors SD_LOW_VRAM=1 ./run_web.sh + +# SD 1.5 ckpt checkpoint file +SD_MODEL_PATH=./models/my_model.ckpt ./run_web.sh + +# Diffusers directory (default) +SD_MODEL_PATH=./models/real-amateur-nudes ./run_web.sh +``` + +**Supported formats:** +- `.safetensors` - Single-file safetensors format +- `.ckpt` - Single-file checkpoint format +- Directory - Diffusers pretrained model directory + +### LoRA Configuration + +Load one or more LoRA files using the `SD_LORA_STACK` environment variable. + +Note: I'm not sure if this actually works, in my experience it more degraded the picture quality. 
+ +**Format:** `path/to/lora.safetensors:WEIGHT,path/to/other.safetensors:WEIGHT` + +- Paths are comma-separated +- Weight is optional (defaults to 1.0) +- Weight range: 0.0 to 1.0+ (higher values = stronger effect) + +```bash +# Single LoRA with default weight (1.0) +SD_LORA_STACK=./loras/style.safetensors ./run_web.sh + +# Single LoRA with custom weight +SD_LORA_STACK=./loras/style.safetensors:0.8 ./run_web.sh + +# Multiple LoRAs stacked +SD_LORA_STACK=./loras/style.safetensors:0.7,./loras/character.safetensors:0.5 ./run_web.sh +``` + +LoRAs are loaded at startup and applied to all generations. Make sure your LoRAs are compatible with your base model type (SD 1.5 LoRAs for SD 1.5 models, SDXL LoRAs for SDXL models). + +### Frontend + +Frontend constants in `templates/index.html`, normally this does not need changing. + +```javascript +const CONFIG = { + GUIDANCE_MIN: 1, + GUIDANCE_MAX: 20, + GUIDANCE_SPREAD: 2.5, // Range spread when syncing to slider + STEPS_MIN: 1, + STEPS_MAX: 100, + STEPS_SPREAD: 15, // Range spread when syncing to slider + DEFAULT_TIME_ESTIMATE: 20 // Seconds, for first image progress bar +}; +``` + +## Run the POC to verify config + +Run `run_poc.sh` (modify it as needed). With the real-amateur-nudes model and default settings, it produces this picture: + +![26-02-01_20-36-38_673842166.jpg](26-02-01_20-36-38_673842166.jpg) + + +## Run the Server + +1. Copy `run_web_example.sh` to `run_web.sh` and customize the env vars to fit your needs - choice of mode, special options for your GPU etc. + +2. Start the server + +```bash +./run_web.sh +``` + +Or manually: +```bash +source .venv/bin/activate +python app.py +``` + +Open http://localhost:5000 in your browser. 
+ +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Browser │ +│ ┌─────────────────────────────────────────────────────┐ │ +│ │ index.html + style.css │ │ +│ │ - Form controls for generation parameters │ │ +│ │ - Real-time progress bar with ETA │ │ +│ │ - Streaming image display via SSE │ │ +│ └─────────────────────────────────────────────────────┘ │ +└─────────────────────────┬───────────────────────────────────┘ + │ HTTP + Server-Sent Events +┌─────────────────────────▼───────────────────────────────────┐ +│ Flask Server (app.py) │ +│ - GET / → Serve UI │ +│ - POST /generate → Stream generated images via SSE │ +│ - GET /out/ → Serve saved images │ +└─────────────────────────┬───────────────────────────────────┘ + │ +┌─────────────────────────▼───────────────────────────────────┐ +│ Pipeline Manager (sd_pipeline.py) │ +│ - Singleton pattern keeps model in GPU memory │ +│ - Thread-safe generation with locking │ +│ - Yields images one-by-one for streaming │ +└─────────────────────────┬───────────────────────────────────┘ + │ +┌─────────────────────────▼───────────────────────────────────┐ +│ Stable Diffusion Model │ +│ - Loaded once at startup │ +│ - Persists between requests for fast regeneration │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Technologies + +| Component | Technology | Purpose | +|--------------|--------------------------|-------------------------------------| +| Backend | Flask | Lightweight Python web framework | +| Frontend | Vanilla HTML/CSS/JS | No build step, minimal dependencies | +| ML Framework | PyTorch + Diffusers | Stable Diffusion inference | +| Streaming | Server-Sent Events (SSE) | Real-time image delivery | +| GPU | CUDA/ROCm | Hardware acceleration | + +## File Structure + +``` +poc-uncensored-stable-diffusion/ +├── app.py # Flask application with routes +├── sd_pipeline.py # Singleton pipeline manager +├── models.py # Data classes (GenerationOptions, 
ImageResult, etc.) +├── templates/ +│ └── index.html # Main UI with embedded JS +├── static/ +│ └── style.css # Styling with dark theme +├── run_web.sh # Startup script with env vars +├── requirements.txt # Python dependencies +├── out/ # Generated images output +└── models/ # Model files (SD 1.5, SDXL) +``` + +## Features + +### Generation Parameters + +- **Prompt**: Text description for image generation +- **Negative Prompt**: Things to avoid in the image +- **Seed**: Reproducible generation (random button generates 9-digit seed) +- **Steps**: Number of inference steps (1-100, default 20) +- **Guidance Scale**: CFG scale (1-20, default 7.5) +- **Number of Images**: Batch generation (1-10) +- **Quality Keywords**: Optional suffix for enhanced quality + +### Variation Modes + +- **Increment Seed**: Each image in batch gets seed+1 (default on) +- **Vary Guidance**: Sweep guidance scale across a range +- **Vary Steps**: Sweep step count across a range + +When vary modes are enabled, the corresponding slider hides and low/high range inputs appear. Range inputs stay synchronized with slider values when vary mode is off. 
+ +### Progress Indication + +- Spinner animation during generation +- Progress bar with percentage and ETA countdown +- First image assumes 20s estimate, subsequent images use measured time +- After 90%, progress slows asymptotically until image arrives +- Measured time persists across generations for accurate estimates + +### Streaming Results + +- Images appear immediately as they complete (Server-Sent Events) +- No waiting for entire batch to finish +- Each image card shows: seed, steps, guidance scale, prompt, link to saved file + +### Settings Management + +- **Export**: Download current settings as JSON file +- **Import**: Load settings from JSON file +- All parameters preserved including vary mode ranges + +### Responsive Layout + +- **Narrow screens**: Stacked layout (form above results) +- **Wide screens (>1200px)**: Side-by-side layout + - Left panel: Fixed-width control form with scrollbar + - Right panel: Scrollable results grid + +### Output Files + +Each generated image saves two files to `out/`: +- `YY-MM-DD_HH-MM-SS_SEED.jpg` - The image +- `YY-MM-DD_HH-MM-SS_SEED.json` - Metadata file in a format that can be imported to the web UI to re-apply the settings. 
"""Flask web app exposing the Stable Diffusion pipeline over HTTP + SSE."""
import json
import traceback
from flask import Flask, render_template, request, jsonify, send_from_directory, Response
from sd_pipeline import pipeline
from models import GenerationOptions

app = Flask(__name__)


@app.route("/")
def index():
    """Serve the single-page UI."""
    return render_template("index.html")


@app.route("/generate", methods=["POST"])
def generate():
    """Validate the JSON request body and stream generated images via SSE.

    Returns 400 (JSON) for a missing prompt, a non-JSON body, or malformed
    numeric fields; otherwise an SSE stream with one ``data:`` event per
    image, a final ``{"done": true}`` event, or an ``{"error": ...}`` event.
    """
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({"success": False, "error": "Invalid JSON body"}), 400

    prompt = data.get("prompt", "")
    if not prompt:
        return jsonify({"success": False, "error": "Prompt is required"}), 400

    try:
        # Parse and validate seed ("" or missing: a random seed is chosen later)
        seed = data.get("seed")
        seed = int(seed) if seed is not None and seed != "" else None

        # Parse and clamp dimensions; SD requires multiples of 8
        width = data.get("width")
        height = data.get("height")
        if width:
            width = max(256, min(2048, int(width) // 8 * 8))
        if height:
            height = max(256, min(2048, int(height) // 8 * 8))

        options = GenerationOptions(
            prompt=prompt,
            negative_prompt=data.get("negative_prompt", ""),
            seed=seed,
            steps=max(1, min(100, int(data.get("steps", 20)))),
            guidance_scale=max(1.0, min(20.0, float(data.get("guidance_scale", 7.5)))),
            count=max(1, min(10, int(data.get("count", 1)))),
            add_quality_keywords=data.get("add_quality_keywords", True),
            increment_seed=data.get("increment_seed", True),
            vary_guidance=data.get("vary_guidance", False),
            guidance_low=max(1.0, min(20.0, float(data.get("guidance_low", 5.0)))),
            guidance_high=max(1.0, min(20.0, float(data.get("guidance_high", 12.0)))),
            vary_steps=data.get("vary_steps", False),
            steps_low=max(1, min(100, int(data.get("steps_low", 20)))),
            steps_high=max(1, min(100, int(data.get("steps_high", 80)))),
            width=width,
            height=height,
        )
    except (TypeError, ValueError) as e:
        # Non-numeric input in any numeric field: report it instead of a 500
        return jsonify({"success": False, "error": f"Invalid parameter: {e}"}), 400

    def generate_events():
        try:
            for result in pipeline.generate_stream(options):
                yield f"data: {json.dumps(result.to_dict())}\n\n"
            yield f"data: {json.dumps({'done': True})}\n\n"
        except Exception as e:
            traceback.print_exc()
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    return Response(generate_events(), mimetype='text/event-stream')


@app.route("/out/<path:filename>")
def serve_image(filename):
    """Serve a previously generated image from the out/ directory."""
    # NOTE(review): the route variable was missing in the source ("/out/"),
    # which leaves `filename` unbound; restored as a path converter.
    return send_from_directory("out", filename)


if __name__ == "__main__":
    print("Loading model on startup...")
    pipeline.load()
    print("Starting web server...")
    app.run(host="127.0.0.1", port=5000, debug=False, threaded=True)
pipe.unet.state_dict().items(): + state_dict[f"model.diffusion_model.{k}"] = v + + print("Converting text encoder...") + for k, v in pipe.text_encoder.state_dict().items(): + state_dict[f"cond_stage_model.transformer.{k}"] = v + + print("Converting VAE...") + for k, v in pipe.vae.state_dict().items(): + state_dict[f"first_stage_model.{k}"] = v + + print(f"Saving to {output_path}...") + save_file(state_dict, output_path) + print("Done!") + + +def main(): + parser = argparse.ArgumentParser(description="Convert diffusers model to safetensors") + parser.add_argument("model_path", help="Path to diffusers model directory") + parser.add_argument("output_path", nargs="?", help="Output safetensors file path (default: model name in SD models dir)") + parser.add_argument("--full", action="store_true", help="Use float32 instead of float16") + args = parser.parse_args() + + model_path = Path(args.model_path) + if args.output_path: + output_path = args.output_path + else: + output_path = f"/var/opt/stable-diffusion-webui/data/models/Stable-diffusion/{model_path.name}.safetensors" + + convert(str(model_path), output_path, half=not args.full) + + +if __name__ == "__main__": + main() diff --git a/models.py b/models.py new file mode 100644 index 0000000..3732721 --- /dev/null +++ b/models.py @@ -0,0 +1,65 @@ +from dataclasses import dataclass, asdict + + +@dataclass +class GenerationOptions: + """Input options for image generation.""" + prompt: str + negative_prompt: str = "" + seed: int | None = None + steps: int = 20 + guidance_scale: float = 7.5 + count: int = 1 + add_quality_keywords: bool = True + increment_seed: bool = True + vary_guidance: bool = False + guidance_low: float = 5.0 + guidance_high: float = 12.0 + vary_steps: bool = False + steps_low: int = 20 + steps_high: int = 80 + width: int | None = None + height: int | None = None + + +@dataclass +class ImageParams: + """Computed parameters for a single image generation.""" + seed: int + steps: int + guidance_scale: float 
@dataclass
class ImageMetadata:
    """Metadata saved alongside each generated image (JSON sidecar)."""
    prompt: str
    negative_prompt: str
    seed: int
    steps: int
    guidance_scale: float
    width: int
    height: int
    add_quality_keywords: bool
    full_prompt: str = ""

    def to_dict(self) -> dict:
        """Return a plain-dict form for JSON serialization."""
        return asdict(self)


@dataclass
class ImageResult:
    """Result returned for each generated image via SSE."""
    index: int
    total: int
    filename: str
    url: str
    base64: str
    metadata: ImageMetadata

    def to_dict(self) -> dict:
        """Flatten metadata into the result dict for JSON serialization."""
        result = asdict(self)
        result.update(result.pop("metadata"))
        return result


import torch
import random
import datetime


def random_seed(length):
    """Return a random integer with exactly `length` decimal digits."""
    random.seed()  # reseed from OS entropy each call
    low = 10 ** (length - 1)       # smallest `length`-digit number
    high = 10 * low - 1            # largest `length`-digit number
    return random.randint(low, high)


device_type = "cuda"  # Using AMD GPU with ROCm


def load_model():
    """Load the SD 1.5 checkpoint in fp16 with the safety checker disabled."""
    # Imported lazily so this module can be imported without the diffusers stack.
    from diffusers import StableDiffusionPipeline

    model_id = "./models/real-amateur-nudes"  # this is what the original author used, it works with any SD model
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
    return pipe.to(device_type)


def generate_image(pipe, prompt, seed=None):
    """Run one 20-step generation; seed the generator when a seed is given."""
    generator = torch.Generator(device=device_type)
    if seed is not None:
        generator.manual_seed(seed)
    with torch.no_grad():
        image = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=5, generator=generator).images[0]
    return image


quality_keywords = "Canon50, hyper detail, cinematic lighting, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited"
def generate(pipe, prompt, seed):
    """Generate one image and save it plus a small text sidecar to out/."""
    image = generate_image(pipe, prompt, seed)
    stamp = datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S")
    base_file = 'out/%s_%d' % (stamp, seed)

    image.save('%s.jpg' % base_file)
    # Sidecar records the seed and prompt used, for reproducibility.
    with open('%s.txt' % base_file, "w") as file:
        file.write("%d\n%s" % (seed, prompt))


def main():
    """Generate a single image with a fixed seed (batch loop kept for reference)."""
    pipe = load_model()
    prompt = "young adult woman, ((shoulder cut dark hair)), blue eyes, no makeup, white blouse, long sleeves, enigmatic smile, %s" % quality_keywords

    seed = 673842166
    generate(pipe, prompt, seed)

    # for count in range(4):
    #     seed = random_seed(9)
    #     generate(pipe, prompt, seed)


if __name__ == "__main__":
    main()
import threading
import datetime
import random
import base64
import io
import json
import os

import torch
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionXLPipeline,
    DPMSolverMultistepScheduler,
)

from models import GenerationOptions, ImageParams, ImageMetadata, ImageResult


# --- Model Loaders ---
# To add a new model type, create a loader function and register it in MODEL_LOADERS.

def load_sd15(model_path, device, is_single_file):
    """Load a Stable Diffusion 1.5 pipeline in fp16 with the safety checker disabled."""
    if is_single_file:
        # .safetensors / .ckpt checkpoint: from_single_file takes no
        # safety_checker kwarg here, so the checker is stripped after loading.
        pipe = StableDiffusionPipeline.from_single_file(
            model_path,
            torch_dtype=torch.float16,
        )
        pipe.safety_checker = None
        pipe.requires_safety_checker = False
        return pipe
    return StableDiffusionPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        safety_checker=None,
    )


def load_sdxl(model_path, device, is_single_file):
    """Load a Stable Diffusion XL pipeline in fp16."""
    factory = (StableDiffusionXLPipeline.from_single_file
               if is_single_file
               else StableDiffusionXLPipeline.from_pretrained)
    return factory(model_path, torch_dtype=torch.float16)


# Registry of supported model types; keys match the SD_MODEL_TYPE env var.
MODEL_LOADERS = {
    "sd15": load_sd15,
    "sdxl": load_sdxl,
}
# --- Pipeline Manager ---

class SDPipeline:
    """Process-wide singleton that owns the loaded diffusion pipeline."""

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking: only the first caller ever pays for the lock.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    instance = super().__new__(cls)
                    instance._initialized = False
                    cls._instance = instance
        return cls._instance

    def __init__(self):
        # __init__ runs on every SDPipeline() call; initialize only once.
        if self._initialized:
            return
        self._initialized = True
        self._generation_lock = threading.Lock()
        self.device = "cuda"
        self.pipe = None  # populated by load()
        # Configuration comes from environment variables (see run_web_example.sh).
        self.model_path = os.environ.get("SD_MODEL_PATH", "./models/real-amateur-nudes")
        self.model_type = os.environ.get("SD_MODEL_TYPE", "sd15")
        self.low_vram = os.environ.get("SD_LOW_VRAM", "").lower() in ("1", "true", "yes")
        self.lora_stack = self._parse_lora_stack(os.environ.get("SD_LORA_STACK", ""))
        self.quality_keywords = "hyper detail, cinematic lighting, realistic, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited"

    def _parse_lora_stack(self, lora_env: str) -> list[tuple[str, float]]:
        """Parse SD_LORA_STACK env var into a list of (path, weight) tuples.

        Format: path/to/lora.safetensors:0.8,path/to/other.safetensors:0.5
        A missing ``:weight`` suffix defaults to 1.0.
        """
        stack = []
        for entry in lora_env.split(","):
            entry = entry.strip()
            if not entry:
                continue
            # rpartition so a weight after the LAST colon wins, matching rsplit(":", 1)
            path, sep, weight_text = entry.rpartition(":")
            if sep:
                stack.append((path, float(weight_text)))
            else:
                stack.append((entry, 1.0))
        return stack
# NOTE(review): the following are methods of SDPipeline (the class header sits
# in an earlier hunk of this patch); shown here at method granularity.

def load(self):
    """Load the configured model into memory (idempotent).

    Raises:
        FileNotFoundError: SD_MODEL_PATH does not exist.
        ValueError: SD_MODEL_TYPE is not a registered loader.
    """
    if self.pipe is not None:
        return  # already loaded

    if not os.path.exists(self.model_path):
        raise FileNotFoundError(f"Model not found: {self.model_path}")

    if self.model_type not in MODEL_LOADERS:
        available = ", ".join(MODEL_LOADERS.keys())
        raise ValueError(f"Unknown model type '{self.model_type}'. Available: {available}")

    print(f"Loading model ({self.model_type}) from {self.model_path}...")

    # A bare checkpoint file loads differently from a diffusers directory.
    is_single_file = self.model_path.endswith((".safetensors", ".ckpt"))
    self.pipe = MODEL_LOADERS[self.model_type](self.model_path, self.device, is_single_file)

    # DPM++ multistep with Karras sigmas: good quality at low step counts.
    self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        self.pipe.scheduler.config,
        use_karras_sigmas=True,
    )

    if self.low_vram:
        # Trade speed for VRAM: stream weights from CPU per-layer and
        # decode the VAE in slices/tiles.
        self.pipe.enable_sequential_cpu_offload()
        self.pipe.vae.enable_slicing()
        self.pipe.vae.enable_tiling()
        print("Low VRAM mode: enabled sequential CPU offload and VAE slicing/tiling")
    else:
        self.pipe = self.pipe.to(self.device)
        self.pipe.enable_attention_slicing()

    self._load_loras()

    print("Model loaded successfully!")


def _load_loras(self):
    """Load and activate LoRA weights from the SD_LORA_STACK configuration."""
    if not self.lora_stack:
        return

    adapter_names = []
    adapter_weights = []
    for i, (path, weight) in enumerate(self.lora_stack):
        if not os.path.exists(path):
            # Best-effort: a missing LoRA is skipped, not fatal.
            print(f"Warning: LoRA not found, skipping: {path}")
            continue
        name = f"lora_{i}"
        print(f"Loading LoRA: {path} (weight={weight})")
        self.pipe.load_lora_weights(path, adapter_name=name)
        adapter_names.append(name)
        adapter_weights.append(weight)

    if adapter_names:
        self.pipe.set_adapters(adapter_names, adapter_weights=adapter_weights)
        print(f"Loaded {len(adapter_names)} LoRA(s)")
# NOTE(review): methods of SDPipeline, shown at method granularity.

def generate_stream(self, options: GenerationOptions):
    """Generate `options.count` images, yielding an ImageResult per image.

    Serialized by the generation lock so concurrent HTTP requests don't
    interleave GPU work; lazily loads the model on first use.
    """
    if self.pipe is None:
        self.load()

    # One base seed for the whole batch; per-image seed may increment from it.
    seed = options.seed if options.seed is not None else self._random_seed()

    with self._generation_lock:
        for i in range(options.count):
            params = self._compute_params(options, seed, i)
            full_prompt = (
                f"{options.prompt}, {self.quality_keywords}"
                if options.add_quality_keywords
                else options.prompt
            )
            image = self._generate_image(
                full_prompt, options.negative_prompt, params, options.width, options.height
            )
            yield self._save_and_encode(image, options, params, full_prompt, i)


def _compute_params(self, options: GenerationOptions, seed: int, index: int) -> ImageParams:
    """Compute per-image seed/steps/guidance, applying the vary modes.

    Vary modes sweep linearly from low to high across the batch; with a
    single image there is no meaningful sweep, so they are ignored.
    """
    current_seed = seed + index if options.increment_seed else seed

    current_guidance = options.guidance_scale
    current_steps = options.steps
    if options.count > 1:
        t = index / (options.count - 1)
        if options.vary_guidance:
            current_guidance = options.guidance_low + t * (options.guidance_high - options.guidance_low)
        if options.vary_steps:
            current_steps = int(options.steps_low + t * (options.steps_high - options.steps_low))

    return ImageParams(
        seed=current_seed,
        steps=current_steps,
        guidance_scale=current_guidance,
    )


def _generate_image(self, prompt: str, negative_prompt: str, params: ImageParams, width: int | None, height: int | None):
    """Run the diffusion pipeline once and return the generated image."""
    if self.low_vram:
        torch.cuda.empty_cache()  # reclaim fragments before the next big allocation

    # With sequential CPU offload the generator must live on the CPU.
    generator = torch.Generator(device="cpu" if self.low_vram else self.device)
    generator.manual_seed(params.seed)

    kwargs = {
        "prompt": prompt,
        "num_inference_steps": params.steps,
        "guidance_scale": params.guidance_scale,
        "generator": generator,
    }
    if negative_prompt:
        kwargs["negative_prompt"] = negative_prompt
    if width:
        kwargs["width"] = width
    if height:
        kwargs["height"] = height

    with torch.no_grad():
        return self.pipe(**kwargs).images[0]
# NOTE(review): methods of SDPipeline, plus the module-level singleton.

def _save_and_encode(self, image, options: GenerationOptions, params: ImageParams, full_prompt: str, index: int) -> ImageResult:
    """Save the image and its JSON metadata sidecar; return the SSE payload."""
    dt = datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S")
    name = f"{dt}_{params.seed}"
    base_file = f"out/{name}"

    image.save(f"{base_file}.jpg")

    # Fall back to the actual image size when the request did not set one.
    metadata = ImageMetadata(
        prompt=options.prompt,
        negative_prompt=options.negative_prompt,
        seed=params.seed,
        steps=params.steps,
        guidance_scale=params.guidance_scale,
        width=options.width or image.width,
        height=options.height or image.height,
        add_quality_keywords=options.add_quality_keywords,
        full_prompt=full_prompt,
    )

    # The JSON sidecar can be re-imported in the web UI to re-apply settings.
    with open(f"{base_file}.json", "w") as f:
        json.dump(metadata.to_dict(), f, indent=2)

    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")

    return ImageResult(
        index=index + 1,
        total=options.count,
        filename=f"{name}.jpg",
        url=f"/out/{name}.jpg",
        base64=f"data:image/jpeg;base64,{encoded}",
        metadata=metadata,
    )


def _random_seed(self, length=9):
    """Generate a random seed with the specified number of digits."""
    random.seed()  # reseed from OS entropy
    low = 10 ** (length - 1)
    return random.randint(low, 10 ** length - 1)


# Module-level singleton shared by the Flask app.
pipeline = SDPipeline()
#fff; + font-size: 14px; +} + +textarea { + resize: vertical; + min-height: 80px; +} + +input[type="range"] { + width: 100%; + height: 8px; + -webkit-appearance: none; + background: #0f3460; + border-radius: 4px; + outline: none; +} + +input[type="range"]::-webkit-slider-thumb { + -webkit-appearance: none; + width: 20px; + height: 20px; + background: #e94560; + border-radius: 50%; + cursor: pointer; +} + +input[type="range"]::-moz-range-thumb { + width: 20px; + height: 20px; + background: #e94560; + border-radius: 50%; + cursor: pointer; + border: none; +} + +.seed-input { + display: flex; + gap: 10px; +} + +.seed-input input { + flex: 1; +} + +.seed-input button { + padding: 12px 20px; + background: #0f3460; + border: none; + border-radius: 6px; + color: #fff; + cursor: pointer; + font-size: 14px; +} + +.seed-input button:hover { + background: #1a4a7a; +} + +.checkbox-group label { + display: flex; + align-items: center; + gap: 10px; + cursor: pointer; +} + +.checkbox-group input[type="checkbox"] { + width: 18px; + height: 18px; + cursor: pointer; +} + +.range-inputs { + display: flex; + align-items: center; + gap: 10px; + margin-top: 8px; + margin-left: 28px; +} + +.range-inputs input { + width: 70px; + padding: 8px; + border: 1px solid #0f3460; + border-radius: 6px; + background: #1a1a2e; + color: #fff; + font-size: 14px; +} + +.range-inputs span { + color: #aaa; +} + +button[type="submit"] { + width: 100%; + padding: 15px; + background: #e94560; + border: none; + border-radius: 6px; + color: #fff; + font-size: 16px; + font-weight: 600; + cursor: pointer; + transition: background 0.2s; +} + +button[type="submit"]:hover { + background: #ff6b6b; +} + +button[type="submit"]:disabled { + background: #666; + cursor: not-allowed; +} + +.settings-buttons { + display: flex; + gap: 10px; + margin-bottom: 20px; +} + +.settings-buttons button { + flex: 1; + padding: 10px; + background: #0f3460; + border: none; + border-radius: 6px; + color: #fff; + font-size: 14px; + 
cursor: pointer; + transition: background 0.2s; +} + +.settings-buttons button:hover { + background: #1a4a7a; +} + +.status { + display: none; + align-items: center; + justify-content: center; + gap: 12px; + text-align: center; + padding: 15px; + border-radius: 6px; + margin-bottom: 20px; +} + +.status.loading, +.status.success, +.status.error { + display: flex; +} + +.status.loading { + background: #0f3460; +} + +.status.success { + background: #1e5128; +} + +.status.error { + background: #7b2d26; +} + +.spinner { + display: none; + width: 20px; + height: 20px; + border: 3px solid rgba(255, 255, 255, 0.3); + border-top-color: #fff; + border-radius: 50%; + animation: spin 0.8s linear infinite; +} + +.status.loading .spinner { + display: block; +} + +@keyframes spin { + to { + transform: rotate(360deg); + } +} + +.progress-container { + display: none; + margin-bottom: 20px; +} + +.progress-bar { + height: 8px; + background: #0f3460; + border-radius: 4px; + overflow: hidden; +} + +.progress-fill { + height: 100%; + background: linear-gradient(90deg, #e94560, #ff6b6b); + border-radius: 4px; + width: 0%; + transition: width 0.1s linear; +} + +.progress-text { + text-align: center; + margin-top: 8px; + font-size: 14px; + color: #aaa; +} + +.results { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(400px, 1fr)); + gap: 20px; +} + +.image-card { + background: #16213e; + border-radius: 10px; + overflow: hidden; +} + +.image-card img { + width: 100%; + height: auto; + display: block; +} + +.image-info { + padding: 15px; +} + +.image-info p { + margin: 8px 0; + font-size: 14px; + word-break: break-word; +} + +.image-info a { + color: #e94560; + text-decoration: none; +} + +.image-info a:hover { + text-decoration: underline; +} + +@media (max-width: 600px) { + .form-row { + grid-template-columns: 1fr; + } + + .results { + grid-template-columns: 1fr; + } +} + +/* Wide screen mode */ +@media (min-width: 1200px) { + body { + padding: 0; + height: 100vh; + 
overflow: hidden; + } + + .container { + display: flex; + max-width: none; + height: 100vh; + margin: 0; + } + + .panel-left { + width: 600px; + min-width: 600px; + padding: 20px; + overflow-y: auto; + border-right: 1px solid #0f3460; + } + + .panel-left h1 { + font-size: 1.5rem; + margin-bottom: 20px; + } + + .panel-left form { + margin-bottom: 20px; + } + + .panel-left .form-row { + grid-template-columns: 1fr 1fr; + } + + /* Smaller scrollbar for left panel */ + .panel-left::-webkit-scrollbar { + width: 6px; + } + + .panel-left::-webkit-scrollbar-track { + background: #1a1a2e; + } + + .panel-left::-webkit-scrollbar-thumb { + background: #0f3460; + border-radius: 3px; + } + + .panel-left::-webkit-scrollbar-thumb:hover { + background: #1a4a7a; + } + + /* Firefox scrollbar */ + .panel-left { + scrollbar-width: thin; + scrollbar-color: #0f3460 #1a1a2e; + } + + .panel-right { + flex: 1; + padding: 20px; + overflow-y: auto; + background: #12121f; + } + + .panel-right .results { + grid-template-columns: repeat(auto-fill, minmax(400px, 1fr)); + } +} diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..cb11a36 --- /dev/null +++ b/templates/index.html @@ -0,0 +1,625 @@ + + + + + + Stable Diffusion Generator + + + +
+
+

Stable Diffusion Generator

+ +
+
+ + +
+ +
+ + +
+ +
+
+ +
+ + +
+
+ +
+ + +
+
+ +
+
+ + +
+
+ + +
+
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ +
+ +
+ +
+ + +
+ +
+ + +
+ + +
+ +
+ + + +
+ +
+
+ +
+ +
+
+
+
+
+
+
+ +
+
+
+
+ + + +