From 490e2783856c2ee89e7e739701529bb79a778d89 Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 15:24:25 -0800 Subject: [PATCH 1/8] [FEAT][SequentialWorkflow][Fix] --- docs/assets/img/reliabilitythrough.png | Bin 0 -> 41362 bytes docs/limits_of_individual_agents.md | 2 + playground/models/azure_openai.py | 10 + .../structs/sequential_workflow_example.py | 0 pyproject.toml | 1 + requirements.txt | 1 + sequential_workflow_with_agents.py | 39 +++ swarms/models/azure_openai_llm.py | 223 ++++++++++++++ swarms/structs/agent.py | 6 +- swarms/structs/base_workflow.py | 85 +++++- swarms/structs/sequential_workflow.py | 276 +++++------------- swarms/utils/loguru_logger.py | 2 +- 12 files changed, 424 insertions(+), 221 deletions(-) create mode 100644 docs/assets/img/reliabilitythrough.png create mode 100644 playground/models/azure_openai.py rename sequential_workflow_example.py => playground/structs/sequential_workflow_example.py (100%) create mode 100644 sequential_workflow_with_agents.py create mode 100644 swarms/models/azure_openai_llm.py diff --git a/docs/assets/img/reliabilitythrough.png b/docs/assets/img/reliabilitythrough.png new file mode 100644 index 0000000000000000000000000000000000000000..91d554809bc2d69c0bfe8891a62c1726cc8c4048 GIT binary patch literal 41362 zcmeFZc{tQ>^gsNzq?Du;+1f0X5JiTl6v>jv7Gp?8mLbd74Mns_3&uKz3fX2b*0GGG zY=g0nB_zu*gRyU8`Mvf1{hsT3{&@a=p6j_jbJf*&Ywr6#=RWs2uk$+Rp0@`2T89sw zJ_rE7VeR{O4FQ0w8UQ$<2e`l|%;E56@E`6M_su;3;L!2CzidE4k^uOS&BIXZ4uESD zo(6y5w7;!)8vyb@9@?_m2LP#(+IMe1_F@=lpY^ z`e-oRKI|><1`uYGQ$2r|^UVRiqi3{IjIf1d7xEV(W576jWX-p?PH#Dl=$D(dl1qMQ zY>Y7lrvhIFZ+0L5?{|D`dust-_l^d${d;3@;=f1#Y2rVd05$O6QTWd({O2D3iwb)h z_%A;Ge+w3Dxb6cKmRL-ZsRz=_%LJ2@^f)6Uga0)uSuOC%ER$KXAxWi>gz)OcFiTH`zP*KCTtcxQwo$#Ur%NuEB(4I1;`ztz56QlPCldH#7I@3N)T&x)>nj2n!eta z8L+uLU6Cuirby|yr$#LkE5G<)=xo67y`j{?A8YZ3-k})pj!70QT$ctd^c6$KA%}Yx z78YjIj-kh{3ilqNVdPvV1xjwROIxB**yX?>H0`jK1-jP3UD7?L zP;D6b@#E$6r3SH}$B4k4t+J;jNy*7x5Yx7{wk+k1`B4qV&HYQm;D9ODz?HmnXAW0p z;XOq&z&)~z4nHDhH8XZSw1xe6upVQ&E8}EN6w{7-q*c0PZ;_xPq*^{H2%4zW9shcv zf-V$l$7NPqRMvW$Ek)RY6ehq|Kwn`{d%kIX%#w+WjFf>(vV)RogOUmPfC_+3G(|}o zi-D$4c4b0f4pRZYB`?!A`<##ICa-=iHuDa-!f-C|n+NB^d`k zdw6y4k+mwg?B^HBH&){Jp>J02Jqljv+1`)NKos@d=d$!CvIb5W#5cZ@s>fu zT1IlK!r-pxtxa%s^zrfWbAJdha%lqPELIOy>AyDj3%|iizm`Dxx{*{Bw2N9`1(<9g z-(ro2VQmdFZ)4YnlM-85E^XxKfRUuA>f$ui15C{3B|@3$U{|+!_Utr`g57m|7%MT` z={WSeq9I+sO?Uk)V{Gz?I3aD>qga)<$P7fQosaSN_4XdSVwEL;2H_ZCbK3QcO8H&G&oU zzuz<3Ys2#I)YR11D7WXzLFGKp8q2anJmL-2RsC0I@R8e$Ya%tC@v)XSLEWFz0+FP za`Y;_i^g|B+u8|`_MK6~GmGW#spN-JKY5fs418fcu*|69hgSI`hH0OuDZaz0^mhUL z9wJIV?R&aypbx5aG%yN#j<8^&;0J%U)Vr@Ux2A2gsC^l>rHEjHw&3M;qT>&t#q?;= z;Iwa-@ZLgU2i}Gqs9t2{*OFA{3{qbKIj0Mgh3+g7iJ~P*Vj4+88cC8GVf4XE<)?zh zPn_1EPkKIhxGjp$&L;L!ptaGe)VLa|u}1L`V+u1!o6T%6ukb@QHBRS1wY=0{&^lt-A*Po!^(7g)qB%w4oCixX!hbv8c%SBuOJDh)X>$Jdt+vRm^2p zs%NeeLA~|Uz+Cx>R#5|K4aj=9nkz2K+S_8qB-#7jrt-&;(6>fBon`uWVhWq?grXLe zBTp{pG}EsCT3YT2h1mv$zqSV2u>k0eraaC=?rJ^PY)oHrwh(#H`eCZUQv@dB)@osT$n` z7xz|W8GF)Lyk_~xB;j4H|fMy z+gVVPJPrpG3%2!Bc>hNbooU_KftTI(t5ijmb1q#o3;E>5{$(idA{aY+IyxS$Fz_T3 zFqo7}9T8Rif!dgeeNQ4Edl9J8=`oB}dv$~9%FPOIA8z<4mMs%z#&wlu9P3A7bbiov zP1HH~@M@g|jd_9+rN60MqR@Km)9<#Tp%r(_Vq??2KsvNl!VV~+Bcs&L%azN#zW%cX z%_$xKp}pmnPTZHobyY7-M|&>~sx_WlME;N?|BA9?L7~MfJb^&42%4)4Kf>Q|LnJOx zbHQG)1Tm)_y%vAwJk17$U6Ny_anpz`1!YLxMt5s!l&T8b;Dh-d$?&BAIN#4w15$S8>t6ezY zmjD0dKjZM(Rnw}wtFwc?rpVx)jWR~$$P#X`0$E+uvDA&Jz>JK6!McsX`@w|C=*oCV zLrkm``9{hN1eo|zpgkh5F>T?h_L1KFu&RLmb>GF!urJiahHRCWy;UV@H2-h2`K@Im z_vo+lNezf$t)Uv9VUwZw-f7j}^-Db>nKWoOW(b2-NYkl|qYluNX(pQivP;`fY8e%m 
z56^;ct>z-<=0pOrc|Tfl1vM>f%{C~3c^RlWOx@_X|7+|UgZbx*Tt;WtL8v&SmtYxe zFW2^tp>_tUOldVXuf~1$CSqkJyjrX+FEd=t5E?r^8-X?G9=mUq|n z{Cisuqwt7oMp!GBH=eH-)?*vieUC-lBC?*A8$ei=mS1SlbbXk2!arD4QcRauCGY7(q$dMyQ z4|NY9%x^fG;SN9L>Q1dlOt#Y1a-M8?h#0l$AvmgZI#lxxmpahGIh%FCkiM2__jB&PPZ7NmCZscMzQ6k83V23 zliA}rTo{cvj7H>|E@fJo6aqAoeJesf^T`d1X!Ktt1NVmhTzlt2JVW$E(I*t+0!V$= z5umhH?vF*k&)|B|qU~nj_v%3AWn607#LgzsXllkEP*9ALgNLqrKg1=qSz4iF@l#$S zH)i4Lt#%hO`}7mD{7sI#T*XIX5>@dK|NX2Ga11~ln3rq}p||QP z#>T9%lq;1nC{kc#d*8L>*nx8 z6${s$xhz-Pvro1Fl|*&Rf&35h^iXgyb6LgbyK{XQ{7p!a-=-VW0dJo>CInmi%!42< z#_w36!Hj^mnD8rhE%wK=;K#`7vd{MC&zHTRW&V|KJre4C3u*aAM=7MMQe4s2SFWWN zHV)VRYwHYv?%(&`>K~Y&q)0P_zt;7cBWm0u9mn#W>JM>7+PJKa`Uehp-(9eR^9tkn z)nL3$EB4_?>OqyhNfXZ__$X@zNFS#E0|PlX%k_x{#ZJ64NBIHDXy*8APEPWU4Uu2k*lCpA}otow;e{rkY{ za9Z<;AU7Xs+WBHj|ECywGN|VXF#jP1D);Q*@tMz^Rqb}#nS0n^Q-ds8S?5{vZ)r7@ zy5NF_XiB*AWcGsw^{<8*<%Kl+p6pY#ecY=6l?FD&Q!KMrH*nkE6R$UMqB>Z+E4%{U9W6bjq|t6hEA%+B60W4D zME*Qvxvsl+C@Cpu<4RQEhf0xLYP)03sd(gK$;uSIKswjKE8NUW-7sc5^s{9izYbMi zP5zwloY+4%>>>DTt_7H7CIjO^SnB5r^935KZ8<0Vn+K>dC49wuyH`1nl&)kNZoC>_ zSBF~N7Ue@$HC|3JT|Z{B{L2J6T_F_Qp1A-Qq-D7Eg=^`p77b94*M0Mj|6idUGE^ah zoqy1mX<3x4=9T#7t&GiMj;oWmvaCyTQjtlRU(uf)f3du%t?>&lu)&!=ASuS4zaDQZ zuPvW4?^zq60rg{9pLWP=V7`(}DNPcjw+7`Ctdz%51bD?pMp2BfGfp=bKWiLhNWdQR zxdxnRd%c3Z%pe&Hpk$jL%g+BUID}O3J)e3t^P4Sk$cL}@WPos+j0wws9#8){(KtCZ z!1AM4c=lU=xYl;GBAS)_?tkl8U=j%$0!<~GcmL?Kt{nEzIgL-vDUf8H$GaC-m0W1A z`805+kF9TMrFz1`A^h8Ik+2N28*I&)JhK5R(rpNS9swSqI=-<_;(OivV-RRs%U(Vt zPRhzEIMbpte5@cZ`hCytGb0s;`#F{I5)ZCNFa>4WWwPcPA$PV@w*c9gN{CXP*h*zQK z%@19cYlF{j;QhWQzWw5ST=6Z`|5hb~xH5_P#P8_{GaMImb0(DBPGe>?L@c}bAKh1F z%kiWj&zaYY0-Ev15K?KjWAiPz4fqGVOk{m0YW;gj3(G{2O^YT;kg%*_99ehU#53_MSc?a!OSi zYDJL1y3Hp%?K0BRu5%k6l;1Ff;!9a>q_*B(_h?52rK* z*HBQ|JAc#vZQ6d~cg~uEdZqPYjP(U`e<)9ihg^Ct2t6CmkGkHM6#=%j4~gtxVrU4) z(nL(lXUbcDW-Pydhe~Woq=w73(IzB5IxK72=?26#^r*MB$8#&{;EqbEar;2hyROJY z`{v+k;P01*{`$+5EE@%xOGmvTKMhvKgOVjsNm7VG!!P?A@|6wn@+dV?{J?NlYZ5-C zGkXMd3V}E`l8}&qldx#;vFJ&TgGjntNRWtv*?JM%Kw?<{wsPpotiI*-5r1eXaD~z6ng(t^XeoheEy3SdO`h)UoRk5|GxDvCI00J5e5EV dZiTcB@s`^6T)N?tNzqWuE?HkJIq!b|e*s5`KSuxn literal 0 HcmV?d00001 diff --git a/docs/limits_of_individual_agents.md b/docs/limits_of_individual_agents.md index d6e802a5..e178136e 100644 --- a/docs/limits_of_individual_agents.md +++ b/docs/limits_of_individual_agents.md @@ -1,5 +1,7 @@ # The Limits of Individual Agents +![Reliable Agents](docs/assets/img/reliabilitythrough.png) + - Context Window Limits - Single Task Execution - Hallucination diff --git a/playground/models/azure_openai.py b/playground/models/azure_openai.py new file mode 100644 index 00000000..aeda11c5 --- /dev/null +++ b/playground/models/azure_openai.py @@ -0,0 +1,10 @@ +from swarms.models.azure_openai_llm import AzureOpenAI + +# Initialize Azure OpenAI +model = AzureOpenAI() + +# Run the model +model( + "Create a youtube script for a video on how to use the swarms" + " framework" +) diff --git a/sequential_workflow_example.py b/playground/structs/sequential_workflow_example.py similarity index 100% rename from sequential_workflow_example.py rename to playground/structs/sequential_workflow_example.py diff --git a/pyproject.toml b/pyproject.toml index 90ec0f3b..6c335aa7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,7 @@ supervision = "*" scikit-image = "*" pinecone-client = "*" roboflow = "*" +langchain-core = "0.1.27" diff --git a/requirements.txt b/requirements.txt index 714727fb..a23fcf4c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,6 +3,7 @@ transformers pandas==1.5.3 langchain==0.0.333 
langchain-experimental==0.0.10 +langchain-core==0.1.27 httpx==0.24.1 Pillow==9.4.0 faiss-cpu==1.7.4 diff --git a/sequential_workflow_with_agents.py b/sequential_workflow_with_agents.py new file mode 100644 index 00000000..bfd28f08 --- /dev/null +++ b/sequential_workflow_with_agents.py @@ -0,0 +1,39 @@ +from swarms import Agent, OpenAIChat, SequentialWorkflow + +# Example usage +llm = OpenAIChat( + temperature=0.5, + max_tokens=3000, +) + +# Initialize the Agent with the language agent +agent1 = Agent( + agent_name="John the writer", + llm=llm, + max_loops=1, + dashboard=False, +) + + +# Create another Agent for a different task +agent2 = Agent("Summarizer", llm=llm, max_loops=1, dashboard=False) + + +# Create the workflow +workflow = SequentialWorkflow( + name="Blog Generation Workflow", + description=( + "Generate a youtube transcript on how to deploy agents into production" + ), + max_loops=1, + autosave=True, + dashboard=False, + agents=[agent1, agent2], +) + +# Run the workflow +workflow.run() + +# # # Output the results +# for task in workflow.tasks: +# print(f"Task: {task.description}, Result: {task.result}") diff --git a/swarms/models/azure_openai_llm.py b/swarms/models/azure_openai_llm.py new file mode 100644 index 00000000..3ff8bd22 --- /dev/null +++ b/swarms/models/azure_openai_llm.py @@ -0,0 +1,223 @@ +from __future__ import annotations + +import logging +import os +from typing import Any, Callable, Dict, List, Mapping, Optional, Union + +import openai +from langchain_core.pydantic_v1 import ( + Field, + SecretStr, + root_validator, +) +from langchain_core.utils import ( + convert_to_secret_str, + get_from_dict_or_env, +) +from langchain_openai.llms.base import BaseOpenAI + +logger = logging.getLogger(__name__) + + +class AzureOpenAI(BaseOpenAI): + """Azure-specific OpenAI large language models. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. code-block:: python + + from swarms import AzureOpenAI + + openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct") + """ + + azure_endpoint: Union[str, None] = None + """Your Azure endpoint, including the resource. + + Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. + + Example: `https://example-resource.azure.openai.com/` + """ + deployment_name: Union[str, None] = Field( + default=None, alias="azure_deployment" + ) + """A model deployment. + + If given sets the base client URL to include `/deployments/{azure_deployment}`. + Note: this means you won't be able to use non-deployment endpoints. + """ + openai_api_version: str = Field(default="", alias="api_version") + """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" + openai_api_key: Optional[SecretStr] = Field( + default=None, alias="api_key" + ) + """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" + azure_ad_token: Optional[SecretStr] = None + """Your Azure Active Directory token. + + Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. + + For more: + https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. + """ # noqa: E501 + azure_ad_token_provider: Union[Callable[[], str], None] = None + """A function that returns an Azure Active Directory token. + + Will be invoked on every request. 
+ """ + openai_api_type: str = "" + """Legacy, for openai<1.0.0 support.""" + validate_base_url: bool = True + """For backwards compatibility. If legacy val openai_api_base is passed in, try to + infer if it is a base_url or azure_endpoint and update accordingly. + """ + + @classmethod + def get_lc_namespace(cls) -> List[str]: + """Get the namespace of the langchain object.""" + return ["langchain", "llms", "openai"] + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + if values["n"] < 1: + raise ValueError("n must be at least 1.") + if values["streaming"] and values["n"] > 1: + raise ValueError("Cannot stream results when n > 1.") + if values["streaming"] and values["best_of"] > 1: + raise ValueError( + "Cannot stream results when best_of > 1." + ) + + # Check OPENAI_KEY for backwards compatibility. + # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using + # other forms of azure credentials. + openai_api_key = ( + values["openai_api_key"] + or os.getenv("AZURE_OPENAI_API_KEY") + or os.getenv("OPENAI_API_KEY") + ) + values["openai_api_key"] = ( + convert_to_secret_str(openai_api_key) + if openai_api_key + else None + ) + + values["azure_endpoint"] = values[ + "azure_endpoint" + ] or os.getenv("AZURE_OPENAI_ENDPOINT") + azure_ad_token = values["azure_ad_token"] or os.getenv( + "AZURE_OPENAI_AD_TOKEN" + ) + values["azure_ad_token"] = ( + convert_to_secret_str(azure_ad_token) + if azure_ad_token + else None + ) + values["openai_api_base"] = values[ + "openai_api_base" + ] or os.getenv("OPENAI_API_BASE") + values["openai_proxy"] = get_from_dict_or_env( + values, + "openai_proxy", + "OPENAI_PROXY", + default="", + ) + values["openai_organization"] = ( + values["openai_organization"] + or os.getenv("OPENAI_ORG_ID") + or os.getenv("OPENAI_ORGANIZATION") + ) + values["openai_api_version"] = values[ + "openai_api_version" + ] or os.getenv("OPENAI_API_VERSION") + values["openai_api_type"] = get_from_dict_or_env( + values, + "openai_api_type", + "OPENAI_API_TYPE", + default="azure", + ) + # For backwards compatibility. Before openai v1, no distinction was made + # between azure_endpoint and base_url (openai_api_base). + openai_api_base = values["openai_api_base"] + if openai_api_base and values["validate_base_url"]: + if "/openai" not in openai_api_base: + values["openai_api_base"] = ( + values["openai_api_base"].rstrip("/") + "/openai" + ) + raise ValueError( + "As of openai>=1.0.0, Azure endpoints should be" + " specified via the `azure_endpoint` param not" + " `openai_api_base` (or alias `base_url`)." + ) + if values["deployment_name"]: + raise ValueError( + "As of openai>=1.0.0, if `deployment_name` (or" + " alias `azure_deployment`) is specified then" + " `openai_api_base` (or alias `base_url`) should" + " not be. Instead use `deployment_name` (or alias" + " `azure_deployment`) and `azure_endpoint`." 
+ ) + values["deployment_name"] = None + client_params = { + "api_version": values["openai_api_version"], + "azure_endpoint": values["azure_endpoint"], + "azure_deployment": values["deployment_name"], + "api_key": ( + values["openai_api_key"].get_secret_value() + if values["openai_api_key"] + else None + ), + "azure_ad_token": ( + values["azure_ad_token"].get_secret_value() + if values["azure_ad_token"] + else None + ), + "azure_ad_token_provider": values[ + "azure_ad_token_provider" + ], + "organization": values["openai_organization"], + "base_url": values["openai_api_base"], + "timeout": values["request_timeout"], + "max_retries": values["max_retries"], + "default_headers": values["default_headers"], + "default_query": values["default_query"], + "http_client": values["http_client"], + } + values["client"] = openai.AzureOpenAI( + **client_params + ).completions + values["async_client"] = openai.AsyncAzureOpenAI( + **client_params + ).completions + + return values + + @property + def _identifying_params(self) -> Mapping[str, Any]: + return { + **{"deployment_name": self.deployment_name}, + **super()._identifying_params, + } + + @property + def _invocation_params(self) -> Dict[str, Any]: + openai_params = {"model": self.deployment_name} + return {**openai_params, **super()._invocation_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "azure" + + @property + def lc_attributes(self) -> Dict[str, Any]: + return { + "openai_api_type": self.openai_api_type, + "openai_api_version": self.openai_api_version, + } diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index e1282e5b..412dc02d 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -671,9 +671,9 @@ class Agent: ): break - if self.parse_done_token: - if parse_done_token(response): - break + # if self.parse_done_token: + # if parse_done_token(response): + # break if self.stopping_func is not None: if self.stopping_func(response) is True: diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index ace1fd3d..1326e2d3 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -5,7 +5,8 @@ from termcolor import colored from swarms.structs.base import BaseStructure from swarms.structs.task import Task - +from swarms.structs.agent import Agent +from swarms.utils.loguru_logger import logger class BaseWorkflow(BaseStructure): """ @@ -14,18 +15,27 @@ class BaseWorkflow(BaseStructure): Attributes: task_pool (list): A list to store tasks. - Methods: - add(task: Task = None, tasks: List[Task] = None, *args, **kwargs): - Adds a task or a list of tasks to the task pool. - run(): - Abstract method to run the workflow. 
+ """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.task_pool = [] + self.agent_pool = [] + + # Logging + logger.info("Number of agents activated:") + if self.agents: + logger.info(f"Agents: {len(self.agents)}") + else: + logger.info("No agents activated.") - def add( + if self.task_pool: + logger.info(f"Task Pool Size: {len(self.task_pool)}") + else: + logger.info("Task Pool is empty.") + + def add_task( self, task: Task = None, tasks: List[Task] = None, @@ -50,6 +60,14 @@ class BaseWorkflow(BaseStructure): raise ValueError( "You must provide a task or a list of tasks" ) + + def add_agent( + self, + agent: Agent, + *args, + **kwargs + ): + return self.agent_pool(agent) def run(self): """ @@ -318,3 +336,56 @@ class BaseWorkflow(BaseStructure): "red", ) ) + + + def workflow_dashboard(self, **kwargs) -> None: + """ + Displays a dashboard for the workflow. + + Args: + **kwargs: Additional keyword arguments to pass to the dashboard. + + Examples: + >>> from swarms.models import OpenAIChat + >>> from swarms.structs import SequentialWorkflow + >>> llm = OpenAIChat(openai_api_key="") + >>> workflow = SequentialWorkflow(max_loops=1) + >>> workflow.add("What's the weather in miami", llm) + >>> workflow.add("Create a report on these metrics", llm) + >>> workflow.workflow_dashboard() + + """ + print( + colored( + f""" + Sequential Workflow Dashboard + -------------------------------- + Name: {self.name} + Description: {self.description} + task_pool: {len(self.task_pool)} + Max Loops: {self.max_loops} + Autosave: {self.autosave} + Autosave Filepath: {self.saved_state_filepath} + Restore Filepath: {self.restore_state_filepath} + -------------------------------- + Metadata: + kwargs: {kwargs} + """, + "cyan", + attrs=["bold", "underline"], + ) + ) + + def workflow_bootup(self, **kwargs) -> None: + """ + Workflow bootup. + + """ + print( + colored( + """ + Sequential Workflow Initializing...""", + "green", + attrs=["bold", "underline"], + ) + ) \ No newline at end of file diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index f21c3056..2fe23a1c 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -10,6 +10,7 @@ from swarms.structs.task import Task from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.utils.loguru_logger import logger +from swarms.structs.base_workflow import BaseWorkflow # SequentialWorkflow class definition using dataclasses @@ -61,8 +62,16 @@ class SequentialWorkflow: ) # Logging - logger.info(f"Number of agents activated: {len(self.agents)}") - logger.info(f"Task Pool Size: {self.task_pool}") + logger.info("Number of agents activated:") + if self.agents: + logger.info(f"Agents: {len(self.agents)}") + else: + logger.info("No agents activated.") + + if self.task_pool: + logger.info(f"Task Pool Size: {len(self.task_pool)}") + else: + logger.info("Task Pool is empty.") def add( self, @@ -81,7 +90,6 @@ class SequentialWorkflow: *args: Additional arguments to pass to the task execution. **kwargs: Additional keyword arguments to pass to the task execution. """ - logger.info("A") for agent in self.agents: out = agent(str(self.description)) self.conversation.add(agent.agent_name, out) @@ -169,217 +177,65 @@ class SequentialWorkflow: ), ) - def save_workflow_state( - self, - filepath: Optional[str] = "sequential_workflow_state.json", - **kwargs, - ) -> None: - """ - Saves the workflow state to a json file. 
- - Args: - filepath (str): The path to save the workflow state to. - - Examples: - >>> from swarms.models import OpenAIChat - >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") - >>> workflow = SequentialWorkflow(max_loops=1) - >>> workflow.add("What's the weather in miami", llm) - >>> workflow.add("Create a report on these metrics", llm) - >>> workflow.save_workflow_state("sequential_workflow_state.json") - """ - try: - filepath = filepath or self.saved_state_filepath - - with open(filepath, "w") as f: - # Saving the state as a json for simplicuty - state = { - "task_pool": [ - { - "description": task.description, - "args": task.args, - "kwargs": task.kwargs, - "result": task.result, - "history": task.history, - } - for task in self.task_pool - ], - "max_loops": self.max_loops, - } - json.dump(state, f, indent=4) - - logger.info( - "[INFO][SequentialWorkflow] Saved workflow state to" - f" {filepath}" - ) - except Exception as error: - logger.error( - colored( - f"Error saving workflow state: {error}", - "red", - ) - ) - - def workflow_bootup(self, **kwargs) -> None: - """ - Workflow bootup. - - """ - print( - colored( - """ - Sequential Workflow Initializing...""", - "green", - attrs=["bold", "underline"], - ) - ) - - def workflow_dashboard(self, **kwargs) -> None: - """ - Displays a dashboard for the workflow. - - Args: - **kwargs: Additional keyword arguments to pass to the dashboard. - - Examples: - >>> from swarms.models import OpenAIChat - >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") - >>> workflow = SequentialWorkflow(max_loops=1) - >>> workflow.add("What's the weather in miami", llm) - >>> workflow.add("Create a report on these metrics", llm) - >>> workflow.workflow_dashboard() - - """ - print( - colored( - f""" - Sequential Workflow Dashboard - -------------------------------- - Name: {self.name} - Description: {self.description} - task_pool: {len(self.task_pool)} - Max Loops: {self.max_loops} - Autosave: {self.autosave} - Autosave Filepath: {self.saved_state_filepath} - Restore Filepath: {self.restore_state_filepath} - -------------------------------- - Metadata: - kwargs: {kwargs} - """, - "cyan", - attrs=["bold", "underline"], - ) - ) - - def workflow_shutdown(self, **kwargs) -> None: - """Shuts down the workflow.""" - print( - colored( - """ - Sequential Workflow Shutdown...""", - "red", - attrs=["bold", "underline"], - ) - ) - - def load_workflow_state( - self, filepath: str = None, **kwargs - ) -> None: - """ - Loads the workflow state from a json file and restores the workflow state. - - Args: - filepath (str): The path to load the workflow state from. 
- - Examples: - >>> from swarms.models import OpenAIChat - >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") - >>> workflow = SequentialWorkflow(max_loops=1) - >>> workflow.add("What's the weather in miami", llm) - >>> workflow.add("Create a report on these metrics", llm) - >>> workflow.save_workflow_state("sequential_workflow_state.json") - >>> workflow.load_workflow_state("sequential_workflow_state.json") - - """ - try: - filepath = filepath or self.restore_state_filepath - - with open(filepath) as f: - state = json.load(f) - self.max_loops = state["max_loops"] - self.task_pool = [] - for task_state in state["task_pool"]: - task = Task( - description=task_state["description"], - agent=task_state["agent"], - args=task_state["args"], - kwargs=task_state["kwargs"], - result=task_state["result"], - history=task_state["history"], - ) - self.task_pool.append(task) - - print( - "[INFO][SequentialWorkflow] Loaded workflow state" - f" from {filepath}" - ) - except Exception as error: - logger.error( - colored( - f"Error loading workflow state: {error}", - "red", - ) - ) - def run(self) -> None: """ Run the workflow. Raises: - ValueError: If a Agent instance is used as a task and the 'task' argument is not provided. + ValueError: If an Agent instance is used as a task and the 'task' argument is not provided. """ - try: - self.workflow_bootup() - loops = 0 - while loops < self.max_loops: - for i in range(len(self.task_pool)): - task = self.task_pool[i] - # Check if the current task can be executed - if task.result is None: - # Get the inputs for the current task - task.context(task) - - result = task.execute() - - # Pass the inputs to the next task - if i < len(self.task_pool) - 1: - next_task = self.task_pool[i + 1] - next_task.description = result - - # Execute the current task - task.execute() - - # Autosave the workflow state - if self.autosave: - self.save_workflow_state( - "sequential_workflow_state.json" - ) - - self.workflow_shutdown() - loops += 1 - except Exception as e: - logger.error( - colored( - ( - "Error initializing the Sequential workflow:" - f" {e} try optimizing your inputs like the" - " agent class and task description" - ), - "red", - attrs=["bold", "underline"], - ) - ) + self.workflow_bootup() + loops = 0 + while loops < self.max_loops: + for i, agent in enumerate(self.agents): + logger.info(f"Agent {i+1} is executing the task.") + out = agent(self.description) + self.conversation.add(agent.agent_name, str(out)) + prompt = self.conversation.return_history_as_string() + print(prompt) + print(f"Next agent...........") + out = agent(prompt) + + return out + # try: + # self.workflow_bootup() + # loops = 0 + # while loops < self.max_loops: + # for i in range(len(self.task_pool)): + # task = self.task_pool[i] + # # Check if the current task can be executed + # if task.result is None: + # # Get the inputs for the current task + # task.context(task) + + # result = task.execute() + + # # Pass the inputs to the next task + # if i < len(self.task_pool) - 1: + # next_task = self.task_pool[i + 1] + # next_task.description = result + + # # Execute the current task + # task.execute() + + # # Autosave the workflow state + # if self.autosave: + # self.save_workflow_state( + # "sequential_workflow_state.json" + # ) + + # self.workflow_shutdown() + # loops += 1 + # except Exception as e: + # logger.error( + # colored( + # ( + # "Error initializing the Sequential workflow:" + # f" {e} try optimizing your inputs like the" + # " agent class and task 
description" + # ), + # "red", + # attrs=["bold", "underline"], + # ) + # ) diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py index b94ff33f..dbbed560 100644 --- a/swarms/utils/loguru_logger.py +++ b/swarms/utils/loguru_logger.py @@ -1,6 +1,6 @@ from loguru import logger -logger = logger.add( +logger.add( "MessagePool.log", level="INFO", colorize=True, From 1f65acb68930b830760ad23e62d6307fc3d38590 Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 15:31:45 -0800 Subject: [PATCH 2/8] [DOCS] --- docs/corporate/faq.md | 67 +++++++++++++++++++++++++-- docs/limits_of_individual_agents.md | 4 +- docs/why_swarms.md | 53 +++++++++++++++++++++ sequential_workflow_with_agents.py | 3 +- swarms/structs/base_workflow.py | 19 +++----- swarms/structs/sequential_workflow.py | 4 +- 6 files changed, 130 insertions(+), 20 deletions(-) create mode 100644 docs/why_swarms.md diff --git a/docs/corporate/faq.md b/docs/corporate/faq.md index 99e78ac7..20b709e9 100644 --- a/docs/corporate/faq.md +++ b/docs/corporate/faq.md @@ -1,7 +1,68 @@ -This page summarizes questions we were asked on [Discord](https://discord.gg/gnWRz88eym), Hacker News, and Reddit. Feel free to post a question to [Discord](https://discord.gg/gnWRz88eym) or open a discussion on our [Github Page](https://github.com/kyegomez) or hit us up directly: [kye@apac.ai](mailto:hello@swarms.ai). +### FAQ on Swarm Intelligence and Multi-Agent Systems -## 1. How is Swarms different from LangChain? +#### What is an agent in the context of AI and swarm intelligence? -Swarms is an open source alternative to LangChain and differs in its approach to creating LLM pipelines and DAGs. In addition to agents, it uses more general-purpose DAGs and pipelines. A close proxy might be *Airflow for LLMs*. Swarms still implements chain of thought logic for prompt tasks that use "tools" but it also supports any type of input / output (images, audio, etc.). +In artificial intelligence (AI), an agent refers to an LLM with some objective to accomplish. +In swarm intelligence, each agent interacts with other agents and possibly the environment to achieve complex collective behaviors or solve problems more efficiently than individual agents could on their own. + +#### What do you need Swarms at all? +Individual agents are limited by a vast array of issues such as context window loss, single task execution, hallucination, and no collaboration. + + +#### How does a swarm work? + +A swarm works through the principles of decentralized control, local interactions, and simple rules followed by each agent. Unlike centralized systems, where a single entity dictates the behavior of all components, in a swarm, each agent makes its own decisions based on local information and interactions with nearby agents. These local interactions lead to the emergence of complex, organized behaviors or solutions at the collective level, enabling the swarm to tackle tasks efficiently. + +#### Why do you need more agents in a swarm? + +More agents in a swarm can enhance its problem-solving capabilities, resilience, and efficiency. With more agents: + +- **Diversity and Specialization**: The swarm can leverage a wider range of skills, knowledge, and perspectives, allowing for more creative and effective solutions to complex problems. +- **Scalability**: Adding more agents can increase the swarm's capacity to handle larger tasks or multiple tasks simultaneously. 
+- **Robustness**: A larger number of agents enhances the system's redundancy and fault tolerance, as the failure of a few agents has a minimal impact on the overall performance of the swarm. + +#### Isn't it more expensive to use more agents? + +While deploying more agents can initially increase costs, especially in terms of computational resources, hosting, and potentially API usage, there are several factors and strategies that can mitigate these expenses: + +- **Efficiency at Scale**: Larger swarms can often solve problems more quickly or effectively, reducing the overall computational time and resources required. +- **Optimization and Caching**: Implementing optimizations and caching strategies can reduce redundant computations, lowering the workload on individual agents and the overall system. +- **Dynamic Scaling**: Utilizing cloud services that offer dynamic scaling can ensure you only pay for the resources you need when you need them, optimizing cost-efficiency. + +#### Can swarms make decisions better than individual agents? + +Yes, swarms can make better decisions than individual agents for several reasons: + +- **Collective Intelligence**: Swarms combine the knowledge and insights of multiple agents, leading to more informed and well-rounded decision-making processes. +- **Error Correction**: The collaborative nature of swarms allows for error checking and correction among agents, reducing the likelihood of mistakes. +- **Adaptability**: Swarms are highly adaptable to changing environments or requirements, as the collective can quickly reorganize or shift strategies based on new information. + +#### How do agents in a swarm communicate? + +Communication in a swarm can vary based on the design and purpose of the system but generally involves either direct or indirect interactions: + +- **Direct Communication**: Agents exchange information directly through messaging, signals, or other communication protocols designed for the system. +- **Indirect Communication**: Agents influence each other through the environment, a method known as stigmergy. Actions by one agent alter the environment, which in turn influences the behavior of other agents. + +#### Are swarms only useful in computational tasks? + +While swarms are often associated with computational tasks, their applications extend far beyond. Swarms can be utilized in: + +- **Robotics**: Coordinating multiple robots for tasks like search and rescue, exploration, or surveillance. +- **Environmental Monitoring**: Using sensor networks to monitor pollution, wildlife, or climate conditions. +- **Social Sciences**: Modeling social behaviors or economic systems to understand complex societal dynamics. +- **Healthcare**: Coordinating care strategies in hospital settings or managing pandemic responses through distributed data analysis. + +#### How do you ensure the security of a swarm system? + +Security in swarm systems involves: + +- **Encryption**: Ensuring all communications between agents are encrypted to prevent unauthorized access or manipulation. +- **Authentication**: Implementing strict authentication mechanisms to verify the identity of each agent in the swarm. +- **Resilience to Attacks**: Designing the swarm to continue functioning effectively even if some agents are compromised or attacked, utilizing redundancy and fault tolerance strategies. + +#### Conclusion + +Swarms represent a powerful paradigm in AI, offering innovative solutions to complex, dynamic problems through collective intelligence and decentralized control. 
While challenges exist, particularly regarding cost and security, strategic design and management can leverage the strengths of swarm intelligence to achieve remarkable efficiency, adaptability, and robustness in a wide range of applications. \ No newline at end of file diff --git a/docs/limits_of_individual_agents.md b/docs/limits_of_individual_agents.md index e178136e..6a05daa8 100644 --- a/docs/limits_of_individual_agents.md +++ b/docs/limits_of_individual_agents.md @@ -2,13 +2,15 @@ ![Reliable Agents](docs/assets/img/reliabilitythrough.png) + +Individual agents have pushed the boundaries of what machines can learn and accomplish. However, despite their impressive capabilities, these agents face inherent limitations that can hinder their effectiveness in complex, real-world applications. This blog explores the critical constraints of individual agents, such as context window limits, hallucination, single-task threading, and lack of collaboration, and illustrates how multi-agent collaboration can address these limitations. In short, + - Context Window Limits - Single Task Execution - Hallucination - No collaboration -In the rapidly evolving field of artificial intelligence, individual agents have pushed the boundaries of what machines can learn and accomplish. However, despite their impressive capabilities, these agents face inherent limitations that can hinder their effectiveness in complex, real-world applications. This discussion explores the critical constraints of individual agents, such as context window limits, hallucination, single-task threading, and lack of collaboration, and illustrates how multi-agent collaboration can address these limitations. #### Context Window Limits diff --git a/docs/why_swarms.md b/docs/why_swarms.md new file mode 100644 index 00000000..f157af6d --- /dev/null +++ b/docs/why_swarms.md @@ -0,0 +1,53 @@ +# Why Swarms? + +The need for multiple agents to work together in artificial intelligence (AI) and particularly in the context of Large Language Models (LLMs) stems from several inherent limitations and challenges in handling complex, dynamic, and multifaceted tasks with single-agent systems. Collaborating with multiple agents offers a pathway to enhance computational efficiency, cognitive diversity, and problem-solving capabilities. This section delves into the rationale behind employing multi-agent systems and strategizes on overcoming the associated expenses, such as API bills and hosting costs. + +### Why Multiple Agents Are Necessary + +#### 1. **Cognitive Diversity** + +Different agents can bring varied perspectives, knowledge bases, and problem-solving approaches to a task. This diversity is crucial in complex problem-solving scenarios where a single approach might not be sufficient. Cognitive diversity enhances creativity, leading to innovative solutions and the ability to tackle a broader range of problems. + +#### 2. **Specialization and Expertise** + +In many cases, tasks are too complex for a single agent to handle efficiently. By dividing the task among multiple specialized agents, each can focus on a segment where it excels, thereby increasing the overall efficiency and effectiveness of the solution. This approach leverages the expertise of individual agents to achieve superior performance in tasks that require multifaceted knowledge and skills. + +#### 3. **Scalability and Flexibility** + +Multi-agent systems can more easily scale to handle large-scale or evolving tasks. 
Adding more agents to the system can increase its capacity or capabilities, allowing it to adapt to larger workloads or new types of tasks. This scalability is essential in dynamic environments where the demand and nature of tasks can change rapidly. + +#### 4. **Robustness and Redundancy** + +Collaboration among multiple agents enhances the system's robustness by introducing redundancy. If one agent fails or encounters an error, others can compensate, ensuring the system remains operational. This redundancy is critical in mission-critical applications where failure is not an option. + +### Overcoming Expenses with API Bills and Hosting + +Deploying multiple agents, especially when relying on cloud-based services or APIs, can incur significant costs. Here are strategies to manage and reduce these expenses: + +#### 1. **Optimize Agent Efficiency** + +Before scaling up the number of agents, ensure each agent operates as efficiently as possible. This can involve refining algorithms, reducing unnecessary API calls, and optimizing data processing to minimize computational requirements and, consequently, the associated costs. + +#### 2. **Use Open Source and Self-Hosted Solutions** + +Where possible, leverage open-source models and technologies that can be self-hosted. While there is an initial investment in setting up the infrastructure, over time, self-hosting can significantly reduce costs related to API calls and reliance on third-party services. + +#### 3. **Implement Intelligent Caching** + +Caching results for frequently asked questions or common tasks can drastically reduce the need for repeated computations or API calls. Intelligent caching systems can determine what information to store and for how long, optimizing the balance between fresh data and computational savings. + +#### 4. **Dynamic Scaling and Load Balancing** + +Use cloud services that offer dynamic scaling and load balancing to adjust the resources allocated based on the current demand. This ensures you're not paying for idle resources during low-usage periods while still being able to handle high demand when necessary. + +#### 5. **Collaborative Cost-Sharing Models** + +In scenarios where multiple stakeholders benefit from the multi-agent system, consider implementing a cost-sharing model. This approach distributes the financial burden among the users or beneficiaries, making it more sustainable. + +#### 6. **Monitor and Analyze Costs** + +Regularly monitor and analyze your usage and associated costs to identify potential savings. Many cloud providers offer tools to track and forecast expenses, helping you to adjust your usage patterns and configurations to minimize costs without sacrificing performance. + +### Conclusion + +The collaboration of multiple agents in AI systems presents a robust solution to the complexity, specialization, scalability, and robustness challenges inherent in single-agent approaches. While the associated costs can be significant, strategic optimization, leveraging open-source technologies, intelligent caching, dynamic resource management, collaborative cost-sharing, and diligent monitoring can mitigate these expenses. By adopting these strategies, organizations can harness the power of multi-agent systems to tackle complex problems more effectively and efficiently, ensuring the sustainable deployment of these advanced technologies. 
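Editor's note: to make the intelligent-caching strategy described above concrete, here is a minimal, illustrative sketch of a cache wrapped around an LLM call. It is not part of the Swarms codebase or of this patch; the `CachedLLM` name, the TTL value, and the key scheme are assumptions chosen for the example, and a production system would also need size limits and cache invalidation.

```python
import hashlib
import time


class CachedLLM:
    """Wrap any callable LLM so repeated prompts reuse earlier completions."""

    def __init__(self, llm, ttl_seconds: float = 3600.0):
        self.llm = llm            # any callable: prompt -> completion
        self.ttl = ttl_seconds    # how long a cached answer stays fresh
        self._store = {}          # key -> (timestamp, completion)

    def _key(self, prompt: str) -> str:
        # Hash the prompt so long prompts produce short, stable keys.
        return hashlib.sha256(prompt.encode("utf-8")).hexdigest()

    def __call__(self, prompt: str) -> str:
        key = self._key(prompt)
        hit = self._store.get(key)
        if hit is not None and (time.time() - hit[0]) < self.ttl:
            return hit[1]                      # cache hit: no API call, no cost
        completion = self.llm(prompt)          # cache miss: pay for one call
        self._store[key] = (time.time(), completion)
        return completion


# Hypothetical usage: every agent in the swarm shares one cache instance,
# assuming the Agent class accepts any callable as its `llm`.
# cached_llm = CachedLLM(OpenAIChat(temperature=0.5))
# agent = Agent(agent_name="Summarizer", llm=cached_llm, max_loops=1)
```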
\ No newline at end of file diff --git a/sequential_workflow_with_agents.py b/sequential_workflow_with_agents.py index bfd28f08..78bbc697 100644 --- a/sequential_workflow_with_agents.py +++ b/sequential_workflow_with_agents.py @@ -23,7 +23,8 @@ agent2 = Agent("Summarizer", llm=llm, max_loops=1, dashboard=False) workflow = SequentialWorkflow( name="Blog Generation Workflow", description=( - "Generate a youtube transcript on how to deploy agents into production" + "Generate a youtube transcript on how to deploy agents into" + " production" ), max_loops=1, autosave=True, diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index 1326e2d3..f91f994a 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -6,7 +6,8 @@ from termcolor import colored from swarms.structs.base import BaseStructure from swarms.structs.task import Task from swarms.structs.agent import Agent -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import logger + class BaseWorkflow(BaseStructure): """ @@ -22,7 +23,7 @@ class BaseWorkflow(BaseStructure): super().__init__(*args, **kwargs) self.task_pool = [] self.agent_pool = [] - + # Logging logger.info("Number of agents activated:") if self.agents: @@ -60,13 +61,8 @@ class BaseWorkflow(BaseStructure): raise ValueError( "You must provide a task or a list of tasks" ) - - def add_agent( - self, - agent: Agent, - *args, - **kwargs - ): + + def add_agent(self, agent: Agent, *args, **kwargs): return self.agent_pool(agent) def run(self): @@ -337,7 +333,6 @@ class BaseWorkflow(BaseStructure): ) ) - def workflow_dashboard(self, **kwargs) -> None: """ Displays a dashboard for the workflow. @@ -375,7 +370,7 @@ class BaseWorkflow(BaseStructure): attrs=["bold", "underline"], ) ) - + def workflow_bootup(self, **kwargs) -> None: """ Workflow bootup. 
@@ -388,4 +383,4 @@ class BaseWorkflow(BaseStructure): "green", attrs=["bold", "underline"], ) - ) \ No newline at end of file + ) diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 2fe23a1c..4db797fe 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -1,4 +1,3 @@ -import json from dataclasses import dataclass from typing import Any, Dict, List, Optional @@ -10,7 +9,6 @@ from swarms.structs.task import Task from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.utils.loguru_logger import logger -from swarms.structs.base_workflow import BaseWorkflow # SequentialWorkflow class definition using dataclasses @@ -194,7 +192,7 @@ class SequentialWorkflow: self.conversation.add(agent.agent_name, str(out)) prompt = self.conversation.return_history_as_string() print(prompt) - print(f"Next agent...........") + print("Next agent...........") out = agent(prompt) return out From d49720b2f1ea00a812afe48a6d5b6ed39ebc8ffb Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 15:38:06 -0800 Subject: [PATCH 3/8] [REQUIREMENTS][CLEANUP] --- pyproject.toml | 2 -- requirements.txt | 1 - 2 files changed, 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6c335aa7..013a9b0b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,8 +75,6 @@ supervision = "*" scikit-image = "*" pinecone-client = "*" roboflow = "*" -langchain-core = "0.1.27" - [tool.poetry.group.lint.dependencies] diff --git a/requirements.txt b/requirements.txt index a23fcf4c..714727fb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,6 @@ transformers pandas==1.5.3 langchain==0.0.333 langchain-experimental==0.0.10 -langchain-core==0.1.27 httpx==0.24.1 Pillow==9.4.0 faiss-cpu==1.7.4 From 5758e6d5758b26c728e19047bdce52fbdf2caeb4 Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 15:48:46 -0800 Subject: [PATCH 4/8] [DOCS][FAQ] --- docs/corporate/faq.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/docs/corporate/faq.md b/docs/corporate/faq.md index 20b709e9..b2bad0d4 100644 --- a/docs/corporate/faq.md +++ b/docs/corporate/faq.md @@ -63,6 +63,48 @@ Security in swarm systems involves: - **Authentication**: Implementing strict authentication mechanisms to verify the identity of each agent in the swarm. - **Resilience to Attacks**: Designing the swarm to continue functioning effectively even if some agents are compromised or attacked, utilizing redundancy and fault tolerance strategies. +#### How do individual agents within a swarm share insights without direct learning mechanisms like reinforcement learning? + +In the context of pre-trained Large Language Models (LLMs) that operate within a swarm, sharing insights typically involves explicit communication and data exchange protocols rather than direct learning mechanisms like reinforcement learning. Here's how it can work: + +- **Shared Databases and Knowledge Bases**: Agents can write to and read from a shared database or knowledge base where insights, generated content, and relevant data are stored. This allows agents to benefit from the collective experience of the swarm by accessing information that other agents have contributed. + +- **APIs for Information Exchange**: Custom APIs can facilitate the exchange of information between agents. 
Through these APIs, agents can request specific information or insights from others within the swarm, effectively sharing knowledge without direct learning. + +#### How do you balance the autonomy of individual LLMs with the need for coherent collective behavior in a swarm? + +Balancing autonomy with collective coherence in a swarm of LLMs involves: + +- **Central Coordination Mechanism**: Implementing a lightweight central coordination mechanism that can assign tasks, distribute information, and collect outputs from individual LLMs. This ensures that while each LLM operates autonomously, its actions are aligned with the swarm's overall objectives. + +- **Standardized Communication Protocols**: Developing standardized protocols for how LLMs communicate and share information ensures that even though each agent works autonomously, the information exchange remains coherent and aligned with the collective goals. + +#### How do LLM swarms adapt to changing environments or tasks without machine learning techniques? + +Adaptation in LLM swarms, without relying on machine learning techniques for dynamic learning, can be achieved through: + +- **Dynamic Task Allocation**: A central system or distributed algorithm can dynamically allocate tasks to different LLMs based on the changing environment or requirements. This ensures that the most suitable LLMs are addressing tasks for which they are best suited as conditions change. + +- **Pre-trained Versatility**: Utilizing a diverse set of pre-trained LLMs with different specialties or training data allows the swarm to select the most appropriate agent for a task as the requirements evolve. + +- **In Context Learning**: In context learning is another mechanism that can be employed within LLM swarms to adapt to changing environments or tasks. This approach involves leveraging the collective knowledge and experiences of the swarm to facilitate learning and improve performance: outputs or curated examples from other agents are placed into an LLM's prompt, so the model adapts its behavior within its context window without any parameter updates (see the illustrative sketch after these answers). + + +#### Can LLM swarms operate in physical environments, or are they limited to digital spaces? + +LLM swarms primarily operate in digital spaces, given their nature as software entities. However, they can interact with physical environments indirectly through interfaces with sensors, actuators, or other devices connected to the Internet of Things (IoT). For example, LLMs can process data from physical sensors and control devices based on their outputs, enabling applications like smart home management or autonomous vehicle navigation. + +#### Without direct learning from each other, how do agents in a swarm improve over time? + +Improvement over time in a swarm of pre-trained LLMs, without direct learning from each other, can be achieved through: + +- **Human Feedback**: Incorporating feedback from human operators or users can guide adjustments to the usage patterns or selection criteria of LLMs within the swarm, optimizing performance based on observed outcomes. + +- **Periodic Re-training and Updating**: The individual LLMs can be periodically re-trained or updated by their developers based on collective insights and feedback from their deployment within swarms. While this does not involve direct learning from each encounter, it allows the LLMs to improve over time based on aggregated experiences. + +These answers reflect the specific context of pre-trained LLMs operating within a swarm, focusing on communication, coordination, and adaptation mechanisms that align with their capabilities and constraints.
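To make the "Shared Databases and Knowledge Bases" and in-context learning answers above concrete, here is a minimal, illustrative sketch of a shared store that several pre-trained agents can write insights to and read from. It is a sketch under assumptions, not part of the swarms API: `SharedKnowledgeBase`, `Insight`, `contribute`, and `retrieve` are hypothetical names invented for this example, and a production system would typically back the store with a vector database or full-text index rather than an in-memory list.

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class Insight:
    author: str   # name of the agent that contributed the insight
    topic: str    # coarse tag used for retrieval
    content: str  # the text the agent wants to share


@dataclass
class SharedKnowledgeBase:
    """In-memory store that lets agents share insights without any weight updates."""

    insights: List[Insight] = field(default_factory=list)

    def contribute(self, author: str, topic: str, content: str) -> None:
        # Any agent can deposit what it learned for the rest of the swarm.
        self.insights.append(Insight(author=author, topic=topic, content=content))

    def retrieve(self, topic: str, limit: int = 5) -> List[Insight]:
        # Naive substring match; a real system would use embeddings or full-text search.
        matches = [i for i in self.insights if topic.lower() in i.topic.lower()]
        return matches[-limit:]


# Usage: one agent records what it learned, another folds it into its own prompt.
kb = SharedKnowledgeBase()
kb.contribute("researcher", "deployment", "Blue/green deploys reduced rollout errors.")
context = "\n".join(i.content for i in kb.retrieve("deployment"))
prompt = f"Known insights:\n{context}\n\nTask: write a deployment checklist."
```

Any agent in the swarm can call `retrieve` before acting and prepend the results to its prompt, which is the in-context adaptation described in the answers above.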
+ + #### Conclusion Swarms represent a powerful paradigm in AI, offering innovative solutions to complex, dynamic problems through collective intelligence and decentralized control. While challenges exist, particularly regarding cost and security, strategic design and management can leverage the strengths of swarm intelligence to achieve remarkable efficiency, adaptability, and robustness in a wide range of applications. \ No newline at end of file From 0d7fee779c7dd751ed68c518eb76b9dd513093bd Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 16:09:01 -0800 Subject: [PATCH 5/8] [DOCS] --- docs/swarms/memory/qdrant.md | 3 ++- docs/swarms/tokenizers/basetokenizer.md | 1 - .../agents/multi_modal_auto_agent_example.py | 2 +- playground/agents/multion_agent.py | 3 ++- playground/memory/chroma_usage_example.py | 1 - .../structs/dialogue_simulator_example.py | 2 +- playground/structs/godmode_example.py | 2 +- playground/structs/kyle_hackathon.py | 1 + playground/structs/message_pool_example.py | 2 +- playground/tools/agent_with_tools_example.py | 2 +- swarms/agents/multion_agent.py | 3 ++- swarms/agents/worker_agent.py | 1 + swarms/models/__init__.py | 25 +++++-------------- swarms/models/azure_openai_llm.py | 20 +++++++-------- swarms/models/cog_vlm.py | 10 ++++---- swarms/models/fire_function.py | 6 +++-- swarms/models/test_fire_function.py | 1 + swarms/structs/agent.py | 2 +- swarms/structs/async_workflow.py | 2 +- swarms/structs/base_workflow.py | 2 +- swarms/structs/majority_voting.py | 6 ++--- swarms/structs/sequential_workflow.py | 3 +-- swarms/utils/video_to_frames.py | 3 ++- tests/agents/test_multion.py | 4 ++- tests/models/test_fire_function_caller.py | 1 - tests/structs/test_message_pool.py | 2 +- tests/utils/test_pdf_to_text.py | 1 + 27 files changed, 53 insertions(+), 58 deletions(-) diff --git a/docs/swarms/memory/qdrant.md b/docs/swarms/memory/qdrant.md index e234e7be..cfc65670 100644 --- a/docs/swarms/memory/qdrant.md +++ b/docs/swarms/memory/qdrant.md @@ -22,7 +22,8 @@ class Qdrant: collection_name: str = "qdrant", model_name: str = "BAAI/bge-small-en-v1.5", https: bool = True, - ): ... + ): + ... 
``` ### Constructor Parameters diff --git a/docs/swarms/tokenizers/basetokenizer.md b/docs/swarms/tokenizers/basetokenizer.md index 8047f301..325640ee 100644 --- a/docs/swarms/tokenizers/basetokenizer.md +++ b/docs/swarms/tokenizers/basetokenizer.md @@ -27,7 +27,6 @@ from swarms.tokenizers import BaseTokenizer class SimpleTokenizer(BaseTokenizer): - def count_tokens(self, text: Union[str, List[dict]]) -> int: if isinstance(text, str): # Split text by spaces as a simple tokenization approach diff --git a/playground/agents/multi_modal_auto_agent_example.py b/playground/agents/multi_modal_auto_agent_example.py index a21aebb8..65f8fa2b 100644 --- a/playground/agents/multi_modal_auto_agent_example.py +++ b/playground/agents/multi_modal_auto_agent_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv -from swarms import GPT4VisionAPI, Agent +from swarms import Agent, GPT4VisionAPI # Load the environment variables load_dotenv() diff --git a/playground/agents/multion_agent.py b/playground/agents/multion_agent.py index d1a02b8e..2bbe7e92 100644 --- a/playground/agents/multion_agent.py +++ b/playground/agents/multion_agent.py @@ -1,6 +1,7 @@ -from swarms.agents.multion_agent import MultiOnAgent import timeit + from swarms import Agent, ConcurrentWorkflow, Task +from swarms.agents.multion_agent import MultiOnAgent # model model = MultiOnAgent(multion_api_key="api-key") diff --git a/playground/memory/chroma_usage_example.py b/playground/memory/chroma_usage_example.py index 4f45117e..b2fc3ce0 100644 --- a/playground/memory/chroma_usage_example.py +++ b/playground/memory/chroma_usage_example.py @@ -1,6 +1,5 @@ from swarms.memory import ChromaDB - # Initialize the memory chroma = ChromaDB( metric="cosine", diff --git a/playground/structs/dialogue_simulator_example.py b/playground/structs/dialogue_simulator_example.py index b83e13ef..a7cdfe16 100644 --- a/playground/structs/dialogue_simulator_example.py +++ b/playground/structs/dialogue_simulator_example.py @@ -1,5 +1,5 @@ -from swarms.models import OpenAIChat from swarms import DialogueSimulator, Worker +from swarms.models import OpenAIChat llm = OpenAIChat( model_name="gpt-4", openai_api_key="api-key", temperature=0.5 diff --git a/playground/structs/godmode_example.py b/playground/structs/godmode_example.py index 5d3cef83..53e6b32e 100644 --- a/playground/structs/godmode_example.py +++ b/playground/structs/godmode_example.py @@ -2,8 +2,8 @@ import os from dotenv import load_dotenv -from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat from swarms import ModelParallelizer +from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat load_dotenv() diff --git a/playground/structs/kyle_hackathon.py b/playground/structs/kyle_hackathon.py index 48c15b39..1de48f1b 100644 --- a/playground/structs/kyle_hackathon.py +++ b/playground/structs/kyle_hackathon.py @@ -1,6 +1,7 @@ import os from dotenv import load_dotenv + from swarms import Agent, OpenAIChat from swarms.agents.multion_agent import MultiOnAgent from swarms.memory.chroma_db import ChromaDB diff --git a/playground/structs/message_pool_example.py b/playground/structs/message_pool_example.py index dca596ba..6dbad128 100644 --- a/playground/structs/message_pool_example.py +++ b/playground/structs/message_pool_example.py @@ -1,6 +1,6 @@ +from swarms import OpenAIChat from swarms.structs.agent import Agent from swarms.structs.message_pool import MessagePool -from swarms import OpenAIChat agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") agent2 = Agent(llm=OpenAIChat(), 
agent_name="agent2") diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 4524edf1..35b61703 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms import OpenAIChat, Agent +from swarms import Agent, OpenAIChat from swarms.tools.tool import tool load_dotenv() diff --git a/swarms/agents/multion_agent.py b/swarms/agents/multion_agent.py index 2ef66b47..efeb5a43 100644 --- a/swarms/agents/multion_agent.py +++ b/swarms/agents/multion_agent.py @@ -1,8 +1,9 @@ import os + import multion +from dotenv import load_dotenv from swarms.models.base_llm import AbstractLLM -from dotenv import load_dotenv # Load environment variables load_dotenv() diff --git a/swarms/agents/worker_agent.py b/swarms/agents/worker_agent.py index 6dffc483..c0e7f464 100644 --- a/swarms/agents/worker_agent.py +++ b/swarms/agents/worker_agent.py @@ -6,6 +6,7 @@ from langchain.docstore import InMemoryDocstore from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain_experimental.autonomous_agents import AutoGPT + from swarms.tools.tool import BaseTool from swarms.utils.decorators import error_decorator, timing_decorator diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index 8981f70e..3f8fb3e2 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -1,9 +1,7 @@ from swarms.models.anthropic import Anthropic # noqa: E402 from swarms.models.base_embedding_model import BaseEmbeddingModel from swarms.models.base_llm import AbstractLLM # noqa: E402 -from swarms.models.base_multimodal_model import ( - BaseMultiModalModel, -) +from swarms.models.base_multimodal_model import BaseMultiModalModel # noqa: E402 from swarms.models.biogpt import BioGPT # noqa: E402 @@ -15,9 +13,7 @@ from swarms.models.clipq import CLIPQ # noqa: E402 # from swarms.models.kosmos_two import Kosmos # noqa: E402 # from swarms.models.cog_agent import CogAgent # noqa: E402 ## Function calling models -from swarms.models.fire_function import ( - FireFunctionCaller, -) +from swarms.models.fire_function import FireFunctionCaller from swarms.models.fuyu import Fuyu # noqa: E402 from swarms.models.gemini import Gemini # noqa: E402 from swarms.models.gigabind import Gigabind # noqa: E402 @@ -25,9 +21,7 @@ from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402 from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 from swarms.models.idefics import Idefics # noqa: E402 from swarms.models.kosmos_two import Kosmos # noqa: E402 -from swarms.models.layoutlm_document_qa import ( - LayoutLMDocumentQA, -) +from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA # noqa: E402 from swarms.models.llava import LavaMultiModal # noqa: E402 @@ -47,10 +41,7 @@ from swarms.models.petals import Petals # noqa: E402 from swarms.models.qwen import QwenVLMultiModal # noqa: E402 from swarms.models.roboflow_model import RoboflowMultiModal from swarms.models.sam_supervision import SegmentAnythingMarkGenerator -from swarms.models.sampling_params import ( - SamplingParams, - SamplingType, -) +from swarms.models.sampling_params import SamplingParams, SamplingType from swarms.models.timm import TimmModel # noqa: E402 # from swarms.models.modelscope_pipeline import ModelScopePipeline @@ -67,15 +58,11 @@ from swarms.models.types import ( # noqa: E402 TextModality, VideoModality, ) -from 
swarms.models.ultralytics_model import ( - UltralyticsModel, -) +from swarms.models.ultralytics_model import UltralyticsModel # noqa: E402 from swarms.models.vilt import Vilt # noqa: E402 -from swarms.models.wizard_storytelling import ( - WizardLLMStoryTeller, -) +from swarms.models.wizard_storytelling import WizardLLMStoryTeller # noqa: E402 # from swarms.models.vllm import vLLM # noqa: E402 diff --git a/swarms/models/azure_openai_llm.py b/swarms/models/azure_openai_llm.py index 3ff8bd22..aebb03fb 100644 --- a/swarms/models/azure_openai_llm.py +++ b/swarms/models/azure_openai_llm.py @@ -2,7 +2,7 @@ from __future__ import annotations import logging import os -from typing import Any, Callable, Dict, List, Mapping, Optional, Union +from typing import Any, Callable, Mapping import openai from langchain_core.pydantic_v1 import ( @@ -36,14 +36,14 @@ class AzureOpenAI(BaseOpenAI): openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct") """ - azure_endpoint: Union[str, None] = None + azure_endpoint: str | None = None """Your Azure endpoint, including the resource. Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. Example: `https://example-resource.azure.openai.com/` """ - deployment_name: Union[str, None] = Field( + deployment_name: str | None = Field( default=None, alias="azure_deployment" ) """A model deployment. @@ -53,11 +53,11 @@ class AzureOpenAI(BaseOpenAI): """ openai_api_version: str = Field(default="", alias="api_version") """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" - openai_api_key: Optional[SecretStr] = Field( + openai_api_key: SecretStr | None = Field( default=None, alias="api_key" ) """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" - azure_ad_token: Optional[SecretStr] = None + azure_ad_token: SecretStr | None = None """Your Azure Active Directory token. Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. @@ -65,7 +65,7 @@ class AzureOpenAI(BaseOpenAI): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. """ # noqa: E501 - azure_ad_token_provider: Union[Callable[[], str], None] = None + azure_ad_token_provider: Callable[[], str] | None = None """A function that returns an Azure Active Directory token. Will be invoked on every request. 
@@ -78,12 +78,12 @@ class AzureOpenAI(BaseOpenAI): """ @classmethod - def get_lc_namespace(cls) -> List[str]: + def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object.""" return ["langchain", "llms", "openai"] @root_validator() - def validate_environment(cls, values: Dict) -> Dict: + def validate_environment(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" if values["n"] < 1: raise ValueError("n must be at least 1.") @@ -206,7 +206,7 @@ class AzureOpenAI(BaseOpenAI): } @property - def _invocation_params(self) -> Dict[str, Any]: + def _invocation_params(self) -> dict[str, Any]: openai_params = {"model": self.deployment_name} return {**openai_params, **super()._invocation_params} @@ -216,7 +216,7 @@ class AzureOpenAI(BaseOpenAI): return "azure" @property - def lc_attributes(self) -> Dict[str, Any]: + def lc_attributes(self) -> dict[str, Any]: return { "openai_api_type": self.openai_api_type, "openai_api_version": self.openai_api_version, diff --git a/swarms/models/cog_vlm.py b/swarms/models/cog_vlm.py index e456b669..a3f820c5 100644 --- a/swarms/models/cog_vlm.py +++ b/swarms/models/cog_vlm.py @@ -209,8 +209,6 @@ class CogVLMMultiModal(BaseMultiModalModel): total_gb = total_bytes / (1 << 30) if total_gb < 40: pass - else: - pass torch.cuda.empty_cache() @@ -462,7 +460,7 @@ class CogVLMMultiModal(BaseMultiModalModel): elif role == "assistant": if formatted_history: if formatted_history[-1][1] != "": - assert False, ( + raise AssertionError( "the last query is answered. answer" f" again. {formatted_history[-1][0]}," f" {formatted_history[-1][1]}," @@ -473,9 +471,11 @@ class CogVLMMultiModal(BaseMultiModalModel): text_content, ) else: - assert False, "assistant reply before user" + raise AssertionError( + "assistant reply before user" + ) else: - assert False, f"unrecognized role: {role}" + raise AssertionError(f"unrecognized role: {role}") return last_user_query, formatted_history, image_list diff --git a/swarms/models/fire_function.py b/swarms/models/fire_function.py index 6803d822..f61ec2bd 100644 --- a/swarms/models/fire_function.py +++ b/swarms/models/fire_function.py @@ -1,8 +1,10 @@ -from transformers import AutoModelForCausalLM, AutoTokenizer import json -from swarms.models.base_llm import AbstractLLM from typing import Any +from transformers import AutoModelForCausalLM, AutoTokenizer + +from swarms.models.base_llm import AbstractLLM + class FireFunctionCaller(AbstractLLM): """ diff --git a/swarms/models/test_fire_function.py b/swarms/models/test_fire_function.py index b6a67c37..082d954d 100644 --- a/swarms/models/test_fire_function.py +++ b/swarms/models/test_fire_function.py @@ -1,4 +1,5 @@ from unittest.mock import MagicMock + from swarms.models.fire_function import FireFunctionCaller diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 412dc02d..9d2a5e1a 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -8,6 +8,7 @@ import time import uuid from typing import Any, Callable, Dict, List, Optional, Tuple +import yaml from loguru import logger from termcolor import colored @@ -31,7 +32,6 @@ from swarms.utils.video_to_frames import ( save_frames_as_images, video_to_frames, ) -import yaml # Utils diff --git a/swarms/structs/async_workflow.py b/swarms/structs/async_workflow.py index 0d5e85b7..6cf9e312 100644 --- a/swarms/structs/async_workflow.py +++ b/swarms/structs/async_workflow.py @@ -2,9 +2,9 @@ import asyncio from dataclasses import dataclass, field from typing import 
Any, Callable, List, Optional +from swarms.structs.agent import Agent from swarms.structs.task import Task from swarms.utils.logger import logger -from swarms.structs.agent import Agent @dataclass diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index f91f994a..17b98ce8 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -3,9 +3,9 @@ from typing import Any, Dict, List, Optional from termcolor import colored +from swarms.structs.agent import Agent from swarms.structs.base import BaseStructure from swarms.structs.task import Task -from swarms.structs.agent import Agent from swarms.utils.loguru_logger import logger diff --git a/swarms/structs/majority_voting.py b/swarms/structs/majority_voting.py index fc4f8018..a2b414ba 100644 --- a/swarms/structs/majority_voting.py +++ b/swarms/structs/majority_voting.py @@ -1,15 +1,15 @@ import asyncio import concurrent.futures import re +import sys from collections import Counter from multiprocessing import Pool from typing import Any, List -from swarms.structs.agent import Agent -from swarms.structs.conversation import Conversation from loguru import logger -import sys +from swarms.structs.agent import Agent +from swarms.structs.conversation import Conversation # Configure loguru logger with advanced settings logger.remove() diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 4db797fe..7c94f426 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -3,11 +3,10 @@ from typing import Any, Dict, List, Optional from termcolor import colored -from swarms.structs.task import Task - # from swarms.utils.logger import logger from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation +from swarms.structs.task import Task from swarms.utils.loguru_logger import logger diff --git a/swarms/utils/video_to_frames.py b/swarms/utils/video_to_frames.py index ae16610c..528e45b0 100644 --- a/swarms/utils/video_to_frames.py +++ b/swarms/utils/video_to_frames.py @@ -1,6 +1,7 @@ -import cv2 from typing import List +import cv2 + def video_to_frames(video_file: str) -> List: """ diff --git a/tests/agents/test_multion.py b/tests/agents/test_multion.py index 8da68e23..23614934 100644 --- a/tests/agents/test_multion.py +++ b/tests/agents/test_multion.py @@ -1,5 +1,7 @@ +from unittest.mock import MagicMock, patch + import pytest -from unittest.mock import patch, MagicMock + from swarms.agents.multion_agent import MultiOnAgent diff --git a/tests/models/test_fire_function_caller.py b/tests/models/test_fire_function_caller.py index 703417a7..082d954d 100644 --- a/tests/models/test_fire_function_caller.py +++ b/tests/models/test_fire_function_caller.py @@ -1,6 +1,5 @@ from unittest.mock import MagicMock - from swarms.models.fire_function import FireFunctionCaller diff --git a/tests/structs/test_message_pool.py b/tests/structs/test_message_pool.py index 91d0c28b..cfbb4df5 100644 --- a/tests/structs/test_message_pool.py +++ b/tests/structs/test_message_pool.py @@ -1,6 +1,6 @@ +from swarms import OpenAIChat from swarms.structs.agent import Agent from swarms.structs.message_pool import MessagePool -from swarms import OpenAIChat def test_message_pool_initialization(): diff --git a/tests/utils/test_pdf_to_text.py b/tests/utils/test_pdf_to_text.py index f271af60..257364b4 100644 --- a/tests/utils/test_pdf_to_text.py +++ b/tests/utils/test_pdf_to_text.py @@ -1,5 +1,6 @@ import pypdf import pytest + from 
swarms.utils import pdf_to_text From 6ade4d509aa4123844b2f9ff732987827f03de55 Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 16:23:05 -0800 Subject: [PATCH 6/8] [DOCS][FIX] --- docs/corporate/data_room.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/corporate/data_room.md b/docs/corporate/data_room.md index cd5cada0..199aa7cb 100644 --- a/docs/corporate/data_room.md +++ b/docs/corporate/data_room.md @@ -108,6 +108,3 @@ Swarms is an open source framework for developers in python to enable seamless, | Github Traffic Metrics | Metrics related to traffic, such as views and clones on Github. | [Github Traffic Metrics](https://github.com/kyegomez/swarms/graphs/traffic) | | Issues with the framework | Current open issues for the product on Github. | [![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) | - -gi -------- \ No newline at end of file From 2eb2b68c0afc849da26819be494b054fea7c0442 Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 16:35:56 -0800 Subject: [PATCH 7/8] [DOCS][FIX] --- docs/corporate/data_room.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/corporate/data_room.md b/docs/corporate/data_room.md index 199aa7cb..91b719bc 100644 --- a/docs/corporate/data_room.md +++ b/docs/corporate/data_room.md @@ -93,10 +93,10 @@ Swarms is an open source framework for developers in python to enable seamless, [Here is the official Swarms Github Page:](https://github.com/kyegomez/swarms) -### Product Growth Metrics +### Product Growth Metrics | Name | Description | Link | -|--------------------------b--------|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +|----------------------------------|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | Total Downloads of all time | Total number of downloads for the product over its entire lifespan. | [![Downloads](https://static.pepy.tech/badge/swarms)](https://pepy.tech/project/swarms) | | Downloads this month | Number of downloads for the product in the current month. | [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms) | | Total Downloads this week | Total number of downloads for the product in the current week. | [![Downloads](https://static.pepy.tech/badge/swarms/week)](https://pepy.tech/project/swarms) | @@ -108,3 +108,4 @@ Swarms is an open source framework for developers in python to enable seamless, | Github Traffic Metrics | Metrics related to traffic, such as views and clones on Github. | [Github Traffic Metrics](https://github.com/kyegomez/swarms/graphs/traffic) | | Issues with the framework | Current open issues for the product on Github. 
| [![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) | + From 2c9bed2a41c839655862ead029487eab2745f3fb Mon Sep 17 00:00:00 2001 From: Kye Date: Wed, 28 Feb 2024 16:51:04 -0800 Subject: [PATCH 8/8] [DOCS] --- docs/corporate/data_room.md | 12 +++++++----- pyproject.toml | 2 +- sequential_workflow_with_agents.py | 4 ---- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/docs/corporate/data_room.md b/docs/corporate/data_room.md index 91b719bc..0b7b1afb 100644 --- a/docs/corporate/data_room.md +++ b/docs/corporate/data_room.md @@ -68,11 +68,11 @@ The team has thousands of hours building and optimizing autonomous agents. Leade Key milestones: get 80K framework users in January 2024, start contracts in target verticals, introduce commercial products in 2025 with various pricing models. -## Resources -### **Pre-Seed Pitch Deck** +### **Resources** +#### **Pre-Seed Pitch Deck** - [Here is our pitch deck for our preseed round](https://drive.google.com/file/d/1c76gK5UIdrfN4JOSpSlvVBEOpzR9emWc/view?usp=sharing) -### **The Swarm Corporation Memo** +#### **The Swarm Corporation Memo** To learn more about our mission, vision, plans for GTM, and much more please refer to the [Swarm Memo here](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing) @@ -91,8 +91,10 @@ This section is dedicated entirely for corporate documents. ## **Product** Swarms is an open source framework for developers in python to enable seamless, reliable, and scalable multi-agent orchestration through modularity, customization, and precision. -[Here is the official Swarms Github Page:](https://github.com/kyegomez/swarms) - +- [Swarms Github Page:](https://github.com/kyegomez/swarms) +- [Swarms Memo](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit) +- [Swarms Project Board](https://github.com/users/kyegomez/projects/1) +- [Swarms Website](https://www.swarms.world/g) ### Product Growth Metrics | Name | Description | Link | diff --git a/pyproject.toml b/pyproject.toml index 013a9b0b..03304a3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "4.2.0" +version = "4.2.1" description = "Swarms - Pytorch" license = "MIT" authors = ["Kye Gomez "] diff --git a/sequential_workflow_with_agents.py b/sequential_workflow_with_agents.py index 78bbc697..06f071db 100644 --- a/sequential_workflow_with_agents.py +++ b/sequential_workflow_with_agents.py @@ -34,7 +34,3 @@ workflow = SequentialWorkflow( # Run the workflow workflow.run() - -# # # Output the results -# for task in workflow.tasks: -# print(f"Task: {task.description}, Result: {task.result}")