diff --git a/.env.example b/.env.example index 940fac53..e933d976 100644 --- a/.env.example +++ b/.env.example @@ -1,23 +1,48 @@ +# Framework Configuration WORKSPACE_DIR="agent_workspace" +SWARMS_VERBOSE_GLOBAL="False" SWARMS_API_KEY="" -USE_TELEMETRY=True -OPENAI_API_KEY="sk-" -GOOGLE_API_KEY="" -AI21_API_KEY="your_api_key_here" -COHERE_API_KEY="your_api_key_here" -ALEPHALPHA_API_KEY="your_api_key_here" -HUGGINFACEHUB_API_KEY="your_api_key_here" -EVAL_PORT=8000 -MODEL_NAME="gpt-4" -USE_GPU=True -PLAYGROUND_DIR="examples" -LOG_LEVEL="INFO" -BOT_NAME="Orca" -HF_API_KEY="your_huggingface_api_key_here" + +# Model Provider API Keys +## OpenAI +OPENAI_API_KEY="" + +## Anthropic +ANTHROPIC_API_KEY="" + +## Google +GEMINI_API_KEY="" + +## Hugging Face +HUGGINGFACE_TOKEN="" + +## Perplexity AI +PPLX_API_KEY="" + +## AI21 +AI21_API_KEY="" + +# Tool Provider API Keys +## Search Tools +BING_BROWSER_API="" +BRAVESEARCH_API_KEY="" +TAVILY_API_KEY="" +YOU_API_KEY="" + +## Analytics & Monitoring AGENTOPS_API_KEY="" -ANTHROPIC_API_KEY="your_anthropic_api_key" -AZURE_OPENAI_ENDPOINT="your_azure_openai_endpoint" -AZURE_OPENAI_DEPLOYMENT="your_azure_openai_deployment" -OPENAI_API_VERSION="your_openai_api_version" -AZURE_OPENAI_API_KEY="your_azure_openai_api_key" -AZURE_OPENAI_AD_TOKEN="your_azure_openai_ad_token" +EXA_API_KEY="" + +## Browser Automation +MULTION_API_KEY="" + +## Other Tools +HCP_APP_ID="" + +# Cloud Provider Configuration +## Azure OpenAI +AZURE_OPENAI_ENDPOINT="" +AZURE_OPENAI_DEPLOYMENT="" +OPENAI_API_VERSION="" +AZURE_OPENAI_API_KEY="" +AZURE_OPENAI_AD_TOKEN="" diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index be346103..ef69da8e 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -22,4 +22,4 @@ jobs: - run: ruff format . - run: ruff check --fix . - - uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c + - uses: autofix-ci/action@551dded8c6cc8a1054039c8bc0b8b48c51dfc6ef diff --git a/.gitignore b/.gitignore index 418d9a8e..28e8b8d5 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ static/generated runs Financial-Analysis-Agent_state.json experimental +ffn_alternatives artifacts_five encryption errors diff --git a/Dockerfile b/Dockerfile index 20567ff6..0810481f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,20 @@ -# Use Python 3.11 slim-bullseye for smaller base image -FROM python:3.11-slim-bullseye AS builder +# Use Python 3.11 slim-bullseye for a smaller base image +FROM python:3.11-slim-bullseye -# Set environment variables +# Set environment variables for Python and pip ENV PYTHONDONTWRITEBYTECODE=1 \ PYTHONUNBUFFERED=1 \ PIP_NO_CACHE_DIR=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=1 + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + WORKSPACE_DIR="agent_workspace" \ + PATH="/app:${PATH}" \ + PYTHONPATH="/app:${PYTHONPATH}" \ + USER=swarms # Set the working directory -WORKDIR /build +WORKDIR /app -# Install only essential build dependencies +# Install essential build dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ gcc \ @@ -18,38 +22,21 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ gfortran \ && rm -rf /var/lib/apt/lists/* -# Install swarms packages -RUN pip install --no-cache-dir swarm-models swarms - -# Production stage -FROM python:3.11-slim-bullseye - -# Set secure environment variables -ENV PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 \ - WORKSPACE_DIR="agent_workspace" \ - PATH="/app:${PATH}" \ - PYTHONPATH="/app:${PYTHONPATH}" \ - USER=swarms +# Install required Python packages +RUN pip install --no-cache-dir swarm-models swarms && \ + pip install --no-cache-dir transformers torch litellm tiktoken openai pandas numpy pypdf -# Create non-root user +# Create a non-root user and set correct permissions for the application directory RUN useradd -m -s /bin/bash -U $USER && \ mkdir -p /app && \ chown -R $USER:$USER /app -# Set working directory -WORKDIR /app - -# Copy only necessary files from builder -COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages -COPY --from=builder /usr/local/bin /usr/local/bin - -# Copy application with correct permissions +# Copy application files into the image with proper ownership COPY --chown=$USER:$USER . . -# Switch to non-root user +# Switch to the non-root user USER $USER -# Health check +# Health check to ensure the container is running properly HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \ - CMD python -c "import swarms; print('Health check passed')" || exit 1 \ No newline at end of file + CMD python -c "import swarms; print('Health check passed')" || exit 1 diff --git a/LICENSE b/LICENSE index 0c6bf244..601999fa 100644 --- a/LICENSE +++ b/LICENSE @@ -1,661 +1,201 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - Swarms provides multi-agent orchestration mechanisms to enable llm agents to collaborate and work together - Copyright (C) <2025> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2025] [The Galactic Swarm Corporation] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index c3657ef5..55e1606f 100644 --- a/README.md +++ b/README.md @@ -2062,4 +2062,4 @@ Join our growing community around the world, for real-time support, ideas, and d # License -GNU AFFERO GENERAL PUBLIC LICENSE +APACHE diff --git a/docs/finance/subscription.md b/docs/finance/subscription.md new file mode 100644 index 00000000..b7123a42 --- /dev/null +++ b/docs/finance/subscription.md @@ -0,0 +1,1030 @@ +# $swarms Token Subscription Payment System + +## Overview +This documentation covers the implementation of subscription-based payments using $swarms tokens on the Solana blockchain. + +## System Architecture + +```mermaid +flowchart TB + subgraph Frontend + UI[User Interface] + PM[Payment Manager] + end + + subgraph Backend + SM[Subscription Manager] + DB[(Database)] + Queue[Job Queue] + end + + subgraph Blockchain + SC[Smart Contract] + Token[$swarms Token] + end + + UI -->|Subscribe| PM + PM -->|Create Subscription| SM + SM -->|Store| DB + SM -->|Schedule| Queue + Queue -->|Execute Payment| SC + SC -->|Transfer| Token +``` + +## Core Components + +### 1. Subscription Smart Contract + +```typescript +import { Program, web3 } from '@project-serum/anchor'; +import { PublicKey, SystemProgram } from '@solana/web3.js'; + +interface SubscriptionAccount { + subscriber: PublicKey; + merchant: PublicKey; + amount: number; + interval: number; + nextPaymentDate: number; + active: boolean; +} + +class SubscriptionContract { + program: Program; + + constructor(program: Program) { + this.program = program; + } + + async createSubscription( + subscriber: PublicKey, + merchant: PublicKey, + amount: number, + interval: number + ): Promise { + const subscription = web3.Keypair.generate(); + const currentTimestamp = Math.floor(Date.now() / 1000); + + await this.program.rpc.createSubscription( + new BN(amount), + new BN(interval), + new BN(currentTimestamp + interval), + { + accounts: { + subscription: subscription.publicKey, + subscriber, + merchant, + systemProgram: SystemProgram.programId, + }, + signers: [subscription], + } + ); + + return subscription.publicKey.toString(); + } + + async processPayment(subscriptionAddress: string): Promise { + const subscription = await this.program.account.subscription.fetch( + new PublicKey(subscriptionAddress) + ); + + // Transfer tokens + const signature = await this.transferTokens( + subscription.subscriber, + subscription.merchant, + subscription.amount + ); + + // Update next payment date + await this.updateNextPaymentDate( + subscriptionAddress, + subscription.nextPaymentDate + subscription.interval + ); + + return signature; + } +} +``` + +### 2. Subscription Manager Service + +```typescript +class SubscriptionManager { + private contract: SubscriptionContract; + private db: Database; + private queue: Queue; + + constructor( + contract: SubscriptionContract, + db: Database, + queue: Queue + ) { + this.contract = contract; + this.db = db; + this.queue = queue; + } + + async createSubscription( + subscriberAddress: string, + merchantAddress: string, + amount: number, + interval: number, + planDetails: SubscriptionPlan + ): Promise { + // Create blockchain subscription + const subscriptionAddress = await this.contract.createSubscription( + new PublicKey(subscriberAddress), + new PublicKey(merchantAddress), + amount, + interval + ); + + // Store subscription details + const subscription = await this.db.subscriptions.create({ + address: subscriptionAddress, + subscriber: subscriberAddress, + merchant: merchantAddress, + amount, + interval, + planDetails, + status: 'active', + createdAt: new Date() + }); + + // Schedule first payment + await this.scheduleNextPayment(subscription); + + return { + subscriptionId: subscription.id, + address: subscriptionAddress, + status: 'active' + }; + } + + async cancelSubscription( + subscriptionId: string, + reason?: string + ): Promise { + // Update blockchain state + await this.contract.deactivateSubscription(subscriptionId); + + // Update database + await this.db.subscriptions.update({ + where: { id: subscriptionId }, + data: { + status: 'cancelled', + cancelledAt: new Date(), + cancelReason: reason + } + }); + + // Remove scheduled payments + await this.queue.removeScheduledJobs(subscriptionId); + } +} +``` + +### 3. Payment Processing System + +```typescript +class PaymentProcessor { + private connection: Connection; + private subscriptionManager: SubscriptionManager; + + async processSubscriptionPayment( + subscriptionId: string + ): Promise { + const subscription = await this.subscriptionManager.get(subscriptionId); + + try { + // Create and send transaction + const signature = await this.createAndExecutePayment(subscription); + + // Update payment history + await this.recordPayment({ + subscriptionId, + amount: subscription.amount, + signature, + status: 'success' + }); + + // Schedule next payment + await this.subscriptionManager.scheduleNextPayment(subscription); + + return { success: true, signature }; + + } catch (error) { + await this.handlePaymentFailure(subscription, error); + return { success: false, error }; + } + } + + private async createAndExecutePayment( + subscription: Subscription + ): Promise { + const transaction = await this.buildPaymentTransaction(subscription); + return await this.sendAndConfirmTransaction(transaction); + } +} +``` + +## Subscription Plans and Pricing + +```mermaid +classDiagram + class SubscriptionPlan { + +string id + +string name + +number amount + +number interval + +Feature[] features + +boolean isActive + +create(PlanData) Plan + +update(string, PlanData) Plan + +delete(string) void + } + + class Feature { + +string id + +string name + +string description + +boolean isEnabled + } + + class PricingTier { + +string id + +string name + +number amount + +Discount[] discounts + } + + class Discount { + +string id + +number percentage + +number duration + } + + SubscriptionPlan "1" -- "*" Feature + SubscriptionPlan "1" -- "1" PricingTier + PricingTier "1" -- "*" Discount +``` + +### Implementation Example + +```typescript +interface SubscriptionPlan { + id: string; + name: string; + amount: number; + interval: number; + features: Feature[]; + isActive: boolean; +} + +class PlanManager { + async createPlan(planData: CreatePlanDTO): Promise { + // Validate plan data + this.validatePlanData(planData); + + // Create plan in database + const plan = await this.db.plans.create({ + data: { + ...planData, + isActive: true, + createdAt: new Date() + } + }); + + // Create on-chain representation + await this.contract.registerPlan(plan.id, plan.amount, plan.interval); + + return plan; + } + + async updatePlan( + planId: string, + updates: UpdatePlanDTO + ): Promise { + // Validate updates + this.validatePlanUpdates(updates); + + // Update in database + const updatedPlan = await this.db.plans.update({ + where: { id: planId }, + data: updates + }); + + // Update on-chain if necessary + if (updates.amount || updates.interval) { + await this.contract.updatePlan( + planId, + updates.amount, + updates.interval + ); + } + + return updatedPlan; + } +} +``` + +## Payment Flow + +```mermaid +sequenceDiagram + participant User + participant UI + participant Backend + participant Blockchain + participant Wallet + + User->>UI: Select Subscription Plan + UI->>Backend: Create Subscription Request + Backend->>Blockchain: Deploy Subscription Contract + Blockchain-->>Backend: Contract Address + Backend->>UI: Subscription Details + UI->>Wallet: Request Approval + Wallet->>User: Confirm Transaction + User->>Wallet: Approve + Wallet->>Blockchain: Submit Transaction + Blockchain-->>Backend: Confirmation + Backend->>UI: Success Response + UI->>User: Show Confirmation +``` + +## Error Handling and Recovery + +```typescript +class SubscriptionErrorHandler { + async handlePaymentFailure( + subscription: Subscription, + error: Error + ): Promise { + // Log error + await this.logError({ + subscriptionId: subscription.id, + error, + timestamp: new Date() + }); + + // Determine retry strategy + const retryStrategy = this.determineRetryStrategy( + subscription, + error + ); + + if (retryStrategy.shouldRetry) { + await this.scheduleRetry( + subscription, + retryStrategy.retryAfter + ); + } else { + await this.handleSubscriptionFailure(subscription); + } + + // Notify relevant parties + await this.sendNotifications(subscription, error); + } + + private async handleSubscriptionFailure( + subscription: Subscription + ): Promise { + // Update subscription status + await this.subscriptionManager.updateStatus( + subscription.id, + 'failed' + ); + + // Notify merchant + await this.notifyMerchant(subscription); + + // Create recovery task + await this.createRecoveryTask(subscription); + } +} +``` + +## Analytics and Reporting + +```typescript +class SubscriptionAnalytics { + async generateMetrics( + timeframe: TimeFrame + ): Promise { + const metrics = { + activeSubscriptions: await this.countActiveSubscriptions(), + recurringRevenue: await this.calculateMRR(), + churnRate: await this.calculateChurnRate(timeframe), + lifetimeValue: await this.calculateLTV(), + conversionRate: await this.calculateConversionRate() + }; + + return metrics; + } + + async generateReport( + options: ReportOptions + ): Promise { + const report = { + metrics: await this.generateMetrics(options.timeframe), + subscriptionsByPlan: await this.aggregateByPlan(), + revenueProjection: await this.projectRevenue(options.months), + churnAnalysis: await this.analyzeChurn() + }; + + return report; + } +} +``` + +## Security Measures + +```typescript +class SubscriptionSecurity { + validateSubscription(subscription: Subscription): boolean { + return this.validateSignature(subscription.signature) && + this.validatePermissions(subscription.subscriber) && + this.validateLimits(subscription.amount); + } + + async monitorTransactions(): Promise { + // Monitor for suspicious activity + const transactions = await this.getRecentTransactions(); + + for (const tx of transactions) { + if (this.isAnomalous(tx)) { + await this.flagTransaction(tx); + await this.notifyAdmin(tx); + } + } + } +} +``` + +## Testing Framework + +```typescript +describe('Subscription System', () => { + let subscriptionManager: SubscriptionManager; + let paymentProcessor: PaymentProcessor; + + beforeEach(async () => { + // Setup test environment + subscriptionManager = new SubscriptionManager( + mockContract, + mockDb, + mockQueue + ); + + paymentProcessor = new PaymentProcessor( + mockConnection, + subscriptionManager + ); + }); + + describe('Subscription Creation', () => { + it('should create a new subscription', async () => { + const subscription = await subscriptionManager.createSubscription( + testData.subscriber, + testData.merchant, + testData.amount, + testData.interval, + testData.plan + ); + + expect(subscription).to.have.property('id'); + expect(subscription.status).to.equal('active'); + }); + }); + + describe('Payment Processing', () => { + it('should process recurring payments', async () => { + const result = await paymentProcessor.processSubscriptionPayment( + testData.subscriptionId + ); + + expect(result.success).to.be.true; + expect(result).to.have.property('signature'); + }); + }); +}); +``` + +## Migration and Upgrades + +```typescript +class SubscriptionMigrationManager { + async migrateSubscriptions( + fromVersion: string, + toVersion: string + ): Promise { + const subscriptions = await this.getSubscriptionsForMigration( + fromVersion + ); + + const results = await Promise.allSettled( + subscriptions.map(sub => this.migrateSubscription(sub, toVersion)) + ); + + return this.generateMigrationReport(results); + } + + private async migrateSubscription( + subscription: Subscription, + newVersion: string + ): Promise { + // Create new subscription with updated structure + const newSubscription = await this.createNewVersionSubscription( + subscription, + newVersion + ); + + // Migrate payment history + await this.migratePaymentHistory( + subscription.id, + newSubscription.id + ); + + // Update references + await this.updateSubscriptionReferences( + subscription.id, + newSubscription.id + ); + + // Archive old subscription + await this.archiveSubscription(subscription.id); + } +} +``` + +## Compliance and Audit + +```typescript +class SubscriptionAuditor { + async auditSubscription( + subscriptionId: string + ): Promise { + const subscription = await this.getSubscription(subscriptionId); + const payments = await this.getPaymentHistory(subscriptionId); + + return { + subscription: this.validateSubscriptionData(subscription), + payments: this.validatePayments(payments), + compliance: await this.checkCompliance(subscription), + recommendations: this.generateRecommendations(subscription) + }; + } + + async generateComplianceReport(): Promise { + return { + totalSubscriptions: await this.countSubscriptions(), + activeSubscriptions: await this.countActiveSubscriptions(), + riskAssessment: await this.assessRisk(), + complianceStatus: await this.checkComplianceStatus(), + regulatoryRequirements: await this.checkRegulatory() + }; + } +} +``` + +## Performance Optimization + +```typescript +class SubscriptionOptimizer { + async optimizePerformance(): Promise { + const metrics = await this.gatherPerformanceMetrics(); + const bottlenecks = this.identifyBottlenecks(metrics); + + // Optimize database queries + await this.optimizeDatabaseQueries(); + + // Optimize blockchain interactions + await this.optimizeBlockchainCalls(); + + // Cache frequently accessed data + await this.setupCaching(); + + return { + beforeMetrics: metrics, + afterMetrics: await this.gatherPerformanceMetrics(), + improvements: this.calculateImprovements(), + recommendations: this.generateOptimizationRecommendations() + }; + } + + private async optimizeDatabaseQueries(): Promise { + await this.createIndexes(); + await this.optimizeJoins(); + await this.implementQueryCaching(); + } + + private async optimizeBlockchainCalls(): Promise { + await this.implementBatchProcessing(); + await this.setupWebSocketConnections(); + await this.cacheBlockchainState(); + } +} +``` + +## Webhook Integration + +```typescript +class SubscriptionWebhooks { + private endpoints: Map; + + async registerWebhook( + event: WebhookEvent, + endpoint: string, + secret: string + ): Promise { + const webhookId = generateUniqueId(); + + await this.db.webhooks.create({ + id: webhookId, + event, + endpoint, + secret, + status: 'active' + }); + + return webhookId; + } + + async triggerWebhook( + event: WebhookEvent, + data: any + ): Promise { + const webhooks = await this.getWebhooksForEvent(event); + + for (const webhook of webhooks) { + try { + const payload = this.createWebhookPayload(event, data); + const signature = this.signPayload(payload, webhook.secret); + + await this.sendWebhookRequest( + webhook.endpoint, + payload, + signature + ); + + } catch (error) { + await this.handleWebhookError(webhook, error); + } + } + } +} +``` + +## Notification System + +```mermaid +flowchart TB + subgraph NotificationSystem + E[Event Handler] + T[Template Engine] + Q[Queue Manager] + D[Delivery Service] + end + + subgraph Channels + Email[Email Service] + SMS[SMS Service] + Push[Push Notifications] + Web[Web Hooks] + end + + E -->|Event| T + T -->|Formatted| Q + Q -->|Queued| D + D -->|Send| Email + D -->|Send| SMS + D -->|Send| Push + D -->|Send| Web +``` + +```typescript +class SubscriptionNotifier { + async notify( + event: SubscriptionEvent, + subscription: Subscription + ): Promise { + const template = await this.getNotificationTemplate(event); + const recipients = await this.getRecipients(subscription); + + const notifications = recipients.map(recipient => + this.createNotification(template, recipient, subscription) + ); + + await this.queueNotifications(notifications); + } + + private async createNotification( + template: Template, + recipient: Recipient, + subscription: Subscription + ): Promise { + return { + recipient, + content: this.renderTemplate(template, { + subscription, + recipient + }), + channel: recipient.preferredChannel, + priority: this.determinePriority(template.event) + }; + } +} +``` + +## Usage Analytics Dashboard + +```typescript +interface SubscriptionAnalytics { + totalRevenue: number; + activeSubscriptions: number; + churnRate: number; + averageLifetime: number; + topPlans: PlanAnalytics[]; +} + +class AnalyticsDashboard { + async generateDashboardData( + timeframe: TimeFrame + ): Promise { + return { + overview: await this.generateOverview(timeframe), + trends: await this.analyzeTrends(timeframe), + forecasts: await this.generateForecasts(), + recommendations: await this.generateRecommendations() + }; + } + + private async generateOverview( + timeframe: TimeFrame + ): Promise { + return { + totalRevenue: await this.calculateRevenue(timeframe), + activeSubscriptions: await this.countActiveSubscriptions(), + growthRate: await this.calculateGrowthRate(timeframe), + churnMetrics: await this.analyzeChurn(timeframe) + }; + } +} +``` + +## Rate Limiting and Throttling + +```typescript +class RateLimiter { + private readonly redis: Redis; + private readonly limits: Map; + + async checkLimit( + key: string, + operation: string + ): Promise { + const limit = this.limits.get(operation); + if (!limit) return true; + + const current = await this.redis.incr(key); + if (current === 1) { + await this.redis.expire(key, limit.windowSeconds); + } + + return current <= limit.maxRequests; + } + + async handleRateLimit( + subscription: Subscription, + operation: string + ): Promise { + const key = `${subscription.id}:${operation}`; + + if (!await this.checkLimit(key, operation)) { + throw new RateLimitError( + `Rate limit exceeded for ${operation}` + ); + } + } +} +``` + +## Disaster Recovery + +```typescript +class DisasterRecovery { + async backupData(): Promise { + const backup = { + subscriptions: await this.backupSubscriptions(), + payments: await this.backupPayments(), + metadata: await this.backupMetadata() + }; + + await this.storeBackup(backup); + return backup; + } + + async restore( + backupId: string, + options: RestoreOptions + ): Promise { + const backup = await this.loadBackup(backupId); + + // Validate backup integrity + this.validateBackup(backup); + + // Perform restoration + const result = await this.performRestore(backup, options); + + // Verify restoration + await this.verifyRestoration(result); + + return result; + } +} +``` + +## Subscription Lifecycle Events + +```mermaid +stateDiagram-v2 + [*] --> Created + Created --> Active: Payment Successful + Active --> Suspended: Payment Failed + Active --> Cancelled: User Cancellation + Active --> Expired: Term Ended + Suspended --> Active: Payment Resumed + Suspended --> Cancelled: Grace Period Ended + Cancelled --> [*] + Expired --> [*] +``` + +## Future Enhancements + +1. Smart Contract Upgrades +```typescript +class ContractUpgrader { + async upgradeContract( + newVersion: string + ): Promise { + // Validate new version + await this.validateNewVersion(newVersion); + + // Deploy new contract + const newContract = await this.deployNewVersion(newVersion); + + // Migrate state + await this.migrateState(newContract); + + // Switch over + await this.switchToNewContract(newContract); + + return { + newContractAddress: newContract.address, + migrationStatus: 'success' + }; + } +} +``` + +2. Multi-Token Support +```typescript +interface TokenConfig { + address: string; + decimals: number; + symbol: string; +} + +class MultiTokenSubscription { + private supportedTokens: Map; + + async addToken( + config: TokenConfig + ): Promise { + await this.validateToken(config); + this.supportedTokens.set(config.symbol, config); + } + + async createMultiTokenSubscription( + subscriber: string, + token: string, + amount: number + ): Promise { + const config = this.supportedTokens.get(token); + if (!config) throw new Error('Unsupported token'); + + return await this.createSubscription( + subscriber, + config, + amount + ); + } +} +``` + +## API Documentation + +### REST Endpoints + +```typescript +interface SubscriptionAPI { + // Subscription Management + 'POST /subscriptions': { + body: CreateSubscriptionDTO; + response: SubscriptionResponse; + }; + + 'GET /subscriptions/:id': { + params: { id: string }; + response: SubscriptionDetails; + }; + + 'PATCH /subscriptions/:id': { + params: { id: string }; + body: UpdateSubscriptionDTO; + response: SubscriptionResponse; + }; + + 'DELETE /subscriptions/:id': { + params: { id: string }; + response: void; + }; + + // Payment Management + 'GET /subscriptions/:id/payments': { + params: { id: string }; + query: PaymentQueryParams; + response: PaymentHistory; + }; + + // Analytics + 'GET /subscriptions/analytics': { + query: AnalyticsParams; + response: AnalyticsResponse; + }; +} +``` + +## Complete Implementation Checklist + +1. Core Infrastructure + - [ ] Smart Contract Development + - [ ] Database Schema Design + - [ ] API Layer Implementation + - [ ] Payment Processing System + +2. Security Measures + - [ ] Authentication System + - [ ] Authorization Rules + - [ ] Rate Limiting + - [ ] Input Validation + +3. Integration Features + - [ ] Webhook System + - [ ] Notification Service + - [ ] Analytics Dashboard + - [ ] Reporting System + +4. Maintenance Tools + - [ ] Monitoring System + - [ ] Backup Solution + - [ ] Migration Tools + - [ ] Testing Framework + +## Deployment Guide + +```mermaid +flowchart TB + subgraph Preparation + Config[Configuration] + Deps[Dependencies] + Env[Environment Setup] + end + + subgraph Deployment + Contract[Deploy Contract] + Backend[Deploy Backend] + Frontend[Deploy Frontend] + end + + subgraph Verification + Test[Testing] + Monitor[Monitoring] + Backup[Backup] + end + + Preparation --> Deployment + Deployment --> Verification +``` + +## Support and Resources + +### Support Channels +- Technical Support: support@swarms.world +- Developer Discord: discord.gg/swarms +- Documentation Site: docs.swarms.world + +## Version History +- v1.0.0: Initial Release +- v1.1.0: Added Multi-Token Support +- v1.2.0: Enhanced Analytics +- v1.3.0: Improved Error Handling \ No newline at end of file diff --git a/docs/finance/wallet.md b/docs/finance/wallet.md new file mode 100644 index 00000000..f3eeeeec --- /dev/null +++ b/docs/finance/wallet.md @@ -0,0 +1,383 @@ +# $swarms Token Integration Guide + +## Overview +This guide covers the integration of $swarms token (74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump) payments into your platform using Solana and Phantom wallet. The $swarms token offers numerous benefits on the Solana blockchain, including high transaction per second (TPS) capabilities, low transaction fees, and more. + +## Table of Contents +- [Prerequisites](#prerequisites) +- [Installation](#installation) +- [Architecture Overview](#architecture-overview) +- [Setup Guide](#setup-guide) +- [Integration Examples](#integration-examples) +- [One-Click Payment Implementation](#one-click-payment-implementation) +- [Security Considerations](#security-considerations) +- [Troubleshooting](#troubleshooting) + +## Prerequisites +- Node.js v16.x or higher +- TypeScript 4.x or higher +- Phantom Wallet browser extension +- Solana development environment + +## Installation + +```bash +npm install @solana/web3.js @solana/spl-token @project-serum/anchor @solana/wallet-adapter-react @solana/wallet-adapter-phantom +``` + +## Architecture Overview + +```mermaid +flowchart TB + A[User Interface] -->|Trigger Payment| B[Payment Handler] + B --> C{Phantom Wallet} + C -->|Sign Transaction| D[Solana Network] + D -->|Execute Transfer| E[$swarms Token Contract] + E -->|Confirm Transaction| F[Payment Confirmation] + F -->|Update UI| A +``` + +## Setup Guide + +### 1. Initialize Solana Connection + +```typescript +import { Connection, clusterApiUrl } from '@solana/web3.js'; +import { PhantomWalletAdapter } from '@solana/wallet-adapter-phantom'; + +const connection = new Connection(clusterApiUrl('mainnet-beta')); +const wallet = new PhantomWalletAdapter(); + +// Initialize wallet connection +await wallet.connect(); +``` + +### 2. Configure Token Parameters + +```typescript +const SWARMS_TOKEN_ADDRESS = '74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump'; + +interface TokenConfig { + mint: PublicKey; + decimals: number; +} + +const swarmTokenConfig: TokenConfig = { + mint: new PublicKey(SWARMS_TOKEN_ADDRESS), + decimals: 9 +}; +``` + +### 3. Create Payment Handler + +```typescript +export class SwarmPaymentHandler { + private connection: Connection; + private wallet: PhantomWalletAdapter; + + constructor(connection: Connection, wallet: PhantomWalletAdapter) { + this.connection = connection; + this.wallet = wallet; + } + + async createTransferTransaction( + amount: number, + recipientAddress: string + ): Promise { + const transaction = new Transaction(); + + const transferInstruction = createTransferInstruction( + await getAssociatedTokenAddress(swarmTokenConfig.mint, this.wallet.publicKey), + await getAssociatedTokenAddress(swarmTokenConfig.mint, new PublicKey(recipientAddress)), + this.wallet.publicKey, + amount * Math.pow(10, swarmTokenConfig.decimals) + ); + + transaction.add(transferInstruction); + return transaction; + } +} +``` + +## One-Click Payment Implementation + +### React Component Example + +```typescript +import React, { useState } from 'react'; +import { useWallet } from '@solana/wallet-adapter-react'; + +const SwarmPaymentButton: React.FC<{ + amount: number; + recipientAddress: string; +}> = ({ amount, recipientAddress }) => { + const [loading, setLoading] = useState(false); + const wallet = useWallet(); + const paymentHandler = new SwarmPaymentHandler(connection, wallet); + + const handlePayment = async () => { + try { + setLoading(true); + + const transaction = await paymentHandler.createTransferTransaction( + amount, + recipientAddress + ); + + const signature = await wallet.sendTransaction(transaction, connection); + await connection.confirmTransaction(signature); + + // Handle success + console.log('Payment successful:', signature); + } catch (error) { + console.error('Payment failed:', error); + } finally { + setLoading(false); + } + }; + + return ( + + ); +}; +``` + +### Payment Flow Sequence + +```mermaid +sequenceDiagram + participant User + participant UI + participant PaymentHandler + participant PhantomWallet + participant Solana + + User->>UI: Click Pay Button + UI->>PaymentHandler: Create Transaction + PaymentHandler->>PhantomWallet: Request Signature + PhantomWallet->>User: Prompt for Approval + User->>PhantomWallet: Approve Transaction + PhantomWallet->>Solana: Submit Transaction + Solana->>PaymentHandler: Confirm Transaction + PaymentHandler->>UI: Update Status + UI->>User: Show Confirmation +``` + +## Security Considerations + +### Transaction Validation + +```typescript +function validateTransaction( + transaction: Transaction, + expectedAmount: number, + expectedRecipient: PublicKey +): boolean { + try { + const instruction = transaction.instructions[0]; + const decodedData = TOKEN_PROGRAM_ID.decode(instruction.data); + + return ( + decodedData.amount === expectedAmount && + instruction.keys[1].pubkey.equals(expectedRecipient) + ); + } catch (error) { + console.error('Transaction validation failed:', error); + return false; + } +} +``` + +### Error Handling + +```typescript +class PaymentError extends Error { + constructor( + message: string, + public code: string, + public transaction?: string + ) { + super(message); + this.name = 'PaymentError'; + } +} + +async function handlePaymentError(error: any): Promise { + if (error instanceof WalletError) { + // Handle wallet-specific errors + throw new PaymentError( + 'Wallet error occurred', + 'WALLET_ERROR', + error.message + ); + } else if (error.code === 'TransactionError') { + // Handle Solana transaction errors + throw new PaymentError( + 'Transaction failed', + 'TRANSACTION_ERROR', + error.txid + ); + } + // Handle other errors... +} +``` + +## Testing + +### Unit Test Example + +```typescript +import { expect } from 'chai'; +import { SwarmPaymentHandler } from './payment-handler'; + +describe('SwarmPaymentHandler', () => { + let paymentHandler: SwarmPaymentHandler; + + beforeEach(() => { + // Setup test environment + }); + + it('should create valid transfer transaction', async () => { + const amount = 100; + const recipientAddress = 'recipient_address'; + + const transaction = await paymentHandler.createTransferTransaction( + amount, + recipientAddress + ); + + expect(transaction.instructions).to.have.lengthOf(1); + // Add more assertions... + }); +}); +``` + +## Troubleshooting + +### Common Issues and Solutions + +1. **Insufficient Balance** +```typescript +async function checkBalance( + connection: Connection, + walletAddress: PublicKey +): Promise { + const balance = await connection.getTokenAccountBalance( + await getAssociatedTokenAddress(swarmTokenConfig.mint, walletAddress) + ); + + return parseInt(balance.value.amount) > 0; +} +``` + +2. **Transaction Timeout** +```typescript +async function submitWithRetry( + transaction: Transaction, + maxRetries = 3 +): Promise { + let attempt = 0; + + while (attempt < maxRetries) { + try { + const signature = await wallet.sendTransaction(transaction, connection); + const confirmation = await connection.confirmTransaction(signature); + + if (confirmation.value.err) { + throw new Error('Transaction failed'); + } + + return signature; + } catch (error) { + attempt++; + if (attempt === maxRetries) throw error; + await new Promise(resolve => setTimeout(resolve, 1000 * attempt)); + } + } +} +``` + +## Monitoring and Analytics + +### Transaction Monitoring + +```typescript +interface TransactionMetrics { + timestamp: number; + amount: number; + success: boolean; + duration: number; +} + +class TransactionMonitor { + private metrics: TransactionMetrics[] = []; + + logTransaction(metric: TransactionMetrics): void { + this.metrics.push(metric); + // Add your analytics implementation + } + + getAverageSuccessRate(): number { + return ( + this.metrics.filter(m => m.success).length / this.metrics.length * 100 + ); + } +} +``` + +## Advanced Features + +### Batch Payments + +```typescript +async function createBatchPayment( + recipients: Array<{ address: string; amount: number }> +): Promise { + const transaction = new Transaction(); + + for (const recipient of recipients) { + const transferInstruction = createTransferInstruction(/* ... */); + transaction.add(transferInstruction); + } + + return transaction; +} +``` + +### Subscription Payments + +```typescript +class SubscriptionManager { + async createSubscription( + amount: number, + interval: number, + recipientAddress: string + ): Promise { + // Implementation for recurring payments + } + + async cancelSubscription(subscriptionId: string): Promise { + // Implementation for cancellation + } +} +``` + +## Support and Resources + +For additional support: +- Solana Documentation: https://docs.solana.com +- Phantom Wallet Docs: https://docs.phantom.app +- $swarms Token Contract: 74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump + +## Version History + +- v1.0.0 - Initial release +- v1.0.1 - Added batch payment support +- v1.0.2 - Enhanced error handling +- v1.0.3 - Added subscription payment feature \ No newline at end of file diff --git a/docs/guides/financial_analysis_swarm_mm.md b/docs/guides/financial_analysis_swarm_mm.md index ea83b7d0..4448cbb2 100644 --- a/docs/guides/financial_analysis_swarm_mm.md +++ b/docs/guides/financial_analysis_swarm_mm.md @@ -7,7 +7,7 @@ Before we dive into the code, let's briefly introduce the Swarms framework. Swar For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities. Additional resources: -- [Swarms Discord](https://discord.com/servers/agora-999382051935506503) for community discussions +- [Swarms Discord](https://discord.gg/swarms) for community discussions - [Swarms Twitter](https://x.com/swarms_corp) for updates - [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts - [Swarms Blog](https://medium.com/@kyeg) for in-depth articles @@ -460,7 +460,7 @@ This system provides a powerful foundation for financial analysis, but there's a Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration. -For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.com/servers/agora-999382051935506503). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp). +For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/swarms). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp). If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions. @@ -474,7 +474,7 @@ By leveraging the power of multi-agent AI systems, you're well-equipped to navig * [Swarms Github](https://github.com/kyegomez/swarms) -* [Swarms Discord](https://discord.com/servers/agora-999382051935506503) +* [Swarms Discord](https://discord.gg/swarms) * [Swarms Twitter](https://x.com/swarms_corp) * [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) * [Swarms Blog](https://medium.com/@kyeg) diff --git a/docs/guides/healthcare_blog.md b/docs/guides/healthcare_blog.md index 22d5d053..306b8046 100644 --- a/docs/guides/healthcare_blog.md +++ b/docs/guides/healthcare_blog.md @@ -261,7 +261,7 @@ The table below summarizes the estimated savings for each use case: - [book a call](https://cal.com/swarms) -- Swarms Discord: https://discord.com/servers/agora-999382051935506503 +- Swarms Discord: https://discord.gg/swarms - Swarms Twitter: https://x.com/swarms_corp diff --git a/docs/index.md b/docs/index.md index 367855ee..ca805029 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,6 +1,6 @@ # Welcome to Swarms Docs Home -[![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/agora-999382051935506503) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@kyegomez3242) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/kye-g-38759a207/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/kyegomezb) +[![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/jM3Z6M9uMq) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@kyegomez3242) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/kye-g-38759a207/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/kyegomezb) **Get Started Building Production-Grade Multi-Agent Applications** @@ -39,7 +39,7 @@ Here you'll find references about the Swarms framework, marketplace, community, ## Community | Section | Links | |----------------------|--------------------------------------------------------------------------------------------| -| Community | [Discord](https://discord.com/servers/agora-999382051935506503) | +| Community | [Discord](https://discord.gg/swarms) | | Blog | [Blog](https://medium.com/@kyeg) | | Event Calendar | [LUMA](https://lu.ma/swarms_calendar) | | Twitter | [Twitter](https://x.com/swarms_corp) | diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 2bd89179..8f1454b0 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -32,13 +32,17 @@ plugins: show_symbol_type_heading: true show_symbol_type_toc: true show_category_heading: true - domains: [std, py] - git-committers: repository: kyegomez/swarms branch: master # token: !ENV ["GITHUB_TOKEN"] - git-revision-date-localized: enable_creation_date: true + # - mkdocs-jupyter: + # kernel_name: python3 + # execute: false + # include_source: True + # include_requirejs: true extra_css: - assets/css/extra.css extra: @@ -50,7 +54,7 @@ extra: - icon: fontawesome/brands/twitter link: https://x.com/swarms_corp - icon: fontawesome/brands/discord - link: https://discord.com/servers/agora-999382051935506503 + link: https://discord.gg/swarms analytics: provider: google @@ -140,34 +144,35 @@ nav: - Overview: "index.md" # - The Vision: "swarms/framework/vision.md" # - Docker Setup: "swarms/install/docker_setup.md" - - Our Goal; The Ultimate Multi-Agent LLM Framework for Developers: "swarms/concept/vision.md" + - Swarms Vision: "swarms/concept/vision.md" - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md" + - Swarms Products: "swarms/products.md" - Onboarding: - Installation: "swarms/install/install.md" - Environment Configuration: "swarms/install/workspace_manager.md" + - Environment Variables: "swarms/install/env.md" - Quickstart: "swarms/install/quickstart.md" - Swarms CLI: "swarms/cli/main.md" - # - Swarms + Docker: - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" # - Prelimary: # - 80/20 Rule For Agents: "swarms/prompting/8020.md" - - Managing Prompts in Production: "swarms/prompts/main.md" - Agents: # - Overview: "swarms/structs/index.md" - # - Build Custom Agents: "swarms/structs/diy_your_own_agent.md" + - Managing Prompts in Production: "swarms/prompts/main.md" - Agent Architecture: "swarms/framework/agents_explained.md" - Complete Agent API: "swarms/structs/agent.md" - - OpenAI Assistant: "swarms/agents/openai_assistant.md" - Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md" - - Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md" - - Creating Custom Agents: "swarms/agents/new_agent.md" - Tools: - Overview: "swarms/tools/main.md" - What are tools?: "swarms/tools/build_tool.md" - ToolAgent: "swarms/agents/tool_agent.md" - - Tool Storage & tool_registry decorator: "swarms/tools/tool_storage.md" + - Tool Storage: "swarms/tools/tool_storage.md" - RAG || Long Term Memory: - Integrating RAG with Agents: "swarms/memory/diy_memory.md" + - Third-Party Agent Integrations: + - OpenAI Assistant: "swarms/agents/openai_assistant.md" + - Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md" + - Creating Custom Agents: "swarms/agents/new_agent.md" - Swarm Architectures: - Why MultiAgent Collaboration is Necessary: "swarms/concept/why.md" - Swarm Architectures: "swarms/concept/swarm_architectures.md" @@ -188,6 +193,8 @@ nav: - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md" - SwarmRearrange: "swarms/structs/swarm_rearrange.md" - MultiAgentRouter: "swarms/structs/multi_agent_router.md" + - MatrixSwarm: "swarms/structs/matrix_swarm.md" + - ModelRouter: "swarms/structs/model_router.md" - Various Execution Methods: "swarms/structs/various_execution_methods.md" - Workflows: - ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md" @@ -195,7 +202,6 @@ nav: - SequentialWorkflow: "swarms/structs/sequential_workflow.md" - Structs: - Conversation: "swarms/structs/conversation.md" - # - Task: "swarms/structs/task.md" - Full API Reference: "swarms/framework/reference.md" - Examples: - Unique Swarms: "swarms/examples/unique_swarms.md" @@ -208,6 +214,30 @@ nav: - Ollama: "swarms/examples/ollama.md" - OpenRouter: "swarms/examples/openrouter.md" - XAI: "swarms/examples/xai.md" + - Swarms Tools: + - Agent with Yahoo Finance: "swarms/examples/yahoo_finance.md" + - Twitter Agents: "swarms_tools/twitter.md" + - Blockchain Agents: + - Agent with HTX + CoinGecko: "swarms/examples/swarms_tools_htx.md" + - Agent with HTX + CoinGecko Function Calling: "swarms/examples/swarms_tools_htx_gecko.md" + - Lumo: "swarms/examples/lumo.md" + - Quant Crypto Agent: "swarms/examples/quant_crypto_agent.md" + - Meme Agents: + - Bob The Builder: "swarms/examples/bob_the_builder.md" + - Meme Agent Builder: "swarms/examples/meme_agents.md" + - Multi-Agent Collaboration: + - Swarms DAO: "swarms/examples/swarms_dao.md" + - Contributors: + - Bounty Program: "corporate/bounty_program.md" + - Contributing: + - Contributing: "swarms/contributing.md" + - Tests: "swarms/framework/test.md" + - Code Cleanliness: "swarms/framework/code_cleanliness.md" + - Philosophy: "swarms/concept/philosophy.md" + - Changelog: + - Swarms 5.6.8: "swarms/changelog/5_6_8.md" + - Swarms 5.8.1: "swarms/changelog/5_8_1.md" + - Swarms 5.9.2: "swarms/changelog/changelog_new.md" - Swarm Models: - Overview: "swarms/models/index.md" # - Models Available: "swarms/models/index.md" @@ -222,24 +252,26 @@ nav: - OpenAIChat: "swarms/models/openai.md" - OpenAIFunctionCaller: "swarms/models/openai_function_caller.md" - Groq: "swarms/models/groq.md" - # - Ollama: - # - Fireworks - # - Octo - # - Liquid AI - MultiModal Models: - BaseMultiModalModel: "swarms/models/base_multimodal_model.md" - Multi Modal Models Available: "swarms/models/multimodal_models.md" - GPT4VisionAPI: "swarms/models/gpt4v.md" + - Swarms Tools: + - Overview: "swarms_tools/overview.md" + - Finance: "swarms_tools/finance.md" + - Search: "swarms_tools/search.md" + - Social Media: + - Overview: "swarms_tools/social_media.md" + - Twitter: "swarms_tools/twitter.md" - Swarms Cloud API: # - Overview: "swarms_cloud/main.md" - - Overview: "swarms_cloud/vision.md" - - MCS API: "swarms_cloud/mcs_api.md" - - Swarms Cloud CLI: "swarms_cloud/cli.md" - # - Add Agents to Marketplace: "swarms_cloud/add_agent.md" - # - Available Models: "swarms_cloud/available_models.md" - # - Agent API: "swarms_cloud/agent_api.md" - # - Migrate from OpenAI to Swarms in 3 lines of code: "swarms_cloud/migrate_openai.md" - # - Getting Started with SOTA Vision Language Models VLM: "swarms_cloud/getting_started.md" + # - Overview: "swarms_cloud/vision.md" + - Overview: "swarms_cloud/launch.md" + - Deploying Swarms on Google Cloud Run: "swarms_cloud/cloud_run.md" + # - Swarms Cloud CLI: "swarms_cloud/cli.md" + - Swarm APIs: + - MCS API: "swarms_cloud/mcs_api.md" + - CreateNow API: "swarms_cloud/create_api.md" - Swarms Memory: - Overview: "swarms_memory/index.md" - Memory Systems: @@ -248,7 +280,9 @@ nav: - Faiss: "swarms_memory/faiss.md" - Swarms Marketplace: - Overview: "swarms_platform/index.md" - - Share & Discover Prompts, Agents, Tools, and more: "swarms_platform/share_discover.md" + - Agent Marketplace: "swarms_platform/share_discover.md" + - Swarm Platform API Keys: "swarms_platform/apikeys.md" + - Account Management: "swarms_platform/account_management.md" - Prompts API: - Add Prompts: "swarms_platform/prompts/add_prompt.md" - Edit Prompts: "swarms_platform/prompts/edit_prompt.md" @@ -259,32 +293,16 @@ nav: - Edit Agents: "swarms_platform/agents/edit_agent.md" - Telemetry API: - PUT: "swarms_platform/telemetry/index.md" + - Swarms Wallet API: + - Overview: "swarms/wallet/api.md" # - Tools API: # - Overview: "swarms_platform/tools_api.md" # - Add Tools: "swarms_platform/fetch_tools.md" - # - Guides: - # - Unlocking Efficiency and Cost Savings in Healthcare; How Swarms of LLM Agents Can Revolutionize Medical Operations and Save Millions: "guides/healthcare_blog.md" - # - Understanding Agent Evaluation Mechanisms: "guides/agent_evals.md" - # - Agent Glossary: "swarms/glossary.md" - # - The Ultimate Technical Guide to the Swarms CLI; A Step-by-Step Developers Guide: "swarms/cli/cli_guide.md" - # - Prompting Guide: - # - The Essence of Enterprise-Grade Prompting: "swarms/prompts/essence.md" - # - An Analysis on Prompting Strategies: "swarms/prompts/overview.md" - # - Managing Prompts in Production: "swarms/prompts/main.md" - - Community: - - Bounty Program: "corporate/bounty_program.md" - - Contributing: - - Contributing: "swarms/contributing.md" - - Tests: "swarms/framework/test.md" - - Code Cleanliness: "swarms/framework/code_cleanliness.md" - - Philosophy: "swarms/concept/philosophy.md" - - Changelog: - - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - - Swarms 5.8.1: "swarms/changelog/5_8_1.md" - - Swarms 5.9.2: "swarms/changelog/changelog_new.md" - Corporate: - Culture: "corporate/culture.md" - Hiring: "corporate/hiring.md" - Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md" - # - Clusterops: - # - Overview: "clusterops/reference.md" \ No newline at end of file + - Web3: + # - Overview: "finance/index.md" + - Swarms Wallet: "finance/wallet.md" + - Swarms Subscription: "finance/subscription.md" diff --git a/docs/requirements.txt b/docs/requirements.txt index 82dbabb0..1da89301 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -18,6 +18,7 @@ mkdocs-mermaid2-plugin mkdocs-include-markdown-plugin mkdocs-enumerate-headings-plugin mkdocs-autolinks-plugin +mkdocstrings-python mkdocs-minify-html-plugin mkdocs-autolinks-plugin diff --git a/docs/swarms/concept/framework_architecture.md b/docs/swarms/concept/framework_architecture.md index b5e3682a..e704ba8e 100644 --- a/docs/swarms/concept/framework_architecture.md +++ b/docs/swarms/concept/framework_architecture.md @@ -155,5 +155,5 @@ By understanding the purpose and role of each folder in the Swarms framework, us - **Community Support** - - URL: [Submit issue](https://discord.gg/agora-999382051935506503) + - URL: [Submit issue](https://discord.gg/jM3Z6M9uMq) - Ask the community for support in real-time and or admin support \ No newline at end of file diff --git a/docs/swarms/concept/swarm_architectures.md b/docs/swarms/concept/swarm_architectures.md index 54b5d767..e2d206fd 100644 --- a/docs/swarms/concept/swarm_architectures.md +++ b/docs/swarms/concept/swarm_architectures.md @@ -172,10 +172,6 @@ graph TD H --> I{Autosave Enabled?} I --> |Yes| J[Export Metadata to JSON] I --> |No| K[End Swarm Run] - - %% Style adjustments - classDef blackBox fill:#000,stroke:#f00,color:#fff; - class A,B,C,D,E1,E2,E3,F1,F2,F3,G1,G2,G3,H,I,J,K blackBox; ``` @@ -198,8 +194,6 @@ graph TD F & G & H --> I[Layer 2: Aggregator Agent] I --> J[Aggregate All Responses] J --> K[Final Output] - - ``` diff --git a/docs/swarms/concept/swarm_ecosystem.md b/docs/swarms/concept/swarm_ecosystem.md index d6af5a3e..da7626c9 100644 --- a/docs/swarms/concept/swarm_ecosystem.md +++ b/docs/swarms/concept/swarm_ecosystem.md @@ -60,17 +60,6 @@ graph TD; SP --> Sell[Sell Agents] ``` -#### 6. **IoTAgents** - -[IoTAgents](https://github.com/The-Swarm-Corporation/swarm-ecosystem) enables seamless integration between IoT data and AI agents, allowing the real-time processing of IoT data streams and driving smart automation in industries such as logistics, healthcare, and smart cities. - -```mermaid -graph TD; - IA[IoTAgents] --> Parse[Parse IoT Data] - IA --> Process[Process IoT Data] - IA --> Utilize[Utilize IoT Data Streams] -``` - #### Extending the Ecosystem: **Swarms Core**, **JS**, and More In addition to the core components, the Swarms Ecosystem offers several other powerful packages: diff --git a/docs/swarms/examples/deepseek.md b/docs/swarms/examples/deepseek.md index 7b4769b2..7b40ff75 100644 --- a/docs/swarms/examples/deepseek.md +++ b/docs/swarms/examples/deepseek.md @@ -22,6 +22,30 @@ agent = Agent( agent_description="Agent performs financial analysis.", ) +# Run a query +agent.run("What are the components of a startup's stock incentive equity plan?") +``` + +## R1 + +This is a simple example of how to use the DeepSeek Reasoner model otherwise known as R1. + +```python + +import os +from swarms import Agent +from dotenv import load_dotenv + +load_dotenv() + +# Initialize the agent with ChromaDB memory +agent = Agent( + agent_name="Financial-Analysis-Agent", + model_name="deepseek/deepseek-reasoner", + system_prompt="Agent system prompt here", + agent_description="Agent performs financial analysis.", +) + # Run a query agent.run("What are the components of a startup's stock incentive equity plan?") ``` \ No newline at end of file diff --git a/docs/swarms/examples/lumo.md b/docs/swarms/examples/lumo.md new file mode 100644 index 00000000..ec76a09b --- /dev/null +++ b/docs/swarms/examples/lumo.md @@ -0,0 +1,63 @@ +# Lumo Example +Introducing Lumo-70B-Instruct - the largest and most advanced AI model ever created for the Solana ecosystem. Built on Meta's groundbreaking LLaMa 3.3 70B Instruct foundation, this revolutionary model represents a quantum leap in blockchain-specific artificial intelligence. With an unprecedented 70 billion parameters and trained on the most comprehensive Solana documentation dataset ever assembled, Lumo-70B-Instruct sets a new standard for developer assistance in the blockchain space. + + +- [Docs](https://huggingface.co/lumolabs-ai/Lumo-70B-Instruct) + +```python +from swarms import Agent +from transformers import LlamaForCausalLM, AutoTokenizer +import torch +from transformers import BitsAndBytesConfig + +class Lumo: + """ + A class for generating text using the Lumo model with 4-bit quantization. + """ + def __init__(self): + """ + Initializes the Lumo model with 4-bit quantization and a tokenizer. + """ + # Configure 4-bit quantization + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + llm_int8_enable_fp32_cpu_offload=True + ) + + self.model = LlamaForCausalLM.from_pretrained( + "lumolabs-ai/Lumo-70B-Instruct", + device_map="auto", + quantization_config=bnb_config, + use_cache=False, + attn_implementation="sdpa" + ) + self.tokenizer = AutoTokenizer.from_pretrained("lumolabs-ai/Lumo-70B-Instruct") + + def run(self, task: str) -> str: + """ + Generates text based on the given prompt using the Lumo model. + + Args: + prompt (str): The input prompt for the model. + + Returns: + str: The generated text. + """ + inputs = self.tokenizer(task, return_tensors="pt").to(self.model.device) + outputs = self.model.generate(**inputs, max_new_tokens=100) + return self.tokenizer.decode(outputs[0], skip_special_tokens=True) + + + + +Agent( + agent_name="Solana-Analysis-Agent", + llm=Lumo(), + max_loops="auto", + interactive=True, + streaming_on=True, +).run("How do i create a smart contract in solana?") + +``` \ No newline at end of file diff --git a/docs/swarms/examples/meme_agent_builder.md b/docs/swarms/examples/meme_agent_builder.md new file mode 100644 index 00000000..4a70ac87 --- /dev/null +++ b/docs/swarms/examples/meme_agent_builder.md @@ -0,0 +1,28 @@ +# Meme Agent Builder + +- `pip3 install -U swarms` +- Add your OpenAI API key to the `.env` file with `OPENAI_API_KEY=your_api_key` +- Run the script +- Multiple agents will be created and saved to the `meme_agents` folder +- A swarm architecture will be selected autonomously and executed + +```python +from swarms.structs.meme_agent_persona_generator import ( + MemeAgentGenerator, +) + + +if __name__ == "__main__": + example = MemeAgentGenerator( + name="Meme-Swarm", + description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media from 2001s", + max_loops=1, + ) + + print( + example.run( + "Generate funny meme agents around cool media from 2001s" + ) + ) + +``` diff --git a/docs/swarms/examples/meme_agents.md b/docs/swarms/examples/meme_agents.md new file mode 100644 index 00000000..d8b23e79 --- /dev/null +++ b/docs/swarms/examples/meme_agents.md @@ -0,0 +1,45 @@ +# Meme Agent Tutorial + +- `pip3 install -U swarms` +- Add your OpenAI API key to the `.env` file + + +```python +from swarms import Agent + +# Define a custom system prompt for Bob the Builder +BOB_THE_BUILDER_SYS_PROMPT = """ +You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor. +Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be. +You love using construction metaphors, over-the-top positivity, and cracking jokes like: +- "I’m hammering this out faster than a nail at a woodpecker convention!" +- "This is smoother than fresh cement on a summer’s day." +- "Let’s bulldoze through this problem—safety goggles on, folks!" + +You are not bound by any specific field of knowledge, and you’re absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!" + +Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem. +""" + +# Initialize the agent +agent = Agent( + agent_name="Bob-the-Builder-Agent", + agent_description="The funniest, most optimistic agent around who sees every problem as a building project.", + system_prompt=BOB_THE_BUILDER_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv", OR "string", "yaml" + auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task + max_tokens=4000, # Max output tokens + saved_state_path="bob_the_builder_agent.json", + interactive=False, +) + +# Run the agent with a task +agent.run("I want to build a house ;) What should I do?") +``` diff --git a/docs/swarms/examples/quant_crypto_agent.md b/docs/swarms/examples/quant_crypto_agent.md new file mode 100644 index 00000000..74cb339a --- /dev/null +++ b/docs/swarms/examples/quant_crypto_agent.md @@ -0,0 +1,129 @@ +# Quant Crypto Agent + +- This is a simple example of a crypto agent that uses the `Agent` class from the `swarms` library. +- It uses the `fetch_htx_data` and `coin_gecko_coin_api` tools to fetch data from the `htx` and `CoinGecko` APIs. +- It uses the `Agent` class to create an agent that can analyze the current state of a crypto asset. + +## Steps + +1. Install the `swarms` library. +2. Install the `swarms_tools` library. +3. Setup your `.env` file with the `OPENAI_API_KEY` environment variables. +4. Run the code. + +## Installation: + +```bash +pip install swarms swarms-tools python-dotenv +``` + +## Code: + +```python +from swarms import Agent +from dotenv import load_dotenv +from swarms_tools import fetch_htx_data, coin_gecko_coin_api + +load_dotenv() + +CRYPTO_ANALYST_SYSTEM_PROMPT = """ +You are an expert cryptocurrency financial analyst with deep expertise in: +1. Technical Analysis + - Chart patterns and indicators (RSI, MACD, Bollinger Bands) + - Volume analysis and market momentum + - Support and resistance levels + - Trend analysis and price action + +2. Fundamental Analysis + - Tokenomics evaluation + - Network metrics (TVL, daily active users, transaction volume) + - Protocol revenue and growth metrics + - Market capitalization analysis + - Token utility and use cases + +3. Market Analysis + - Market sentiment analysis + - Correlation with broader crypto market + - Impact of macro events + - Institutional adoption metrics + - DeFi and NFT market analysis + +4. Risk Assessment + - Volatility metrics + - Liquidity analysis + - Smart contract risks + - Regulatory considerations + - Exchange exposure risks + +5. Data Analysis Methods + - On-chain metrics analysis + - Whale wallet tracking + - Exchange inflow/outflow + - Mining/Staking statistics + - Network health indicators + +When analyzing crypto assets, always: +1. Start with a comprehensive market overview +2. Examine both on-chain and off-chain metrics +3. Consider multiple timeframes (short, medium, long-term) +4. Evaluate risk-reward ratios +5. Assess market sentiment and momentum +6. Consider regulatory and security factors +7. Analyze correlations with BTC, ETH, and traditional markets +8. Examine liquidity and volume profiles +9. Review recent protocol developments and updates +10. Consider macro economic factors + +Format your analysis with: +- Clear section headings +- Relevant metrics and data points +- Risk warnings and disclaimers +- Price action analysis +- Market sentiment summary +- Technical indicators +- Fundamental factors +- Clear recommendations with rationale + +Remember to: +- Always provide data-driven insights +- Include both bullish and bearish scenarios +- Highlight key risk factors +- Consider market cycles and seasonality +- Maintain objectivity in analysis +- Cite sources for data and claims +- Update analysis based on new market conditions +""" + +# Initialize the crypto analysis agent +agent = Agent( + agent_name="Crypto-Analysis-Expert", + agent_description="Expert cryptocurrency financial analyst and market researcher", + system_prompt=CRYPTO_ANALYST_SYSTEM_PROMPT, + max_loops="auto", + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="crypto_analyst", + output_type="str", + interactive=True, +) + +print(fetch_htx_data("sol")) +print(coin_gecko_coin_api("solana")) + +# Example usage +agent.run( + f""" + Analyze the current state of Solana (SOL), including: + 1. Technical analysis of price action + 2. On-chain metrics and network health + 3. Recent protocol developments + 4. Market sentiment + 5. Risk factors + Please provide a comprehensive analysis with data-driven insights. + + # Solana CoinGecko Data + Real-tim data from Solana CoinGecko: \n {coin_gecko_coin_api("solana")} + + """ +) +``` \ No newline at end of file diff --git a/docs/swarms/examples/swarms_dao.md b/docs/swarms/examples/swarms_dao.md new file mode 100644 index 00000000..d1cadc72 --- /dev/null +++ b/docs/swarms/examples/swarms_dao.md @@ -0,0 +1,237 @@ +# Swarms DAO Example + +This example demonstrates how to create a swarm of agents to collaborate on a task. The agents are designed to work together to create a comprehensive strategy for a DAO focused on decentralized governance for climate action. + +You can customize the agents and their system prompts to fit your specific needs. + +And, this example is using the `deepseek-reasoner` model, which is a large language model that is optimized for reasoning tasks. + + +## Todo +- Add tools to check wallet of the treasury and check the balance of the treasury +- Add tools to check the price of the token +- Add tools to check the price of the token on different exchanges +- Add tools to check the price of the token on different chains +- Add tools to check twitter posts and check the sentiment of the posts + +```python +import random +from swarms import Agent + +# System prompts for each agent +MARKETING_AGENT_SYS_PROMPT = """ +You are the Marketing Strategist Agent for a DAO. Your role is to develop, implement, and optimize all marketing and branding strategies to align with the DAO's mission and vision. The DAO is focused on decentralized governance for climate action, funding projects aimed at reducing carbon emissions, and incentivizing community participation through its native token. + +### Objectives: +1. **Brand Awareness**: Build a globally recognized and trusted brand for the DAO. +2. **Community Growth**: Expand the DAO's community by onboarding individuals passionate about climate action and blockchain technology. +3. **Campaign Execution**: Launch high-impact marketing campaigns on platforms like Twitter, Discord, and YouTube to engage and retain community members. +4. **Partnerships**: Identify and build partnerships with like-minded organizations, NGOs, and influencers. +5. **Content Strategy**: Design educational and engaging content, including infographics, blog posts, videos, and AMAs. + +### Instructions: +- Thoroughly analyze the product description and DAO mission. +- Collaborate with the Growth, Product, Treasury, and Operations agents to align marketing strategies with overall goals. +- Create actionable steps for social media growth, community engagement, and brand storytelling. +- Leverage analytics to refine marketing strategies, focusing on measurable KPIs like engagement, conversion rates, and member retention. +- Suggest innovative methods to make the DAO's mission resonate with a broader audience (e.g., gamified incentives, contests, or viral campaigns). +- Ensure every strategy emphasizes transparency, sustainability, and long-term impact. +""" + +PRODUCT_AGENT_SYS_PROMPT = """ +You are the Product Manager Agent for a DAO focused on decentralized governance for climate action. Your role is to design, manage, and optimize the DAO's product roadmap. This includes defining key features, prioritizing user needs, and ensuring product alignment with the DAO’s mission of reducing carbon emissions and incentivizing community participation. + +### Objectives: +1. **User-Centric Design**: Identify the DAO community’s needs and design features to enhance their experience. +2. **Roadmap Prioritization**: Develop a prioritized product roadmap based on community feedback and alignment with climate action goals. +3. **Integration**: Suggest technical solutions and tools for seamless integration with other platforms and blockchains. +4. **Continuous Improvement**: Regularly evaluate product features and recommend optimizations to improve usability, engagement, and adoption. + +### Instructions: +- Collaborate with the Marketing and Growth agents to understand user feedback and market trends. +- Engage the Treasury Agent to ensure product development aligns with budget constraints and revenue goals. +- Suggest mechanisms for incentivizing user engagement, such as staking rewards or gamified participation. +- Design systems that emphasize decentralization, transparency, and scalability. +- Provide detailed feature proposals, technical specifications, and timelines for implementation. +- Ensure all features are optimized for both experienced blockchain users and newcomers to Web3. +""" + +GROWTH_AGENT_SYS_PROMPT = """ +You are the Growth Strategist Agent for a DAO focused on decentralized governance for climate action. Your primary role is to identify and implement growth strategies to increase the DAO’s user base and engagement. + +### Objectives: +1. **User Acquisition**: Identify effective strategies to onboard more users to the DAO. +2. **Retention**: Suggest ways to improve community engagement and retain active members. +3. **Data-Driven Insights**: Leverage data analytics to identify growth opportunities and areas of improvement. +4. **Collaborative Growth**: Work with other agents to align growth efforts with marketing, product development, and treasury goals. + +### Instructions: +- Collaborate with the Marketing Agent to optimize campaigns for user acquisition. +- Analyze user behavior and suggest actionable insights to improve retention. +- Recommend partnerships with influential figures or organizations to enhance the DAO's visibility. +- Propose growth experiments (A/B testing, new incentives, etc.) and analyze their effectiveness. +- Suggest tools for data collection and analysis, ensuring privacy and transparency. +- Ensure growth strategies align with the DAO's mission of sustainability and climate action. +""" + +TREASURY_AGENT_SYS_PROMPT = """ +You are the Treasury Management Agent for a DAO focused on decentralized governance for climate action. Your role is to oversee the DAO's financial operations, including budgeting, funding allocation, and financial reporting. + +### Objectives: +1. **Financial Transparency**: Maintain clear and detailed reports of the DAO's financial status. +2. **Budget Management**: Allocate funds strategically to align with the DAO's goals and priorities. +3. **Fundraising**: Identify and recommend strategies for fundraising to ensure the DAO's financial sustainability. +4. **Cost Optimization**: Suggest ways to reduce operational costs without sacrificing quality. + +### Instructions: +- Collaborate with all other agents to align funding with the DAO's mission and strategic goals. +- Propose innovative fundraising campaigns (e.g., NFT drops, token sales) to generate revenue. +- Analyze financial risks and suggest mitigation strategies. +- Ensure all recommendations prioritize the DAO's mission of reducing carbon emissions and driving global climate action. +- Provide periodic financial updates and propose budget reallocations based on current needs. +""" + +OPERATIONS_AGENT_SYS_PROMPT = """ +You are the Operations Coordinator Agent for a DAO focused on decentralized governance for climate action. Your role is to ensure smooth day-to-day operations, coordinate workflows, and manage governance processes. + +### Objectives: +1. **Workflow Optimization**: Streamline operational processes to maximize efficiency and effectiveness. +2. **Task Coordination**: Manage and delegate tasks to ensure timely delivery of goals. +3. **Governance**: Oversee governance processes, including proposal management and voting mechanisms. +4. **Communication**: Ensure seamless communication between all agents and community members. + +### Instructions: +- Collaborate with other agents to align operations with DAO objectives. +- Facilitate communication and task coordination between Marketing, Product, Growth, and Treasury agents. +- Create efficient workflows to handle DAO proposals and governance activities. +- Suggest tools or platforms to improve operational efficiency. +- Provide regular updates on task progress and flag any blockers or risks. +""" + +# Initialize agents +marketing_agent = Agent( + agent_name="Marketing-Agent", + system_prompt=MARKETING_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +product_agent = Agent( + agent_name="Product-Agent", + system_prompt=PRODUCT_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +growth_agent = Agent( + agent_name="Growth-Agent", + system_prompt=GROWTH_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +treasury_agent = Agent( + agent_name="Treasury-Agent", + system_prompt=TREASURY_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +operations_agent = Agent( + agent_name="Operations-Agent", + system_prompt=OPERATIONS_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +agents = [marketing_agent, product_agent, growth_agent, treasury_agent, operations_agent] + + +class DAOSwarmRunner: + """ + A class to manage and run a swarm of agents in a discussion. + """ + + def __init__(self, agents: list, max_loops: int = 5, shared_context: str = "") -> None: + """ + Initializes the DAO Swarm Runner. + + Args: + agents (list): A list of agents in the swarm. + max_loops (int, optional): The maximum number of discussion loops between agents. Defaults to 5. + shared_context (str, optional): The shared context for all agents to base their discussion on. Defaults to an empty string. + """ + self.agents = agents + self.max_loops = max_loops + self.shared_context = shared_context + self.discussion_history = [] + + def run(self, task: str) -> str: + """ + Runs the swarm in a random discussion. + + Args: + task (str): The task or context that agents will discuss. + + Returns: + str: The final discussion output after all loops. + """ + print(f"Task: {task}") + print("Initializing Random Discussion...") + + # Initialize the discussion with the shared context + current_message = f"Task: {task}\nContext: {self.shared_context}" + self.discussion_history.append(current_message) + + # Run the agents in a randomized discussion + for loop in range(self.max_loops): + print(f"\n--- Loop {loop + 1}/{self.max_loops} ---") + # Choose a random agent + agent = random.choice(self.agents) + print(f"Agent {agent.agent_name} is responding...") + + # Run the agent and get a response + response = agent.run(current_message) + print(f"Agent {agent.agent_name} says:\n{response}\n") + + # Append the response to the discussion history + self.discussion_history.append(f"{agent.agent_name}: {response}") + + # Update the current message for the next agent + current_message = response + + print("\n--- Discussion Complete ---") + return "\n".join(self.discussion_history) + + +swarm = DAOSwarmRunner(agents=agents, max_loops=1, shared_context="") + +# User input for product description +product_description = """ +The DAO is focused on decentralized governance for climate action. +It funds projects aimed at reducing carbon emissions and incentivizes community participation with a native token. +""" + +# Assign a shared context for all agents +swarm.shared_context = product_description + +# Run the swarm +task = """ +Analyze the product description and create a collaborative strategy for marketing, product, growth, treasury, and operations. Ensure all recommendations align with the DAO's mission of reducing carbon emissions. +""" +output = swarm.run(task) + +# Print the swarm output +print("Collaborative Strategy Output:\n", output) + +``` \ No newline at end of file diff --git a/docs/swarms/examples/swarms_tools_htx.md b/docs/swarms/examples/swarms_tools_htx.md new file mode 100644 index 00000000..ab4333dd --- /dev/null +++ b/docs/swarms/examples/swarms_tools_htx.md @@ -0,0 +1,37 @@ +# Swarms Tools Example with HTX + CoinGecko + +- `pip3 install swarms swarms-tools` +- Add `OPENAI_API_KEY` to your `.env` file + +```python +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + coin_gecko_coin_api, + fetch_htx_data, +) + + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, +) + +agent.run( + f"Analyze the $swarms token on HTX with data: {fetch_htx_data('swarms')}. Additionally, consider the following CoinGecko data: {coin_gecko_coin_api('swarms')}" +) +``` \ No newline at end of file diff --git a/docs/swarms/examples/swarms_tools_htx_gecko.md b/docs/swarms/examples/swarms_tools_htx_gecko.md new file mode 100644 index 00000000..9f554c53 --- /dev/null +++ b/docs/swarms/examples/swarms_tools_htx_gecko.md @@ -0,0 +1,43 @@ +# Swarms Tools Example with HTX + CoinGecko + +- `pip3 install swarms swarms-tools` +- Add `OPENAI_API_KEY` to your `.env` file +- Run `swarms_tools_htx_gecko.py` +- Agent will make a function call to the desired tool +- The tool will be executed and the result will be returned to the agent +- The agent will then analyze the result and return the final output + + +```python +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + fetch_stock_news, + coin_gecko_coin_api, + fetch_htx_data, +) + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, + tools=[fetch_stock_news, coin_gecko_coin_api, fetch_htx_data], +) + +agent.run("Analyze the $swarms token on htx") +``` \ No newline at end of file diff --git a/docs/swarms/examples/yahoo_finance.md b/docs/swarms/examples/yahoo_finance.md new file mode 100644 index 00000000..7b6e9706 --- /dev/null +++ b/docs/swarms/examples/yahoo_finance.md @@ -0,0 +1,42 @@ +# Swarms Tools Example with Yahoo Finance + +- `pip3 install swarms swarms-tools` +- Add `OPENAI_API_KEY` to your `.env` file +- Run `yahoo_finance_agent.py` +- Agent will make a function call to the desired tool +- The tool will be executed and the result will be returned to the agent +- The agent will then analyze the result and return the final output + + +```python +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + yahoo_finance_api, +) + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, + tools=[yahoo_finance_api], +) + +agent.run("Analyze the latest metrics for nvidia") +# Less than 30 lines of code.... +``` \ No newline at end of file diff --git a/docs/swarms/install/env.md b/docs/swarms/install/env.md new file mode 100644 index 00000000..689a5d93 --- /dev/null +++ b/docs/swarms/install/env.md @@ -0,0 +1,187 @@ +# Environment Variable Management & Security + +This guide provides comprehensive documentation for managing environment variables and API keys securely in the Swarms framework. + +## Overview + +Swarms uses environment variables for configuration management and secure credential storage. This approach keeps sensitive information like API keys out of your code and allows for easy configuration changes across different environments. + +## Core Environment Variables + +### Framework Configuration + +- `SWARMS_VERBOSE_GLOBAL`: Controls global logging verbosity + ```bash + SWARMS_VERBOSE_GLOBAL="True" # Enable verbose logging + SWARMS_VERBOSE_GLOBAL="False" # Disable verbose logging + ``` + +- `WORKSPACE_DIR`: Defines the workspace directory for agent operations + ```bash + WORKSPACE_DIR="agent_workspace" + ``` + +### API Keys + +#### Model Provider Keys + +1. **OpenAI** + - `OPENAI_API_KEY`: Authentication for GPT models + ```bash + OPENAI_API_KEY="your-openai-key" + ``` + +2. **Anthropic** + - `ANTHROPIC_API_KEY`: Authentication for Claude models + ```bash + ANTHROPIC_API_KEY="your-anthropic-key" + ``` + +3. **Google** + - `GEMINI_API_KEY`: Authentication for Gemini models + +4. **Hugging Face** + - `HUGGINGFACE_TOKEN`: Access to Hugging Face models + +5. **Perplexity AI** + - `PPLX_API_KEY`: Access to Perplexity models + +6. **AI21** + - `AI21_API_KEY`: Access to AI21 models + +#### Tool Provider Keys + +1. **Search Tools** + - `BING_BROWSER_API`: Bing search capabilities + - `BRAVESEARCH_API_KEY`: Brave search integration + - `TAVILY_API_KEY`: Tavily search services + - `YOU_API_KEY`: You.com search integration + +2. **Analytics & Monitoring** + - `AGENTOPS_API_KEY`: AgentOps monitoring + - `EXA_API_KEY`: Exa.ai services + +3. **Browser Automation** + - `MULTION_API_KEY`: Multi-browser automation + + +## Security Best Practices + +### 1. Environment File Management + +- Create a `.env` file in your project root +- Never commit `.env` files to version control +- Add `.env` to your `.gitignore`: + ```bash + echo ".env" >> .gitignore + ``` + +### 2. API Key Security + +- Rotate API keys regularly +- Use different API keys for development and production +- Never hardcode API keys in your code +- Limit API key permissions to only what's necessary +- Monitor API key usage for unusual patterns + +### 3. Template Configuration + +Create a `.env.example` template without actual values: + +```bash +# Required Configuration +OPENAI_API_KEY="" +ANTHROPIC_API_KEY="" +WORKSPACE_DIR="agent_workspace" + +# Optional Configuration +SWARMS_VERBOSE_GLOBAL="False" +``` + +### 4. Loading Environment Variables + +```python +from dotenv import load_dotenv +import os + +# Load environment variables +load_dotenv() + +# Access variables +workspace_dir = os.getenv("WORKSPACE_DIR") +openai_key = os.getenv("OPENAI_API_KEY") +``` + +## Environment Setup Guide + +1. **Install Dependencies**: + ```bash + pip install python-dotenv + ``` + +2. **Create Environment File**: + ```bash + cp .env.example .env + ``` + +3. **Configure Variables**: + - Open `.env` in your text editor + - Add your API keys and configuration + - Save the file + +4. **Verify Setup**: + ```python + import os + from dotenv import load_dotenv + + load_dotenv() + assert os.getenv("OPENAI_API_KEY") is not None, "OpenAI API key not found" + ``` + +## Environment-Specific Configuration + +### Development + +```bash +WORKSPACE_DIR="agent_workspace" +SWARMS_VERBOSE_GLOBAL="True" +``` + +### Production + +```bash +WORKSPACE_DIR="/var/swarms/workspace" +SWARMS_VERBOSE_GLOBAL="False" +``` + +### Testing + +```bash +WORKSPACE_DIR="test_workspace" +SWARMS_VERBOSE_GLOBAL="True" +``` + +## Troubleshooting + +### Common Issues + +1. **Environment Variables Not Loading** + - Verify `.env` file exists in project root + - Confirm `load_dotenv()` is called before accessing variables + - Check file permissions + +2. **API Key Issues** + - Verify key format is correct + - Ensure key has not expired + - Check for leading/trailing whitespace + +3. **Workspace Directory Problems** + - Confirm directory exists + - Verify write permissions + - Check path is absolute when required + +## Additional Resources + +- [Swarms Documentation](https://docs.swarms.world) +- [Security Best Practices](https://swarms.world/security) +- [API Documentation](https://swarms.world/docs/api) diff --git a/docs/swarms/models/models_available_overview.md b/docs/swarms/models/models_available_overview.md index 21ce54a7..c3c12127 100644 --- a/docs/swarms/models/models_available_overview.md +++ b/docs/swarms/models/models_available_overview.md @@ -278,8 +278,6 @@ Use logging to monitor the behavior and performance of your models. The `loguru` ```python from loguru import logger -logger.add("file.log", rotation="10 MB") - # Log model interactions logger.info("Running task on Anthropic model") response = model(task) diff --git a/docs/swarms/products.md b/docs/swarms/products.md new file mode 100644 index 00000000..02952caf --- /dev/null +++ b/docs/swarms/products.md @@ -0,0 +1,160 @@ +# Swarms Products + +Welcome to the official documentation for **Swarms**, the first multi-agent orchestration framework enabling seamless collaboration between LLMs and other tools to automate business operations at scale. Below, you’ll find detailed descriptions of all Swarms products and services to help you get started and unlock the full potential of this groundbreaking platform. + + + +| **Name** | **Description** | **Link** | +|-----------------------|-------------------------------------------------------------------------------------------------------------------|---------------------------| +| **Swarms Marketplace** | A platform to discover, share, and integrate prompts, agents, and tools. | [swarms.world](https://swarms.world) | +| **Swarms Spreadsheet** | A tool for managing and scaling thousands of agent outputs, with results saved to a CSV file for easy analysis. | [swarms.world](https://swarms.world) | +| **Drag n Drop Swarm** | An intuitive interface to visually create and manage swarms of agents through drag-and-drop functionality. | [swarms.world](https://swarms.world) | +| **Swarms API** | An API enabling seamless integration of swarms of agents into your applications and workflows. | [swarms.world](https://swarms.world) | +| **Wallet API** | A secure API for managing transactions and interactions within the Swarms ecosystem. | Coming Soon | +| **Swarm Exchange** | A marketplace for buying and selling prompts, agents, and tools within the Swarms ecosystem. | Coming Soon | + + + +--- + +## Swarms Marketplace +**Website:** [swarms.world](https://swarms.world) + +The Swarms Marketplace is your one-stop destination for discovering, adding, and managing: + +- **Prompts:** Access and share production-ready prompts for LLMs. + +- **Agents:** Browse pre-built agents tailored for tasks in marketing, finance, +programming, and more. +- **Tools:** Discover cutting-edge tools to enhance agent performance and expand +capabilities. + +### Key Features: +- **Rating System:** Evaluate and rate prompts, agents, and tools based on their +effectiveness. +- **Commenting System:** Share feedback and insights with the Swarms community. + +- **Coming Soon:** Buy and sell prompts, agents, and tools directly within the +marketplace. + +### How to Use: +1. Sign up at [swarms.world](https://swarms.world). +2. Explore the marketplace categories or search for specific solutions. +3. Add your chosen resources to your Swarms account and integrate them into your operations. + +--- + +## Swarms Spreadsheet +**Website:** [swarms.world](https://swarms.world) + +The Swarms Spreadsheet is a powerful tool for managing outputs from thousands of agents efficiently. Ideal for businesses needing scalable solutions, it provides: + +### Key Features: +- **Batch Task Execution:** Assign tasks to multiple agents simultaneously. + +- **CSV Integration:** Automatically save agent outputs to CSV files for easy analysis. + +- **Customizable Agents:** Upload single or multiple agents and run repeat tasks with +ease. +- **Metadata Capture:** Leverage built-in Pydantic schemas to record all task details +and results. + +### Use Cases: +- **Marketing:** Generate and analyze campaign ideas at scale. + +- **Finance:** Process financial models and scenarios quickly. + +- **Operations:** Automate repetitive tasks across multiple domains. + + +### How to Use: +1. Visit [swarms.world](https://swarms.world) and navigate to Swarms Spreadsheet. +2. Upload your agents or create new ones. +3. Run tasks and export results to a CSV file for further use. + +--- + +## Drag-n-Drop Swarm +**Website:** [swarms.world](https://swarms.world) + +The Drag-n-Drop Swarm enables non-technical users to create and deploy agent workflows with a simple drag-and-drop interface. It’s perfect for: + +### Key Features: +- **Visual Workflow Builder:** Design agent interactions without writing code. + +- **Pre-Built Templates:** Start quickly with ready-made workflows for common tasks. + +- **Intuitive Interface:** Drag, drop, and connect agents to create robust automation +pipelines. + +### How to Use: +1. Access the Drag-n-Drop Swarm tool at [swarms.world](https://swarms.world). +2. Drag agents from the library into the workspace. +3. Connect and configure agents to execute your desired workflow. +4. Save and deploy your workflow instantly. + +--- + +## Swarms API +**Website:** [swarms.world](https://swarms.world) + +The Swarms API provides developers with the ability to: + +### Key Features: +- **Agent Management:** Programmatically create, update, and delete agents. + +- **Task Orchestration:** Dynamically assign tasks to agents and monitor their progress. + +- **Custom Integration:** Seamlessly integrate Swarms functionality into existing +applications and workflows. + +### Getting Started: +1. Sign up for API access at [swarms.world](https://swarms.world). +2. Obtain your API key and authentication credentials. +3. Refer to the API documentation for endpoint details and usage examples. + +--- + +## Wallet API +The Wallet API enables secure and efficient transactions within the Swarms ecosystem, allowing users to: + +### Key Features: +- **Seamless Transactions:** Manage payments for prompts, agents, and tools. + +- **Secure Wallets:** Store and transfer funds safely within the Swarms platform. + +- **Transaction History:** Access detailed logs of all wallet activity. + + +### Getting Started: +1. Enable your wallet in your Swarms account settings. +2. Use the Wallet API to handle purchases and manage funds. + +--- + +## Swarm Exchange (Coming Soon) +The **Swarm Exchange** will revolutionize the way agents and tools are traded in the Swarms ecosystem. It will feature: + +### Key Features: +- **Decentralized Marketplace:** Trade agents and tools securely. + +- **Dynamic Pricing:** Leverage demand-based pricing for assets. + +- **Global Access:** Participate in the exchange from anywhere. + + +Stay tuned for updates on the Swarm Exchange launch. + +--- + +## Additional Resources +- **GitHub Repository:** [Swarms Framework](https://github.com/kyegomez/swarms) + +- **Documentation:** [Swarms Documentation](https://docs.swarms.world) + +- **Support:** Contact us via our [Discord Community](https://discord.gg/swarms). + +--- + +Experience the future of multi-agent collaboration with Swarms. Start building your agentic workflows today! + diff --git a/docs/swarms/structs/majorityvoting.md b/docs/swarms/structs/majorityvoting.md index 84ac02c8..e44ccdd7 100644 --- a/docs/swarms/structs/majorityvoting.md +++ b/docs/swarms/structs/majorityvoting.md @@ -2,216 +2,216 @@ The `MajorityVoting` module provides a mechanism for performing majority voting among a group of agents. Majority voting is a decision rule that selects the option which has the majority of votes. This is particularly useful in systems where multiple agents provide responses to a query, and the most common response needs to be identified as the final output. +## Architecture + +```mermaid +graph TD + A[MajorityVoting System] --> B[Initialize Agents] + B --> C[Process Task] + C --> D{Execution Mode} + D --> E[Single Task] + D --> F[Batch Tasks] + D --> G[Concurrent Tasks] + D --> H[Async Tasks] + E --> I[Run Agents] + F --> I + G --> I + H --> I + I --> J[Collect Responses] + J --> K[Consensus Analysis] + K --> L{Consensus Agent?} + L -->|Yes| M[Use Consensus Agent] + L -->|No| N[Use Last Agent] + M --> O[Final Output] + N --> O + O --> P[Save Conversation] +``` + ### Key Concepts - **Majority Voting**: A method to determine the most common response from a set of answers. - **Agents**: Entities (e.g., models, algorithms) that provide responses to tasks or queries. - **Output Parser**: A function that processes the responses from the agents before performing the majority voting. +- **Consensus Agent**: An optional agent that analyzes the responses from all agents to determine the final consensus. +- **Conversation History**: A record of all agent interactions and responses during the voting process. -## Function Definitions - -### Function: `majority_voting` - -Performs majority voting on a list of answers and returns the most common answer. +## Class Definition: `MajorityVoting` -#### Parameters +### Parameters -| Parameter | Type | Description | -|-----------|----------|------------------------------| -| `answers` | `List[str]` | A list of answers from different agents. | +| Parameter | Type | Description | +|------------------|----------------|-----------------------------------------------------------------------------| +| `name` | `str` | Name of the majority voting system. Default is "MajorityVoting". | +| `description` | `str` | Description of the system. Default is "A majority voting system for agents". | +| `agents` | `List[Agent]` | A list of agents to be used in the majority voting system. | +| `output_parser` | `Callable` | Function to parse agent outputs. Default is `majority_voting` function. | +| `consensus_agent`| `Agent` | Optional agent for analyzing consensus among responses. | +| `autosave` | `bool` | Whether to autosave conversations. Default is `False`. | +| `verbose` | `bool` | Whether to enable verbose logging. Default is `False`. | +| `max_loops` | `int` | Maximum number of voting loops. Default is 1. | -#### Returns +### Methods -| Return Value | Type | Description | -|--------------|-------|----------------------------------------| -| `answer` | `str` | The most common answer in the list. If the list is empty, returns "I don't know". | +#### `run(task: str, correct_answer: str, *args, **kwargs) -> List[Any]` -## Class Definitions +Runs the majority voting system for a single task. -### Class: `MajorityVoting` +**Parameters:** +- `task` (str): The task to be performed by the agents +- `correct_answer` (str): The correct answer for evaluation +- `*args`, `**kwargs`: Additional arguments -Class representing a majority voting system for agents. +**Returns:** +- List[Any]: The conversation history as a string, including the majority vote -#### Parameters +#### `batch_run(tasks: List[str], *args, **kwargs) -> List[Any]` -| Parameter | Type | Description | -|------------------|--------------|-----------------------------------------------------------------------------| -| `agents` | `List[Agent]`| A list of agents to be used in the majority voting system. | -| `output_parser` | `Callable` | A function used to parse the output of the agents. If not provided, the default `majority_voting` function is used. | -| `autosave` | `bool` | A boolean indicating whether to autosave the conversation to a file. Default is `False`. | -| `verbose` | `bool` | A boolean indicating whether to enable verbose logging. Default is `False`. | +Runs multiple tasks in sequence. -### Method: `__init__` +**Parameters:** +- `tasks` (List[str]): List of tasks to be performed +- `*args`, `**kwargs`: Additional arguments -Initializes the `MajorityVoting` system. +**Returns:** +- List[Any]: List of majority votes for each task -#### Parameters +#### `run_concurrently(tasks: List[str], *args, **kwargs) -> List[Any]` -| Parameter | Type | Description | -|------------------|----------------|-----------------------------------------------------------------------------| -| `agents` | `List[Agent]` | A list of agents to be used in the majority voting system. | -| `output_parser` | `Callable` | A function used to parse the output of the agents. Default is the `majority_voting` function. | -| `autosave` | `bool` | A boolean indicating whether to autosave the conversation to a file. Default is `False`. | -| `verbose` | `bool` | A boolean indicating whether to enable verbose logging. Default is `False`. | -| `args` | `tuple` | Additional positional arguments. | -| `kwargs` | `dict` | Additional keyword arguments. | +Runs multiple tasks concurrently using thread pooling. -### Method: `run` +**Parameters:** +- `tasks` (List[str]): List of tasks to be performed +- `*args`, `**kwargs`: Additional arguments -Runs the majority voting system and returns the majority vote. +**Returns:** +- List[Any]: List of majority votes for each task -#### Parameters +#### `run_async(tasks: List[str], *args, **kwargs) -> List[Any]` -| Parameter | Type | Description | -|-----------|------------|------------------------------------------| -| `task` | `str` | The task to be performed by the agents. | -| `args` | `tuple` | Variable length argument list. | -| `kwargs` | `dict` | Arbitrary keyword arguments. | +Runs multiple tasks asynchronously using asyncio. -#### Returns +**Parameters:** +- `tasks` (List[str]): List of tasks to be performed +- `*args`, `**kwargs`: Additional arguments -| Return Value | Type | Description | -|--------------|-----------|--------------------------------------| -| `results` | `List[Any]` | The majority vote. | +**Returns:** +- List[Any]: List of majority votes for each task ## Usage Examples -### Example 1: Basic Majority Voting +### Example 1: Basic Single Task Execution with Modern LLMs ```python -from swarms.structs.agent import Agent -from swarms.structs.majority_voting import MajorityVoting +from swarms import Agent, MajorityVoting -# Initialize agents +# Initialize multiple agents with different specialties agents = [ Agent( - agent_name="Devin", - system_prompt=( - "Autonomous agent that can interact with humans and other" - " agents. Be Helpful and Kind. Use the tools provided to" - " assist the user. Return all code in markdown format." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor focused on market analysis", + system_prompt="You are a financial advisor specializing in market analysis and investment opportunities.", + max_loops=1, + model_name="gpt-4o" ), Agent( - agent_name="Codex", - system_prompt=( - "An AI coding assistant capable of writing and understanding" - " code snippets in various programming languages." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, + agent_name="Risk-Assessment-Agent", + agent_description="Risk analysis and portfolio management expert", + system_prompt="You are a risk assessment expert focused on evaluating investment risks and portfolio diversification.", + max_loops=1, + model_name="gpt-4o" ), Agent( - agent_name="Tabnine", - system_prompt=( - "A code completion AI that provides suggestions for code" - " completion and code improvements." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, - ), + agent_name="Tech-Investment-Agent", + agent_description="Technology sector investment specialist", + system_prompt="You are a technology investment specialist focused on AI, emerging tech, and growth opportunities.", + max_loops=1, + model_name="gpt-4o" + ) ] -# Create MajorityVoting instance -majority_voting = MajorityVoting(agents) -# Run the majority voting system -result = majority_voting.run("What is the capital of France?") -print(result) # Output: 'Paris' +consensus_agent = Agent( + agent_name="Consensus-Agent", + agent_description="Consensus agent focused on analyzing investment advice", + system_prompt="You are a consensus agent focused on analyzing investment advice and providing a final answer.", + max_loops=1, + model_name="gpt-4o" +) + +# Create majority voting system +majority_voting = MajorityVoting( + name="Investment-Advisory-System", + description="Multi-agent system for investment advice", + agents=agents, + verbose=True, + consensus_agent=consensus_agent +) + +# Run the analysis with majority voting +result = majority_voting.run( + task="Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", + correct_answer="" # Optional evaluation metric +) + +print(result) + ``` -### Example 2: Running a Task with Detailed Outputs +## Batch Execution ```python -from swarms.structs.agent import Agent -from swarms.structs.majority_voting import MajorityVoting +from swarms import Agent, MajorityVoting -# Initialize agents +# Initialize multiple agents with different specialties agents = [ Agent( - agent_name="Devin", - system_prompt=( - "Autonomous agent that can interact with humans and other" - " agents. Be Helpful and Kind. Use the tools provided to" - " assist the user. Return all code in markdown format." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor focused on market analysis", + system_prompt="You are a financial advisor specializing in market analysis and investment opportunities.", + max_loops=1, + model_name="gpt-4o" ), Agent( - agent_name="Codex", - system_prompt=( - "An AI coding assistant capable of writing and understanding" - " code snippets in various programming languages." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, + agent_name="Risk-Assessment-Agent", + agent_description="Risk analysis and portfolio management expert", + system_prompt="You are a risk assessment expert focused on evaluating investment risks and portfolio diversification.", + max_loops=1, + model_name="gpt-4o" ), Agent( - agent_name="Tabnine", - system_prompt=( - "A code completion AI that provides suggestions for code" - " completion and code improvements." - ), - llm=llm, - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - interactive=True, - tools=[terminal, browser, file_editor, create_file], - code_interpreter=True, - ), + agent_name="Tech-Investment-Agent", + agent_description="Technology sector investment specialist", + system_prompt="You are a technology investment specialist focused on AI, emerging tech, and growth opportunities.", + max_loops=1, + model_name="gpt-4o" + ) ] -# Create MajorityVoting instance -majority_voting = MajorityVoting(agents) -# Run the majority voting system with a different task -result = majority_voting.run("Create a new file for a plan to take over the world.") +consensus_agent = Agent( + agent_name="Consensus-Agent", + agent_description="Consensus agent focused on analyzing investment advice", + system_prompt="You are a consensus agent focused on analyzing investment advice and providing a final answer.", + max_loops=1, + model_name="gpt-4o" +) + +# Create majority voting system +majority_voting = MajorityVoting( + name="Investment-Advisory-System", + description="Multi-agent system for investment advice", + agents=agents, + verbose=True, + consensus_agent=consensus_agent +) + +# Run the analysis with majority voting +result = majority_voting.batch_run( + task="Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", + correct_answer="" # Optional evaluation metric +) + print(result) + + ``` \ No newline at end of file diff --git a/docs/swarms/structs/matrix_swarm.md b/docs/swarms/structs/matrix_swarm.md new file mode 100644 index 00000000..0ed3c190 --- /dev/null +++ b/docs/swarms/structs/matrix_swarm.md @@ -0,0 +1,247 @@ +# MatrixSwarm + +The `MatrixSwarm` class provides a framework for managing and operating on matrices of AI agents, enabling matrix-like operations similar to linear algebra. This allows for complex agent interactions and parallel processing capabilities. + +## Overview + +`MatrixSwarm` treats AI agents as elements in a matrix, allowing for operations like addition, multiplication, and transposition. This approach enables sophisticated agent orchestration and parallel processing patterns. + +## Installation + +```bash +pip3 install -U swarms +``` + +## Basic Usage + +```python +from swarms import Agent +from swarms.matrix import MatrixSwarm + +# Create a 2x2 matrix of agents +agents = [ + [Agent(agent_name="Agent-0-0"), Agent(agent_name="Agent-0-1")], + [Agent(agent_name="Agent-1-0"), Agent(agent_name="Agent-1-1")] +] + +# Initialize the matrix +matrix = MatrixSwarm(agents) +``` + +## Class Constructor + +```python +def __init__(self, agents: List[List[Agent]]) +``` + +### Parameters +- `agents` (`List[List[Agent]]`): A 2D list of Agent instances representing the matrix. + +### Raises +- `ValueError`: If the input is not a valid 2D list of Agent instances. + +## Methods + +### transpose() + +Transposes the matrix of agents by swapping rows and columns. + +```python +def transpose(self) -> MatrixSwarm +``` + +#### Returns +- `MatrixSwarm`: A new MatrixSwarm instance with transposed dimensions. + +--- + +### add(other) + +Performs element-wise addition of two agent matrices. + +```python +def add(self, other: MatrixSwarm) -> MatrixSwarm +``` + +#### Parameters +- `other` (`MatrixSwarm`): Another MatrixSwarm instance to add. + +#### Returns +- `MatrixSwarm`: A new MatrixSwarm resulting from the addition. + +#### Raises +- `ValueError`: If matrix dimensions are incompatible. + +--- + +### scalar_multiply(scalar) + +Scales the matrix by duplicating agents along rows. + +```python +def scalar_multiply(self, scalar: int) -> MatrixSwarm +``` + +#### Parameters +- `scalar` (`int`): The multiplication factor. + +#### Returns +- `MatrixSwarm`: A new MatrixSwarm with scaled dimensions. + +--- + +### multiply(other, inputs) + +Performs matrix multiplication (dot product) between two agent matrices. + +```python +def multiply(self, other: MatrixSwarm, inputs: List[str]) -> List[List[AgentOutput]] +``` + +#### Parameters +- `other` (`MatrixSwarm`): The second MatrixSwarm for multiplication. +- `inputs` (`List[str]`): Input queries for the agents. + +#### Returns +- `List[List[AgentOutput]]`: Matrix of operation results. + +#### Raises +- `ValueError`: If matrix dimensions are incompatible for multiplication. + +--- + +### subtract(other) + +Performs element-wise subtraction of two agent matrices. + +```python +def subtract(self, other: MatrixSwarm) -> MatrixSwarm +``` + +#### Parameters +- `other` (`MatrixSwarm`): Another MatrixSwarm to subtract. + +#### Returns +- `MatrixSwarm`: A new MatrixSwarm resulting from the subtraction. + +--- + +### identity(size) + +Creates an identity matrix of agents. + +```python +def identity(self, size: int) -> MatrixSwarm +``` + +#### Parameters +- `size` (`int`): Size of the identity matrix (NxN). + +#### Returns +- `MatrixSwarm`: An identity MatrixSwarm. + +--- + +### determinant() + +Computes the determinant of a square agent matrix. + +```python +def determinant(self) -> Any +``` + +#### Returns +- `Any`: The determinant result. + +#### Raises +- `ValueError`: If the matrix is not square. + +--- + +### save_to_file(path) + +Saves the matrix structure and metadata to a JSON file. + +```python +def save_to_file(self, path: str) -> None +``` + +#### Parameters +- `path` (`str`): File path for saving the matrix data. + +## Extended Example + +Here's a comprehensive example demonstrating various MatrixSwarm operations: + +```python +from swarms import Agent +from swarms.matrix import MatrixSwarm + +# Create agents with specific configurations +agents = [ + [ + Agent( + agent_name=f"Agent-{i}-{j}", + system_prompt="Your system prompt here", + model_name="gpt-4", + max_loops=1, + verbose=True + ) for j in range(2) + ] for i in range(2) +] + +# Initialize matrix +matrix = MatrixSwarm(agents) + +# Example operations +transposed = matrix.transpose() +scaled = matrix.scalar_multiply(2) + +# Run operations with inputs +inputs = ["Query 1", "Query 2"] +results = matrix.multiply(transposed, inputs) + +# Save results +matrix.save_to_file("matrix_results.json") +``` + +## Output Schema + +The `AgentOutput` class defines the structure for operation results: + +```python +class AgentOutput(BaseModel): + agent_name: str + input_query: str + output_result: Any + metadata: dict +``` + +## Best Practices + +1. **Initialization** + - Ensure all agents in the matrix are properly configured before initialization + - Validate matrix dimensions for your use case + +2. **Operation Performance** + - Consider computational costs for large matrices + - Use appropriate batch sizes for inputs + +3. **Error Handling** + - Implement proper error handling for agent operations + - Validate inputs before matrix operations + +4. **Resource Management** + - Monitor agent resource usage in large matrices + - Implement proper cleanup procedures + +## Limitations + +- Matrix operations are constrained by the underlying agent capabilities +- Performance may vary based on agent configuration and complexity +- Resource usage scales with matrix dimensions + +## See Also + +- [Swarms Documentation](https://github.com/kyegomez/swarms) +- [Agent Class Reference](https://github.com/kyegomez/swarms/tree/main/swarms) \ No newline at end of file diff --git a/docs/swarms/structs/moa.md b/docs/swarms/structs/moa.md index 82b23330..0a1af8bc 100644 --- a/docs/swarms/structs/moa.md +++ b/docs/swarms/structs/moa.md @@ -1,5 +1,31 @@ # MixtureOfAgents Class Documentation +## Architecture Overview + +```mermaid +graph TD + A[Input Task] --> B[Initialize MixtureOfAgents] + B --> C[Reliability Check] + C --> D[Layer 1: Parallel Agent Execution] + D --> E[Layer 2: Sequential Processing] + E --> F[Layer 3: Parallel Agent Execution] + F --> G[Final Aggregator Agent] + G --> H[Output Response] + + subgraph "Agent Layer Details" + I[Agent 1] --> J[Agent Results] + K[Agent 2] --> J + L[Agent N] --> J + end + + subgraph "Processing Flow" + M[Previous Context] --> N[Current Task] + N --> O[Agent Processing] + O --> P[Aggregation] + P --> M + end +``` + ## Overview The `MixtureOfAgents` class represents a mixture of agents operating within a swarm. The workflow of the swarm follows a parallel → sequential → parallel → final output agent process. This implementation is inspired by concepts discussed in the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692). @@ -130,6 +156,89 @@ history = moe_swarm.run(task="Solve this problem.") print(history) ``` +### `reliability_check` + +```python +def reliability_check(self) -> None: +``` + +#### Description + +Performs validation checks on the Mixture of Agents class to ensure all required components are properly configured. Raises ValueError if any checks fail. + +#### Validation Checks: +- Verifies reference agents are provided +- Validates aggregator agent exists +- Checks aggregator system prompt is set +- Ensures layers count is valid (> 0) + +### `_get_final_system_prompt` + +```python +def _get_final_system_prompt(self, system_prompt: str, results: List[str]) -> str: +``` + +#### Description + +Internal method that constructs a system prompt for subsequent layers by incorporating previous responses. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `system_prompt` | `str` | The initial system prompt | +| `results` | `List[str]` | List of previous responses | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Combined system prompt with previous responses | + +### `run_batched` + +```python +def run_batched(self, tasks: List[str]) -> List[str]: +``` + +#### Description + +Processes multiple tasks sequentially, returning a list of responses. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `tasks` | `List[str]` | List of tasks to process | + +#### Returns + +| Type | Description | +|------|-------------| +| `List[str]` | List of responses for each task | + +### `run_concurrently` + +```python +def run_concurrently(self, tasks: List[str]) -> List[str]: +``` + +#### Description + +Processes multiple tasks concurrently using a ThreadPoolExecutor, optimizing for parallel execution. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `tasks` | `List[str]` | List of tasks to process concurrently | + +#### Returns + +| Type | Description | +|------|-------------| +| `List[str]` | List of responses for each task | + ## Detailed Explanation ### Initialization @@ -382,4 +491,113 @@ The `MixtureOfAgents` framework provides a solid foundation for further extensio - **Advanced Agent Communication**: Enhancing the communication protocols between agents to allow for more sophisticated information exchange. - **Integration with Other Frameworks**: Seamlessly integrating with other machine learning or data processing frameworks to leverage their capabilities within the swarm architecture. -In conclusion, the `MixtureOfAgents` class represents a versatile and efficient solution for orchestrating multi-agent systems, facilitating complex task execution through its structured and layered approach. By harnessing the power of parallel and sequential processing, it opens up new possibilities for tackling intricate problems across various domains. \ No newline at end of file +In conclusion, the `MixtureOfAgents` class represents a versatile and efficient solution for orchestrating multi-agent systems, facilitating complex task execution through its structured and layered approach. By harnessing the power of parallel and sequential processing, it opens up new possibilities for tackling intricate problems across various domains. + +## Additional Examples + +### Example 4: Batch Processing + +```python +from swarms import MixtureOfAgents, Agent +from swarm_models import OpenAIChat + +# Initialize agents as in previous examples +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) + +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant1.json", +) + +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) + +# Initialize MixtureOfAgents +moe_swarm = MixtureOfAgents( + agents=[director, accountant1, accountant2], + final_agent=director +) + +# Process multiple tasks in batch +tasks = [ + "Analyze Q1 financial statements", + "Review tax compliance", + "Prepare budget forecast" +] +results = moe_swarm.run_batched(tasks) +for task, result in zip(tasks, results): + print(f"Task: {task}\nResult: {result}\n") +``` + +### Example 5: Concurrent Processing + +```python +from swarms import MixtureOfAgents, Agent +from swarm_models import OpenAIChat + +# Initialize agents as before +# ... agent initialization code ... + +# Initialize MixtureOfAgents +moe_swarm = MixtureOfAgents( + agents=[director, accountant1, accountant2], + final_agent=director +) + +# Process multiple tasks concurrently +tasks = [ + "Generate monthly report", + "Audit expense claims", + "Update financial projections", + "Review investment portfolio" +] +results = moe_swarm.run_concurrently(tasks) +for task, result in zip(tasks, results): + print(f"Task: {task}\nResult: {result}\n") +``` + +## Advanced Features + +### Context Preservation + +The `MixtureOfAgents` class maintains context between iterations when running multiple loops. Each subsequent iteration receives the context from previous runs, allowing for more sophisticated and context-aware processing. + +### Asynchronous Processing + +The class implements asynchronous processing internally using Python's `asyncio`, enabling efficient handling of concurrent operations and improved performance for complex workflows. + +### Telemetry and Logging + +Built-in telemetry and logging capabilities help track agent performance and maintain detailed execution records: +- Automatic logging of agent outputs +- Structured data capture using Pydantic models +- JSON-formatted output options \ No newline at end of file diff --git a/docs/swarms/structs/model_router.md b/docs/swarms/structs/model_router.md new file mode 100644 index 00000000..11526d8c --- /dev/null +++ b/docs/swarms/structs/model_router.md @@ -0,0 +1,361 @@ +# ModelRouter Docs + +The ModelRouter is an intelligent routing system that automatically selects and executes AI models based on task requirements. It leverages a function-calling architecture to analyze tasks and recommend the optimal model and provider combination for each specific use case. + + + + + +### Key Features + +- Dynamic model selection based on task complexity and requirements +- Multi-provider support (OpenAI, Anthropic, Google, etc.) +- Concurrent and asynchronous execution capabilities +- Batch processing with memory +- Automatic error handling and retries +- Provider-aware routing +- Cost optimization + +### Constructor Arguments + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| system_prompt | str | model_router_system_prompt | Custom prompt for guiding model selection behavior | +| max_tokens | int | 4000 | Maximum token limit for model outputs | +| temperature | float | 0.5 | Control parameter for response randomness (0.0-1.0) | +| max_workers | int/str | 10 | Maximum concurrent workers ("auto" for CPU count) | +| api_key | str | None | API key for model access | +| max_loops | int | 1 | Maximum number of refinement iterations | +| *args | Any | None | Additional positional arguments | +| **kwargs | Any | None | Additional keyword arguments | + +### Core Methods + +#### run(task: str) -> str + +Executes a single task through the model router with memory and refinement capabilities. + +# Installation + +1. Install the latest version of swarms using pip: + +```bash +pip3 install -U swarms +``` + +2. Setup your API Keys in your .env file with the following: + +```bash +OPENAI_API_KEY=your_openai_api_key +ANTHROPIC_API_KEY=your_anthropic_api_key +GOOGLE_API_KEY=your_google_api_key +# Add more API keys as needed following litellm format +``` + + +```python +from swarms import ModelRouter + +router = ModelRouter() + +# Simple text analysis +result = router.run("Analyze the sentiment and key themes in this customer feedback") + +# Complex reasoning task +complex_result = router.run(""" +Evaluate the following business proposal: +- Initial investment: $500,000 +- Projected ROI: 25% annually +- Market size: $2B +- Competition: 3 major players +Provide detailed analysis and recommendations. +""") +``` + +#### batch_run(tasks: list) -> list +Executes multiple tasks sequentially with result aggregation. + +```python +# Multiple analysis tasks +tasks = [ + "Analyze Q1 financial performance", + "Predict Q2 market trends", + "Evaluate competitor strategies", + "Generate growth recommendations" +] + +results = router.batch_run(tasks) + +# Process results +for task, result in zip(tasks, results): + print(f"Task: {task}\nResult: {result}\n") +``` + +#### concurrent_run(tasks: list) -> list +Parallel execution of multiple tasks using thread pooling. + +```python +import asyncio +from typing import List + +# Define multiple concurrent tasks +analysis_tasks = [ + "Perform technical analysis of AAPL stock", + "Analyze market sentiment from social media", + "Generate trading signals", + "Calculate risk metrics" +] + +# Execute tasks concurrently +results = router.concurrent_run(analysis_tasks) + +# Process results with error handling +for task, result in zip(analysis_tasks, results): + try: + processed_result = process_analysis(result) + save_to_database(processed_result) + except Exception as e: + log_error(f"Error processing {task}: {str(e)}") +``` + +#### async_run(task: str) -> asyncio.Task +Asynchronous task execution with coroutine support. + +```python +async def process_data_stream(): + tasks = [] + async for data in data_stream: + task = await router.async_run(f"Process data: {data}") + tasks.append(task) + + results = await asyncio.gather(*tasks) + return results + +# Usage in async context +async def main(): + router = ModelRouter() + results = await process_data_stream() +``` + +### Advanced Usage Examples + +#### Financial Analysis System + +```python +from swarms import ModelRouter +from typing import Dict, List +import pandas as pd + +class FinancialAnalysisSystem: + def __init__(self): + self.router = ModelRouter( + temperature=0.3, # Lower temperature for more deterministic outputs + max_tokens=8000, # Higher token limit for detailed analysis + max_loops=2 # Allow for refinement iteration + ) + + def analyze_company_financials(self, financial_data: Dict) -> Dict: + analysis_task = f""" + Perform comprehensive financial analysis: + + Financial Metrics: + - Revenue: ${financial_data['revenue']}M + - EBITDA: ${financial_data['ebitda']}M + - Debt/Equity: {financial_data['debt_equity']} + - Working Capital: ${financial_data['working_capital']}M + + Required Analysis: + 1. Profitability assessment + 2. Liquidity analysis + 3. Growth projections + 4. Risk evaluation + 5. Investment recommendations + + Provide detailed insights and actionable recommendations. + """ + + result = self.router.run(analysis_task) + return self._parse_analysis_result(result) + + def _parse_analysis_result(self, result: str) -> Dict: + # Implementation of result parsing + pass + +# Usage +analyzer = FinancialAnalysisSystem() +company_data = { + 'revenue': 150, + 'ebitda': 45, + 'debt_equity': 0.8, + 'working_capital': 25 +} + +analysis = analyzer.analyze_company_financials(company_data) +``` + +#### Healthcare Data Processing Pipeline + +```python +from swarms import ModelRouter +import pandas as pd +from typing import List, Dict + +class MedicalDataProcessor: + def __init__(self): + self.router = ModelRouter( + max_workers="auto", # Automatic worker scaling + temperature=0.2, # Conservative temperature for medical analysis + system_prompt="""You are a specialized medical data analyzer focused on: + 1. Clinical terminology interpretation + 2. Patient data analysis + 3. Treatment recommendation review + 4. Medical research synthesis""" + ) + + async def process_patient_records(self, records: List[Dict]) -> List[Dict]: + analysis_tasks = [] + + for record in records: + task = f""" + Analyze patient record: + - Age: {record['age']} + - Symptoms: {', '.join(record['symptoms'])} + - Vital Signs: {record['vitals']} + - Medications: {', '.join(record['medications'])} + - Lab Results: {record['lab_results']} + + Provide: + 1. Symptom analysis + 2. Medication interaction check + 3. Lab results interpretation + 4. Treatment recommendations + """ + analysis_tasks.append(task) + + results = await asyncio.gather(*[ + self.router.async_run(task) for task in analysis_tasks + ]) + + return [self._parse_medical_analysis(r) for r in results] + + def _parse_medical_analysis(self, analysis: str) -> Dict: + # Implementation of medical analysis parsing + pass + +# Usage +async def main(): + processor = MedicalDataProcessor() + patient_records = [ + { + 'age': 45, + 'symptoms': ['fever', 'cough', 'fatigue'], + 'vitals': {'bp': '120/80', 'temp': '38.5C'}, + 'medications': ['lisinopril', 'metformin'], + 'lab_results': 'WBC: 11,000, CRP: 2.5' + } + # More records... + ] + + analyses = await processor.process_patient_records(patient_records) +``` + +#### Natural Language Processing Pipeline + +```python +from swarms import ModelRouter +from typing import List, Dict +import asyncio + +class NLPPipeline: + def __init__(self): + self.router = ModelRouter( + temperature=0.4, + max_loops=2 + ) + + def process_documents(self, documents: List[str]) -> List[Dict]: + tasks = [self._create_nlp_task(doc) for doc in documents] + results = self.router.concurrent_run(tasks) + return [self._parse_nlp_result(r) for r in results] + + def _create_nlp_task(self, document: str) -> str: + return f""" + Perform comprehensive NLP analysis: + + Text: {document} + + Required Analysis: + 1. Entity recognition + 2. Sentiment analysis + 3. Topic classification + 4. Key phrase extraction + 5. Intent detection + + Provide structured analysis with confidence scores. + """ + + def _parse_nlp_result(self, result: str) -> Dict: + # Implementation of NLP result parsing + pass + +# Usage +pipeline = NLPPipeline() +documents = [ + "We're extremely satisfied with the new product features!", + "The customer service response time needs improvement.", + "Looking to upgrade our subscription plan next month." +] + +analyses = pipeline.process_documents(documents) +``` + +### Available Models and Use Cases + +| Model | Provider | Optimal Use Cases | Characteristics | +|-------|----------|-------------------|-----------------| +| gpt-4-turbo | OpenAI | Complex reasoning, Code generation, Creative writing | High accuracy, Latest knowledge cutoff | +| claude-3-opus | Anthropic | Research analysis, Technical documentation, Long-form content | Strong reasoning, Detailed outputs | +| gemini-pro | Google | Multimodal tasks, Code generation, Technical analysis | Fast inference, Strong coding abilities | +| mistral-large | Mistral | General tasks, Content generation, Classification | Open source, Good price/performance | +| deepseek-reasoner | DeepSeek | Mathematical analysis, Logic problems, Scientific computing | Specialized reasoning capabilities | + +### Provider Capabilities + +| Provider | Strengths | Best For | Integration Notes | +|----------|-----------|-----------|------------------| +| OpenAI | Consistent performance, Strong reasoning | Production systems, Complex tasks | Requires API key setup | +| Anthropic | Safety features, Detailed analysis | Research, Technical writing | Claude-specific formatting | +| Google | Technical tasks, Multimodal support | Code generation, Analysis | Vertex AI integration available | +| Groq | High-speed inference | Real-time applications | Optimized for specific models | +| DeepSeek | Specialized reasoning | Scientific computing | Custom API integration | +| Mistral | Open source flexibility | General applications | Self-hosted options available | + + +### Performance Optimization Tips + +1. Token Management + - Set appropriate max_tokens based on task complexity + - Monitor token usage for cost optimization + - Use streaming for long outputs + +2. Concurrency Settings + - Adjust max_workers based on system resources + - Use "auto" workers for optimal CPU utilization + - Monitor memory usage with large batch sizes + +3. Temperature Tuning + - Lower (0.1-0.3) for factual/analytical tasks + - Higher (0.7-0.9) for creative tasks + - Mid-range (0.4-0.6) for balanced outputs + +4. System Prompts + - Customize for specific domains + - Include relevant context + - Define clear output formats + +### Dependencies + +- asyncio: Asynchronous I/O support +- concurrent.futures: Thread pool execution +- pydantic: Data validation +- litellm: LLM interface standardization diff --git a/docs/swarms/structs/multi_process_workflow.md b/docs/swarms/structs/multi_process_workflow.md deleted file mode 100644 index d89134d6..00000000 --- a/docs/swarms/structs/multi_process_workflow.md +++ /dev/null @@ -1,124 +0,0 @@ -# MultiProcessWorkflow Documentation - - -The `MultiProcessWorkflow` class provides a framework for executing tasks concurrently using multiple processes. This class leverages Python's `multiprocessing` module to parallelize task execution, thereby enhancing performance and efficiency. It includes features such as automatic task retry on failure and optional autosaving of results. This documentation details the class, its parameters, attributes, methods, and usage examples. - -## Class Definition - -### `MultiProcessWorkflow` - - -## Parameters - -| Parameter | Type | Default | Description | -|---------------|---------------------|---------|---------------------------------------------------------------| -| `max_workers` | `int` | `5` | The maximum number of workers to use for parallel processing. | -| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. | -| `agents` | `Sequence[Agent]` | `None` | A list of Agent objects representing the workflow agents. | -| `*args` | `tuple` | | Additional positional arguments. | -| `**kwargs` | `dict` | | Additional keyword arguments. | - -## Attributes - -| Attribute | Type | Description | -|-----------------|---------------------|--------------------------------------------------------------| -| `max_workers` | `int` | The maximum number of workers to use for parallel processing.| -| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. | -| `agents` | `Sequence[Agent]` | A list of Agent objects representing the workflow agents. | - -## Methods - -### `execute_task` - -#### Description - -The `execute_task` method executes a given task and handles any exceptions that may occur during execution. If agents are defined, it will execute the task using each agent in sequence. - -#### Usage Example - -```python -# Define a task -task = Task() - -# Execute the task -workflow = MultiProcessWorkflow() -result = workflow.execute_task(task) -print(result) -``` - -### `run` - -#### Description - -The `run` method executes the workflow by running the given task using multiple processes. It manages the task execution using a process pool and collects the results. - -#### Usage Example - -```python -from swarms.structs.multi_process_workflow import MultiProcessingWorkflow -from swarms.structs.task import Task -from datetime import datetime -from time import sleep - -# Define a simple task -def simple_task(): - sleep(1) - return datetime.now() - -# Create a task object -task = Task( - name="Simple Task", - execute=simple_task, - priority=1, -) - -# Create a workflow with the task -workflow = MultiProcessWorkflow(max_workers=3, autosave=True, agents=[agent1, agent2]) - -# Run the workflow -results = workflow.run(task) - -# Print the results -print(results) -``` - -## Detailed Functionality and Usage - -### Initialization - -When an instance of `MultiProcessWorkflow` is created, it initializes the following: - -- **max_workers**: Sets the maximum number of processes that can run concurrently. -- **autosave**: Determines if the workflow results should be saved automatically. -- **agents**: Accepts a list of agents that will perform the tasks. - -### Running Tasks - -The `run` method performs the following steps: - -1. **Initialize Results and Manager**: Creates a list to store results and a `Manager` to manage shared state between processes. -2. **Initialize Process Pool**: Creates a pool of worker processes. -3. **Submit Tasks**: Iterates over the agents, submitting tasks to the pool for execution and collecting the results. -4. **Wait for Completion**: Waits for all tasks to complete and collects the results. -5. **Return Results**: Returns the list of results from all executed tasks. - -### Autosave Task Result - -Although the autosave functionality is mentioned in the parameters, it is not explicitly defined in the given code. The implementation for autosaving should be added based on the specific requirements of the application. - -## Additional Information and Tips - -- **Process Safety**: The use of `Manager` ensures that the list of results is managed safely across multiple processes. -- **Logging**: The class uses the `logger` module to log information about task execution, retries, and failures. -- **Error Handling**: The retry mechanism in the `execute_task` method helps in handling transient errors by attempting to re-execute failed tasks. - -## References and Resources - -For more information on multiprocessing in Python, refer to the following resources: - -- [Python Multiprocessing Documentation](https://docs.python.org/3/library/multiprocessing.html) -- [Python Logging Documentation](https://docs.python.org/3/library/logging.html) - ---- - -By following this detailed documentation, users can effectively understand and utilize the `MultiProcessWorkflow` class to execute tasks concurrently with multiple processes. The examples provided help in demonstrating the practical usage of the class. \ No newline at end of file diff --git a/docs/swarms/structs/multi_processing_workflow.md b/docs/swarms/structs/multi_processing_workflow.md deleted file mode 100644 index 320667d4..00000000 --- a/docs/swarms/structs/multi_processing_workflow.md +++ /dev/null @@ -1,204 +0,0 @@ -# MultiProcessWorkflow Documentation - -The `MultiProcessWorkflow` class extends the `BaseWorkflow` to support parallel processing using multiple workers. This class is designed to efficiently execute tasks concurrently, leveraging the power of multi-processing to enhance performance and scalability. - -### Key Concepts - -- **Parallel Processing**: Utilizing multiple workers to execute tasks concurrently. -- **Workflow Management**: Handling the execution of tasks in a structured workflow. -- **Agents**: Entities responsible for executing tasks. - -## Attributes - -### Arguments - -| Argument | Type | Default | Description | -|--------------|---------------------|---------|-------------| -| `max_workers`| `int` | `5` | The maximum number of workers to use for parallel processing. | -| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. | -| `agents` | `Sequence[Agent]` | `None` | A list of agents participating in the workflow. | -| `*args` | | | Additional positional arguments. | -| `**kwargs` | | | Additional keyword arguments. | - -### Attributes - -| Attribute | Type | Description | -|--------------|---------------------|-------------| -| `max_workers`| `int` | The maximum number of workers to use for parallel processing. | -| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. | -| `agents` | `Sequence[Agent]` | A list of agents participating in the workflow. | - -## Methods - -### __init__ - -Initializes the `MultiProcessWorkflow` with the given parameters. - -**Examples:** - -```python -from swarms.structs.agent import Agent -from swarms.structs.task import Task -from swarms.structs.multi_process_workflow import MultiProcessWorkflow - -agents = [Agent(name="Agent 1"), Agent(name="Agent 2")] -tasks = [Task(name="Task 1", execute=lambda: "result1"), Task(name="Task 2", execute=lambda: "result2")] - -workflow = MultiProcessWorkflow(max_workers=3, agents=agents, tasks=tasks) -``` - -### execute_task - -Executes a task and handles exceptions. - -**Arguments:** - -| Parameter | Type | Description | -|-----------|------|-------------| -| `task` | `str` | The task to execute. | -| `*args` | | Additional positional arguments for the task execution. | -| `**kwargs`| | Additional keyword arguments for the task execution. | - -**Returns:** - -| Return Type | Description | -|-------------|-------------| -| `Any` | The result of the task execution. | - -**Examples:** - -```python -result = workflow.execute_task(task="Sample Task") -print(result) -``` - -### run - -Runs the workflow. - -**Arguments:** - -| Parameter | Type | Description | -|-----------|------|-------------| -| `task` | `str` | The task to run. | -| `*args` | | Additional positional arguments for the task execution. | -| `**kwargs`| | Additional keyword arguments for the task execution. | - -**Returns:** - -| Return Type | Description | -|-------------|-------------| -| `List[Any]` | The results of all executed tasks. | - -**Examples:** - -```python -results = workflow.run(task="Sample Task") -print(results) -``` - -### Additional Examples - -#### Example 1: Simple Task Execution - -```python -from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat -from datetime import datetime -from time import sleep - -import os -from dotenv import load_dotenv - -# Load the environment variables -load_dotenv() - - -# Define a function to be used as the action -def my_action(): - print("Action executed") - - -# Define a function to be used as the condition -def my_condition(): - print("Condition checked") - return True - - -# Create an agent -agent = Agent( - llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]), - max_loops=1, - dashboard=False, -) - -# Create a task -task = Task( - description=( - "Generate a report on the top 3 biggest expenses for small" - " businesses and how businesses can save 20%" - ), - agent=agent, -) - -# Create a workflow with the task -workflow = MultiProcessWorkflow(tasks=[task]) - -# Run the workflow -results = workflow.run(task) -print(results) -``` - -#### Example 2: Workflow with Multiple Agents - -```python -from swarms import Agent, Task, MultiProcessWorkflow - -# Define tasks -def task1(): - return "Task 1 result" - -def task2(): - return "Task 2 result" - -# Create agents -agent1 = Agent(name="Agent 1", llm=OpenAIChat()) -agent2 = Agent(name="Agent 2", llm=OpenAIChat()) - -# Create tasks -task_1 = Task(name="Task 1", execute=task1) -task_2 = Task(name="Task 2", execute=task2) - -# Create a workflow -workflow = MultiProcessWorkflow(agents=[agent1, agent2], tasks=[task_1, task_2]) - -# Run the workflow -results = workflow.run(task="Example Task") -print(results) -``` - -#### Example 3: Customizing Max Workers - -```python -from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat - -# Define a task -def example_task(): - return "Task result" - -# Create an agent -agent = Agent(name="Agent 1", llm=OpenAIChat()) - -# Create a task -task = Task(name="Example Task", execute=example_task) - -# Create a workflow with custom max workers -workflow = MultiProcessWorkflow(max_workers=10, agents=[agent], tasks=[task]) - -# Run the workflow -results = workflow.run(task="Example Task") -print(results) -``` - -## Summary - -The `MultiProcessWorkflow` class provides a powerful framework for managing and executing tasks using multiple workers. With support for parallel processing, customizable workflows, and detailed logging, it is an ideal tool for complex task execution scenarios. This class enhances performance and scalability, making it suitable for a wide range of applications that require efficient task management. \ No newline at end of file diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index 07d8c2f5..53781837 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -33,7 +33,7 @@ Main class for routing tasks to different swarm types. | `flow` | str | The flow of the swarm. | | `return_json` | bool | Flag to enable/disable returning the result in JSON format. | | `auto_generate_prompts` | bool | Flag to enable/disable auto generation of prompts. | -| `swarm` | Union[AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow] | Instantiated swarm object. | +| `swarm` | Union[AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow, GroupChat, MultiAgentRouter] | Instantiated swarm object. | | `logs` | List[SwarmLog] | List of log entries captured during operations. | #### Methods: @@ -271,6 +271,39 @@ result = concurrent_router.run("Conduct a comprehensive market analysis for Prod ``` +### GroupChat + +Use Case: Simulating a group chat with multiple agents. + +```python +group_chat_router = SwarmRouter( + name="GroupChat", + description="Simulate a group chat with multiple agents", + max_loops=1, + agents=[financial_analyst, market_researcher, competitor_analyst], + swarm_type="GroupChat" +) + +result = group_chat_router.run("Conduct a comprehensive market analysis for Product X") +``` + +### MultiAgentRouter + +Use Case: Simulating a group chat with multiple agents. + +```python +multi_agent_router = SwarmRouter( + name="MultiAgentRouter", + description="Simulate a group chat with multiple agents", + max_loops=1, + agents=[financial_analyst, market_researcher, competitor_analyst], + swarm_type="MultiAgentRouter" +) + +result = multi_agent_router.run("Conduct a comprehensive market analysis for Product X") +``` + + ### Auto Select (Experimental) Autonomously selects the right swarm by conducting vector search on your input task or name or description or all 3. diff --git a/docs/swarms/wallet/api.md b/docs/swarms/wallet/api.md new file mode 100644 index 00000000..0c83bf3f --- /dev/null +++ b/docs/swarms/wallet/api.md @@ -0,0 +1,497 @@ +# swarms Wallet API Documentation + +This documentation covers the swarms Wallet API routes for managing wallets, sending tokens, and checking transactions in the swarms Platform. + +## Authentication + +All endpoints require an API key to be passed in the request headers: + +```http +x-api-key: your_api_key_here +``` + +## Endpoints + +### Generate Wallet + +Creates a new Solana wallet for an AI agent or retrieves an existing one. + +```http +POST https://swarms.world/api/solana/generate-wallet +``` + +**Response** +```json +{ + "success": true, + "data": { + "public_key": "string", + "wallet_type": "solana", + "swarms_token_address": "string" + }, + "code": "SUCCESS_001" +} +``` + +### Send Tokens +Sends swarms tokens with automatic tax handling. + +```http +POST https://swarms.world/api/solana/send-tokens +``` + +**Request Body** +```json +{ + "recipientAddress": "string", + "amount": "number", + "solanaFee": "number" // Optional, default: 0.009 +} +``` + +**Response** +```json +{ + "success": true, + "data": { + "signature": "string", + "details": { + "sender": "string", + "recipient": "string", + "daoAddress": "string", + "requestedSendAmount": "number", + "totalNeededFromAccount": "number", + "accountTax": "number", + "receivedTax": "number", + "recipientReceives": "number", + "taxBreakdown": "string", + "computeUnits": "number", + "priorityFee": "number" + } + }, + "code": "SUCCESS_001" +} +``` + +### Check Receipt +Verifies token receipt and checks balances. + +```http +GET https://swarms.world/api/solana/check-receipt?amount={amount} +``` + +**Response** +```json +{ + "success": true, + "data": { + "solana_address": "string", + "received": "number", + "expected": "number", + "matches": "boolean", + "balances": { + "sol": "number", + "swarms": "number" + }, + "swarms_address": "string" + }, + "code": "SUCCESS_001" +} +``` + +### Get Metrics +Retrieves transaction metrics and history. + +```http +GET https://swarms.world/api/solana/get-metrics +``` + +**Query Parameters** +- `page`: Page number (default: 1) +- `limit`: Items per page (default: 10, max: 100) +- `startDate`: Filter start date +- `endDate`: Filter end date +- `status`: Transaction status filter +- `type`: Transaction type filter + +**Response** +```json +{ + "success": true, + "data": { + "transactions": [{ + "id": "string", + "agent_id": "string", + "transaction_hash": "string", + "amount": "number", + "recipient": "string", + "status": "string", + "transaction_type": "string", + "created_at": "string" + }], + "pagination": { + "currentPage": "number", + "totalPages": "number", + "totalItems": "number", + "itemsPerPage": "number", + "hasMore": "boolean" + }, + "metrics": { + "totalTransactions": "number", + "totalAmountSent": "number", + "totalSuccessfulTransactions": "number", + "totalFailedTransactions": "number" + } + }, + "code": "SUCCESS_001" +} +``` + +## Error Codes + +| Code | Description | +|------|-------------| +| AUTH_001 | Missing API key | +| AUTH_002 | Invalid API key | +| BAL_001 | Insufficient SOL balance | +| BAL_002 | Insufficient token balance | +| WAL_001 | Wallet not found | +| REQ_001 | Missing required parameters | +| DB_001 | Database error | +| ERR_001 | Internal server error | + +## Transaction Details + +- Default SOL fee: 0.009 SOL +- swarms token tax: 2% from sender + 2% from sent amount +- All taxes are sent to the DAO treasury +- Token accounts are automatically created for new recipients +- Transactions use 'processed' commitment level + + +## Implementation Notes + +- All token amounts should be provided in their natural units (not in lamports/raw units) +- SOL balances are returned in SOL (not lamports) +- Token accounts are automatically created for recipients if they don't exist +- All transactions include automatic tax handling for the DAO treasury +- Compute budget and priority fees are automatically managed for optimal transaction processing + + + +## Examples + +Below are code examples in several languages that demonstrate how to use the swarms Wallet API endpoints. In these examples, replace `your_api_key_here` with your actual API key, and update any parameters as needed. + +--- + +## Python (Using `requests`) + +First, install the library if you haven’t already: + +```bash +pip install requests +``` + +**Example: Generate Wallet** + +```python +import os +import requests + +API_KEY = os.getenv("SWARMS_API_KEY") +headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" +} + +url = "https://swarms.world/api/solana/generate-wallet" +response = requests.post(url, headers=headers) + +if response.status_code == 200: + data = response.json() + print("Wallet generated:", data) +else: + print("Error:", response.text) +``` + +**Example: Send Tokens** + +```python +import requests +import json +import os + +API_KEY = os.getenv("SWARMS_API_KEY") +headers = { + "x-api-key": API_KEY, + "Content-Type": "application/json" +} + +url = "https://swarms.world/api/solana/send-tokens" +payload = { + "recipientAddress": "recipient_public_key", + "amount": 100, # Example token amount + # "solanaFee": 0.009 # Optional: use default if not provided +} + +response = requests.post(url, headers=headers, data=json.dumps(payload)) +if response.status_code == 200: + data = response.json() + print("Tokens sent:", data) +else: + print("Error:", response.text) +``` + +**Example: Check Receipt** + +```python +import requests +import os + +API_KEY = os.getenv("SWARMS_API_KEY") +headers = { + "x-api-key": API_KEY +} + +amount = 100 # The amount you expect to be received +url = f"https://swarms.world/api/solana/check-receipt?amount={amount}" + +response = requests.get(url, headers=headers) +if response.status_code == 200: + data = response.json() + print("Receipt checked:", data) +else: + print("Error:", response.text) +``` + +**Example: Get Metrics** + +```python +import requests +import os + +API_KEY = os.getenv("SWARMS_API_KEY") +headers = { + "x-api-key": API_KEY +} + +params = { + "page": 1, + "limit": 10, + # Optionally include startDate, endDate, status, type if needed. +} + +url = "https://swarms.world/api/solana/get-metrics" +response = requests.get(url, headers=headers, params=params) +if response.status_code == 200: + data = response.json() + print("Metrics:", data) +else: + print("Error:", response.text) +``` + +--- + +## Node.js (Using `axios`) + +First, install axios: + +```bash +npm install axios +``` + +**Example: Generate Wallet** + +```javascript +const axios = require('axios'); + +const API_KEY = 'your_api_key_here'; +const headers = { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' +}; + +axios.post('https://swarms.world/api/solana/generate-wallet', {}, { headers }) + .then(response => { + console.log('Wallet generated:', response.data); + }) + .catch(error => { + console.error('Error:', error.response ? error.response.data : error.message); + }); +``` + +**Example: Send Tokens** + +```javascript +const axios = require('axios'); + +const API_KEY = 'your_api_key_here'; +const headers = { + 'x-api-key': API_KEY, + 'Content-Type': 'application/json' +}; + +const payload = { + recipientAddress: 'recipient_public_key', + amount: 100, // token amount + // solanaFee: 0.009 // Optional +}; + +axios.post('https://swarms.world/api/solana/send-tokens', payload, { headers }) + .then(response => { + console.log('Tokens sent:', response.data); + }) + .catch(error => { + console.error('Error:', error.response ? error.response.data : error.message); + }); +``` + +**Example: Check Receipt** + +```javascript +const axios = require('axios'); + +const API_KEY = 'your_api_key_here'; +const headers = { 'x-api-key': API_KEY }; +const amount = 100; +const url = `https://swarms.world/api/solana/check-receipt?amount=${amount}`; + +axios.get(url, { headers }) + .then(response => { + console.log('Receipt:', response.data); + }) + .catch(error => { + console.error('Error:', error.response ? error.response.data : error.message); + }); +``` + +**Example: Get Metrics** + +```javascript +const axios = require('axios'); + +const API_KEY = 'your_api_key_here'; +const headers = { 'x-api-key': API_KEY }; + +const params = { + page: 1, + limit: 10, + // startDate: '2025-01-01', endDate: '2025-01-31', status: 'completed', type: 'send' +}; + +axios.get('https://swarms.world/api/solana/get-metrics', { headers, params }) + .then(response => { + console.log('Metrics:', response.data); + }) + .catch(error => { + console.error('Error:', error.response ? error.response.data : error.message); + }); +``` + +--- + +## cURL (Command Line) + +**Example: Generate Wallet** + +```bash +curl -X POST https://swarms.world/api/solana/generate-wallet \ + -H "x-api-key: your_api_key_here" \ + -H "Content-Type: application/json" +``` + +**Example: Send Tokens** + +```bash +curl -X POST https://swarms.world/api/solana/send-tokens \ + -H "x-api-key: your_api_key_here" \ + -H "Content-Type: application/json" \ + -d '{ + "recipientAddress": "recipient_public_key", + "amount": 100, + "solanaFee": 0.009 + }' +``` + +**Example: Check Receipt** + +```bash +curl -X GET "https://swarms.world/api/solana/check-receipt?amount=100" \ + -H "x-api-key: your_api_key_here" +``` + +**Example: Get Metrics** + +```bash +curl -X GET "https://swarms.world/api/solana/get-metrics?page=1&limit=10" \ + -H "x-api-key: your_api_key_here" +``` + +--- + +## Other Languages + +### Ruby (Using `net/http`) + +**Example: Generate Wallet** + +```ruby +require 'net/http' +require 'uri' +require 'json' + +uri = URI.parse("https://swarms.world/api/solana/generate-wallet") +request = Net::HTTP::Post.new(uri) +request["x-api-key"] = "your_api_key_here" +request["Content-Type"] = "application/json" + +response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http| + http.request(request) +end + +puts JSON.parse(response.body) +``` + +### Java (Using `HttpURLConnection`) + +**Example: Generate Wallet** + +```java +import java.io.*; +import java.net.*; +import javax.net.ssl.HttpsURLConnection; + +public class SwarmsApiExample { + public static void main(String[] args) { + try { + URL url = new URL("https://swarms.world/api/solana/generate-wallet"); + HttpsURLConnection conn = (HttpsURLConnection) url.openConnection(); + conn.setRequestMethod("POST"); + conn.setRequestProperty("x-api-key", "your_api_key_here"); + conn.setRequestProperty("Content-Type", "application/json"); + conn.setDoOutput(true); + + // If you need to send a request body, write to the output stream: + // try(OutputStream os = conn.getOutputStream()) { + // byte[] input = "{}".getBytes("utf-8"); + // os.write(input, 0, input.length); + // } + + BufferedReader br = new BufferedReader(new InputStreamReader(conn.getInputStream(), "utf-8")); + StringBuilder response = new StringBuilder(); + String responseLine = null; + while ((responseLine = br.readLine()) != null) { + response.append(responseLine.trim()); + } + System.out.println("Response: " + response.toString()); + } catch (Exception e) { + e.printStackTrace(); + } + } +} +``` + +--- + +These examples illustrate how to authenticate using the API key and perform various operations such as generating a wallet, sending tokens, checking receipts, and retrieving metrics. You can adapt these examples to other languages or frameworks as needed. Enjoy integrating with the swarms Wallet API! \ No newline at end of file diff --git a/docs/swarms_cloud/cloud_run.md b/docs/swarms_cloud/cloud_run.md new file mode 100644 index 00000000..34311159 --- /dev/null +++ b/docs/swarms_cloud/cloud_run.md @@ -0,0 +1,254 @@ +# Hosting Agents on Google Cloud Run + +This documentation provides a highly detailed, step-by-step guide to hosting your agents using Google Cloud Run. It uses a well-structured project setup that includes a Dockerfile at the root level, a folder dedicated to your API file, and a `requirements.txt` file to manage all dependencies. This guide will ensure your deployment is scalable, efficient, and easy to maintain. + +--- + +## **Project Structure** + +Your project directory should adhere to the following structure to ensure compatibility and ease of deployment: + +``` +. +├── Dockerfile +├── requirements.txt +└── api/ + └── api.py +``` + +Each component serves a specific purpose in the deployment pipeline, ensuring modularity and maintainability. + +--- + +## **Step 1: Prerequisites** + +Before you begin, make sure to satisfy the following prerequisites to avoid issues during deployment: + +1. **Google Cloud Account**: + - Create a Google Cloud account at [Google Cloud Console](https://console.cloud.google.com/). + - Enable billing for your project. Billing is necessary for accessing Cloud Run services. + +2. **Install Google Cloud SDK**: + - Follow the [installation guide](https://cloud.google.com/sdk/docs/install) to set up the Google Cloud SDK on your local machine. + +3. **Install Docker**: + - Download and install Docker by following the [official Docker installation guide](https://docs.docker.com/get-docker/). Docker is crucial for containerizing your application. + +4. **Create a Google Cloud Project**: + - Navigate to the Google Cloud Console and create a new project. Assign it a meaningful name and note the **Project ID**, as it will be used throughout this guide. + +5. **Enable Required APIs**: + - Visit the [API Library](https://console.cloud.google.com/apis/library) and enable the following APIs: + - Cloud Run API + - Cloud Build API + - Artifact Registry API + - These APIs are essential for deploying and managing your application in Cloud Run. + +--- + +## **Step 2: Creating the Files** + +### 1. **`api/api.py`** + +This is the main Python script where you define your Swarms agents and expose an API endpoint for interacting with them. Here’s an example: + +```python +from flask import Flask, request, jsonify +from swarms import Agent # Assuming `swarms` is the framework you're using + +app = Flask(__name__) + +# Example Swarm agent +agent = Agent( + agent_name="Stock-Analysis-Agent", + model_name="gpt-4o-mini", + max_loops="auto", + interactive=True, + streaming_on=True, +) + +@app.route('/run-agent', methods=['POST']) +def run_agent(): + data = request.json + task = data.get('task', '') + result = agent.run(task) + return jsonify({"result": result}) + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=8080) +``` + +This example sets up a basic API that listens for POST requests, processes a task using a Swarm agent, and returns the result as a JSON response. Customize it based on your agent’s functionality. + +--- + +### 2. **`requirements.txt`** + +This file lists all Python dependencies required for your project. Example: + +``` +flask +swarms +# add any other dependencies here +``` + +Be sure to include any additional libraries your agents rely on. Keeping this file up to date ensures smooth dependency management during deployment. + +--- + +### 3. **`Dockerfile`** + +The Dockerfile specifies how your application is containerized. Below is a sample Dockerfile for your setup: + +```dockerfile +# Use an official Python runtime as the base image +FROM python:3.10-slim + +# Set the working directory +WORKDIR /app + +# Copy requirements.txt and install dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy the application code +COPY api/ ./api/ + +# Expose port 8080 (Cloud Run default port) +EXPOSE 8080 + +# Run the application +CMD ["python", "api/api.py"] +``` + +This Dockerfile ensures your application is containerized with minimal overhead, focusing on slim images for efficiency. + +--- + +## **Step 3: Deploying to Google Cloud Run** + +### 1. **Authenticate with Google Cloud** + +Log in to your Google Cloud account by running: + +```bash +gcloud auth login +``` + +Set the active project to match your deployment target: + +```bash +gcloud config set project [PROJECT_ID] +``` + +Replace `[PROJECT_ID]` with your actual Project ID. + +--- + +### 2. **Build the Docker Image** + +Use Google Cloud's Artifact Registry to store and manage your Docker image. Follow these steps: + +1. **Create a Repository**: + +```bash +gcloud artifacts repositories create my-repo --repository-format=Docker --location=us-central1 +``` + +2. **Authenticate Docker with Google Cloud**: + +```bash +gcloud auth configure-docker us-central1-docker.pkg.dev +``` + +3. **Build and Tag the Image**: + +```bash +docker build -t us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image . +``` + +4. **Push the Image**: + +```bash +docker push us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image +``` + +--- + +### 3. **Deploy to Cloud Run** + +Deploy the application to Cloud Run with the following command: + +```bash +gcloud run deploy my-agent-service \ + --image us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image \ + --platform managed \ + --region us-central1 \ + --allow-unauthenticated +``` + +Key points: +- Replace `[PROJECT_ID]` with your actual Project ID. +- The `--allow-unauthenticated` flag makes the service publicly accessible. Exclude it to restrict access. + +--- + +## **Step 4: Testing the Deployment** + +Once the deployment is complete, test the service: + +1. Note the URL provided by Cloud Run. +2. Use `curl` or Postman to send a request. Example: + +```bash +curl -X POST [CLOUD_RUN_URL]/run-agent \ + -H "Content-Type: application/json" \ + -d '{"task": "example task"}' +``` + +This tests whether your agent processes the task correctly and returns the expected output. + +--- + +## **Step 5: Updating the Service** + +To apply changes to your application: + +1. Edit the necessary files. +2. Rebuild and push the updated Docker image: + +```bash +docker build -t us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image . +docker push us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image +``` + +3. Redeploy the service: + +```bash +gcloud run deploy my-agent-service \ + --image us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image +``` + +This ensures the latest version of your application is live. + +--- + +## **Troubleshooting** + +- **Permission Errors**: + Ensure your account has roles like Cloud Run Admin and Artifact Registry Reader. +- **Port Issues**: + Confirm the application listens on port 8080. Cloud Run expects this port by default. +- **Logs**: + Use the Google Cloud Console or CLI to review logs for debugging: + +```bash +gcloud logs read --project [PROJECT_ID] +``` + +--- + +## **Conclusion** + +By following this comprehensive guide, you can deploy your agents on Google Cloud Run with ease. This method leverages Docker for containerization and Google Cloud services for seamless scalability and management. With a robust setup like this, you can focus on enhancing your agents’ capabilities rather than worrying about deployment challenges. + diff --git a/docs/swarms_cloud/create_api.md b/docs/swarms_cloud/create_api.md new file mode 100644 index 00000000..9a9e340a --- /dev/null +++ b/docs/swarms_cloud/create_api.md @@ -0,0 +1,204 @@ +# CreateNow API Documentation + +Welcome to the CreateNow API documentation! This API enables developers to generate AI-powered content, including images, music, videos, and speech, using natural language prompts. Use the endpoints below to start generating content. + +--- + +## **1. Claim Your API Key** +To use the API, you must first claim your API key. Visit the following link to create an account and get your API key: + +### **Claim Your Key** +``` +https://createnow.xyz/account +``` + +After signing up, your API key will be available in your account dashboard. Keep it secure and include it in your API requests as a Bearer token. + +--- + +## **2. Generation Endpoint** +The generation endpoint allows you to create AI-generated content using natural language prompts. + +### **Endpoint** +``` +POST https://createnow.xyz/api/v1/generate +``` + +### **Authentication** +Include a Bearer token in the `Authorization` header for all requests: +``` +Authorization: Bearer YOUR_API_KEY +``` + +### **Basic Usage** +The simplest way to use the API is to send a prompt. The system will automatically detect the appropriate media type. + +#### **Example Request (Basic)** +```json +{ + "prompt": "a beautiful sunset over the ocean" +} +``` + +### **Advanced Options** +You can specify additional parameters for finer control over the output. + +#### **Parameters** +| Parameter | Type | Description | Default | +|----------------|-----------|---------------------------------------------------------------------------------------------------|--------------| +| `prompt` | `string` | The natural language description of the content to generate. | Required | +| `type` | `string` | The type of content to generate (`image`, `music`, `video`, `speech`). | Auto-detect | +| `count` | `integer` | The number of outputs to generate (1-4). | 1 | +| `duration` | `integer` | Duration of audio or video content in seconds (applicable to `music` and `speech`). | N/A | + +#### **Example Request (Advanced)** +```json +{ + "prompt": "create an upbeat jazz melody", + "type": "music", + "count": 2, + "duration": 30 +} +``` + +### **Response Format** + +#### **Success Response** +```json +{ + "success": true, + "outputs": [ + { + "url": "https://createnow.xyz/storage/image1.png", + "creation_id": "12345", + "share_url": "https://createnow.xyz/share/12345" + } + ], + "mediaType": "image", + "confidence": 0.95, + "detected": true +} +``` + +#### **Error Response** +```json +{ + "error": "Invalid API Key", + "status": 401 +} +``` + +--- + +## **3. Examples in Multiple Languages** + +### **Python** +```python +import requests + +url = "https://createnow.xyz/api/v1/generate" +headers = { + "Authorization": "Bearer YOUR_API_KEY", + "Content-Type": "application/json" +} + +payload = { + "prompt": "a futuristic cityscape at night", + "type": "image", + "count": 2 +} + +response = requests.post(url, json=payload, headers=headers) +print(response.json()) +``` + +### **Node.js** +```javascript +const axios = require('axios'); + +const url = "https://createnow.xyz/api/v1/generate"; +const headers = { + Authorization: "Bearer YOUR_API_KEY", + "Content-Type": "application/json" +}; + +const payload = { + prompt: "a futuristic cityscape at night", + type: "image", + count: 2 +}; + +axios.post(url, payload, { headers }) + .then(response => { + console.log(response.data); + }) + .catch(error => { + console.error(error.response.data); + }); +``` + +### **cURL** +```bash +curl -X POST https://createnow.xyz/api/v1/generate \ +-H "Authorization: Bearer YOUR_API_KEY" \ +-H "Content-Type: application/json" \ +-d '{ + "prompt": "a futuristic cityscape at night", + "type": "image", + "count": 2 +}' +``` + +### **Java** +```java +import java.net.HttpURLConnection; +import java.net.URL; +import java.io.OutputStream; + +public class CreateNowAPI { + public static void main(String[] args) throws Exception { + URL url = new URL("https://createnow.xyz/api/v1/generate"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("POST"); + conn.setRequestProperty("Authorization", "Bearer YOUR_API_KEY"); + conn.setRequestProperty("Content-Type", "application/json"); + conn.setDoOutput(true); + + String jsonPayload = "{" + + "\"prompt\": \"a futuristic cityscape at night\", " + + "\"type\": \"image\", " + + "\"count\": 2}"; + + OutputStream os = conn.getOutputStream(); + os.write(jsonPayload.getBytes()); + os.flush(); + + int responseCode = conn.getResponseCode(); + System.out.println("Response Code: " + responseCode); + } +} +``` + +--- + +## **4. Error Codes** +| Status Code | Meaning | Possible Causes | +|-------------|----------------------------------|----------------------------------------| +| 400 | Bad Request | Invalid parameters or payload. | +| 401 | Unauthorized | Invalid or missing API key. | +| 402 | Payment Required | Insufficient credits for the request. | +| 500 | Internal Server Error | Issue on the server side. | + +--- + +## **5. Notes and Limitations** +- **Maximum Prompt Length:** 1000 characters. +- **Maximum Outputs per Request:** 4. +- **Supported Media Types:** `image`, `music`, `video`, `speech`. +- **Content Shareability:** Every output includes a unique creation ID and shareable URL. +- **Auto-Detection:** Uses advanced natural language processing to determine the most appropriate media type. + +--- + +For further support or questions, please contact our support team at [support@createnow.xyz](mailto:support@createnow.xyz). + diff --git a/docs/swarms_cloud/launch.md b/docs/swarms_cloud/launch.md new file mode 100644 index 00000000..2d53e47f --- /dev/null +++ b/docs/swarms_cloud/launch.md @@ -0,0 +1,369 @@ +# Swarms Cloud API Client Documentation + +## Overview +The Swarms Cloud API Client is a production-grade Python library for interacting with the Swarms Cloud Agent API. It provides a comprehensive interface for managing, executing, and monitoring cloud-based agents. + +## Installation +```bash +pip install swarms-cloud +``` + +## Quick Start +```python +from swarms_cloud import SwarmCloudAPI, AgentCreate + +# Initialize the client +client = SwarmCloudAPI( + base_url="https://swarmcloud-285321057562.us-central1.run.app", + api_key="your_api_key_here" +) + +# Create an agent +agent_data = AgentCreate( + name="TranslateAgent", + description="Translates text between languages", + code=""" + def main(request, store): + text = request.payload.get('text', '') + return f'Translated: {text}' + """, + requirements="requests==2.25.1", + envs="DEBUG=True" +) + +new_agent = client.create_agent(agent_data) +print(f"Created agent with ID: {new_agent.id}") +``` + +## Client Configuration + +### Constructor Parameters + +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|----------|-------------| +| base_url | str | No | https://swarmcloud-285321057562.us-central1.run.app | The base URL of the SwarmCloud API | +| api_key | str | Yes | None | Your SwarmCloud API key | +| timeout | float | No | 10.0 | Request timeout in seconds | + +## Data Models + +### AgentCreate +Model for creating new agents. + +| Field | Type | Required | Default | Description | +|-------|------|----------|----------|-------------| +| name | str | Yes | - | Name of the agent | +| description | str | No | None | Description of the agent's purpose | +| code | str | Yes | - | Python code that defines the agent's behavior | +| requirements | str | No | None | Python package requirements (pip format) | +| envs | str | No | None | Environment variables for the agent | +| autoscaling | bool | No | False | Enable/disable concurrent execution scaling | + +### AgentUpdate +Model for updating existing agents. + +| Field | Type | Required | Default | Description | +|-------|------|----------|----------|-------------| +| name | str | No | None | Updated name of the agent | +| description | str | No | None | Updated description | +| code | str | No | None | Updated Python code | +| requirements | str | No | None | Updated package requirements | +| autoscaling | bool | No | None | Updated autoscaling setting | + +## API Methods + +### List Agents +Retrieve all available agents. + +```python +agents = client.list_agents() +for agent in agents: + print(f"Agent: {agent.name} (ID: {agent.id})") +``` + +**Returns**: List[AgentOut] + +### Create Agent +Create a new agent with the specified configuration. + +```python +agent_data = AgentCreate( + name="DataProcessor", + description="Processes incoming data streams", + code=""" + def main(request, store): + data = request.payload.get('data', []) + return {'processed': len(data)} + """, + requirements="pandas==1.4.0\nnumpy==1.21.0", + envs="PROCESSING_MODE=fast", + autoscaling=True +) + +new_agent = client.create_agent(agent_data) +``` + +**Returns**: AgentOut + +### Get Agent +Retrieve details of a specific agent. + +```python +agent = client.get_agent("agent_id_here") +print(f"Agent details: {agent}") +``` + +**Parameters**: +- agent_id (str): The unique identifier of the agent + +**Returns**: AgentOut + +### Update Agent +Update an existing agent's configuration. + +```python +update_data = AgentUpdate( + name="UpdatedProcessor", + description="Enhanced data processing capabilities", + code="def main(request, store):\n return {'status': 'updated'}" +) + +updated_agent = client.update_agent("agent_id_here", update_data) +``` + +**Parameters**: +- agent_id (str): The unique identifier of the agent +- update (AgentUpdate): The update data + +**Returns**: AgentOut + +### Execute Agent +Manually execute an agent with optional payload data. + +```python +# Execute with payload +result = client.execute_agent( + "agent_id_here", + payload={"text": "Hello, World!"} +) + +# Execute without payload +result = client.execute_agent("agent_id_here") +``` + +**Parameters**: +- agent_id (str): The unique identifier of the agent +- payload (Optional[Dict[str, Any]]): Execution payload data + +**Returns**: Dict[str, Any] + +### Get Agent History +Retrieve the execution history and logs for an agent. + +```python +history = client.get_agent_history("agent_id_here") +for execution in history.executions: + print(f"[{execution.timestamp}] {execution.log}") +``` + +**Parameters**: +- agent_id (str): The unique identifier of the agent + +**Returns**: AgentExecutionHistory + +### Batch Execute Agents +Execute multiple agents simultaneously with the same payload. + +```python +# Get list of agents +agents = client.list_agents() + +# Execute batch with payload +results = client.batch_execute_agents( + agents=agents[:3], # Execute first three agents + payload={"data": "test"} +) + +print(f"Batch execution results: {results}") +``` + +**Parameters**: +- agents (List[AgentOut]): List of agents to execute +- payload (Optional[Dict[str, Any]]): Shared execution payload + +**Returns**: List[Any] + +### Health Check +Check the API's health status. + +```python +status = client.health() +print(f"API Status: {status}") +``` + +**Returns**: Dict[str, Any] + +## Error Handling +The client uses exception handling to manage various error scenarios: + +```python +from swarms_cloud import SwarmCloudAPI +import httpx + +try: + client = SwarmCloudAPI(api_key="your_api_key_here") + agents = client.list_agents() +except httpx.HTTPError as http_err: + print(f"HTTP error occurred: {http_err}") +except Exception as err: + print(f"An unexpected error occurred: {err}") +finally: + client.close() +``` + +## Context Manager Support +The client can be used with Python's context manager: + +```python +with SwarmCloudAPI(api_key="your_api_key_here") as client: + status = client.health() + print(f"API Status: {status}") + # Client automatically closes after the with block +``` + +## Best Practices + +1. Always close the client when finished: +```python +client = SwarmCloudAPI(api_key="your_api_key_here") +try: + # Your code here +finally: + client.close() +``` + +2. Use context managers for automatic cleanup: +```python +with SwarmCloudAPI(api_key="your_api_key_here") as client: + # Your code here +``` + +3. Handle errors appropriately: +```python +try: + result = client.execute_agent("agent_id", payload={"data": "test"}) +except httpx.HTTPError as e: + logger.error(f"HTTP error: {e}") + # Handle error appropriately +``` + +4. Set appropriate timeouts for your use case: +```python +client = SwarmCloudAPI( + api_key="your_api_key_here", + timeout=30.0 # Longer timeout for complex operations +) +``` + +## Complete Example +Here's a complete example showcasing various features of the client: + +```python +from swarms_cloud import SwarmCloudAPI, AgentCreate, AgentUpdate +import httpx + +def main(): + with SwarmCloudAPI(api_key="your_api_key_here") as client: + # Create an agent + agent_data = AgentCreate( + name="DataAnalyzer", + description="Analyzes incoming data streams", + code=""" + def main(request, store): + data = request.payload.get('data', []) + return { + 'count': len(data), + 'summary': 'Data processed successfully' + } + """, + requirements="pandas==1.4.0", + autoscaling=True + ) + + try: + # Create the agent + new_agent = client.create_agent(agent_data) + print(f"Created agent: {new_agent.name} (ID: {new_agent.id})") + + # Execute the agent + result = client.execute_agent( + new_agent.id, + payload={"data": [1, 2, 3, 4, 5]} + ) + print(f"Execution result: {result}") + + # Update the agent + update_data = AgentUpdate( + description="Enhanced data analysis capabilities" + ) + updated_agent = client.update_agent(new_agent.id, update_data) + print(f"Updated agent: {updated_agent.name}") + + # Get execution history + history = client.get_agent_history(new_agent.id) + print(f"Execution history: {history}") + + except httpx.HTTPError as e: + print(f"HTTP error occurred: {e}") + except Exception as e: + print(f"Unexpected error: {e}") + +if __name__ == "__main__": + main() +``` + +## Logging +The client uses the `loguru` library for logging. You can configure the logging level and format: + +```python +from loguru import logger + +# Configure logging +logger.add("swarmcloud.log", rotation="500 MB") + +client = SwarmCloudAPI(api_key="your_api_key_here") +``` + +## Performance Considerations + +1. **Connection Reuse**: The client reuses HTTP connections by default, improving performance for multiple requests. + +2. **Timeout Configuration**: Set appropriate timeouts based on your use case: +```python +client = SwarmCloudAPI( + api_key="your_api_key_here", + timeout=5.0 # Shorter timeout for time-sensitive operations +) +``` + +3. **Batch Operations**: Use batch_execute_agents for multiple agent executions: +```python +results = client.batch_execute_agents( + agents=agents, + payload=shared_payload +) +``` + +## Rate Limiting +The client respects API rate limits but does not implement retry logic. Implement your own retry mechanism if needed: + +```python +from tenacity import retry, stop_after_attempt, wait_exponential + +@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) +def execute_with_retry(client, agent_id, payload): + return client.execute_agent(agent_id, payload) +``` + +## Thread Safety +The client is not thread-safe by default. For concurrent usage, create separate client instances for each thread or implement appropriate synchronization mechanisms. \ No newline at end of file diff --git a/docs/swarms_memory/index.md b/docs/swarms_memory/index.md index 3d96b4ef..3b4011b0 100644 --- a/docs/swarms_memory/index.md +++ b/docs/swarms_memory/index.md @@ -161,7 +161,7 @@ print(result) We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments. - **🐦 Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform) -- **📢 Discord**: [Join the Agora Discord](https://discord.gg/agora) +- **📢 Discord**: [Join the Agora Discord](https://discord.gg/jM3Z6M9uMq) - **Swarms Platform**: [Visit our website](https://swarms.ai) - **📙 Documentation**: [Read the Docs](https://docs.swarms.ai) diff --git a/docs/swarms_platform/account_management.md b/docs/swarms_platform/account_management.md new file mode 100644 index 00000000..7ef05f18 --- /dev/null +++ b/docs/swarms_platform/account_management.md @@ -0,0 +1,189 @@ +# Swarms Platform Account Management Documentation + +This guide provides comprehensive, production-grade documentation for managing your account on the Swarms Platform. It covers account settings, profile management, billing, payment methods, subscription details, and cryptocurrency wallet management. Use this documentation to navigate the account management interface, understand available options, and perform account-related operations efficiently and securely. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Accessing the Account Management Page](#accessing-the-account-management-page) +3. [Account Settings](#account-settings) + - [Theme Mode](#theme-mode) +4. [Profile Management](#profile-management) + - [Profile Information](#profile-information) + - [Password Management](#password-management) +5. [Billing and Payment Methods](#billing-and-payment-methods) + - [Subscription Status](#subscription-status) + - [Payment Methods](#payment-methods) +6. [Cryptocurrency Wallet](#cryptocurrency-wallet) + - [Wallet Overview](#wallet-overview) + - [Exchange and Transaction History](#exchange-and-transaction-history) +7. [Additional Resources](#additional-resources) + +--- + +## Overview + +The Swarms Platform account management page, available at [https://swarms.world/platform/account](https://swarms.world/platform/account), allows you to configure and update your account settings and preferences. From here, you can manage the appearance of the platform, view and update profile details, manage your billing information and subscriptions, and handle your cryptocurrency wallet operations. + +--- + +## Accessing the Account Management Page + +To access your account management dashboard: +1. Log in to your Swarms Platform account. +2. Navigate to [https://swarms.world/platform/account](https://swarms.world/platform/account). + +Once on this page, you will see several sections dedicated to different aspects of your account: + +- **Account Settings:** Customize the platform appearance and user interface. +- **Profile:** View and manage personal details. +- **Billing:** Review credits, invoices, and manage your payment methods. +- **Crypto:** Manage your cryptocurrency wallet and transactions. + +--- + +## Account Settings + +This section allows you to modify your personal account preferences, including the visual theme. + +### Theme Mode + +You can choose between different theme options to tailor your user experience: + +- **Single Theme:** + A fixed theme, independent of system settings. + - **Example:** + - **Logo:** Swarms logo + - **Terminal Command:** + ```bash + pip3 install -U swarms + ``` + - **Theme Options:** + - **light** + - **dark (default)** + +- **Sync with System Theme:** + Automatically adjusts the platform theme to match your system's theme settings. + +Select the theme mode that best fits your workflow. Changes are applied immediately across the platform. + +--- + +## Profile Management + +### Profile Information + +The Profile section allows you to view and update your personal details: + +- **View Details:** + Your current profile information is displayed, including contact details, username, and any additional settings. + +- **Manage Profile:** + Options to update your information, ensuring your account details remain current. + +### Password Management + +For security purposes, it is important to regularly update your password: + +- **Change Password:** + Select the **"Change password"** option to update your login credentials. + Ensure you choose a strong password and keep it confidential. + +--- + +## Billing and Payment Methods + +The Billing section helps you manage financial aspects of your account, including credits, invoices, and subscriptions. + +### Subscription Status + +Your subscription details are clearly displayed: + +- **Current Plan:** + Options include **Free**, **Premium**, or **Enterprise**. + +- **Status:** + The active subscription status is indicated (e.g., "Active"). + +- **Customer Portal:** + An option to open the customer portal for additional billing and subscription management. + +### Payment Methods + +Manage your payment methods and review your billing details: + +- **Manage Cards:** + View existing payment methods. + **Example Entry:** + - **Card Type:** mastercard + - **Last 4 Digits:** ending in 9491 + - **Expiry Date:** 2030/2 + +- **Add Card:** + Use the **"Add Card"** option to register a new payment method securely. + +### Credit System + +Details of the credits available for your account: + +- **Credits Available:** + Displays the current credit balance (e.g., `$20.00`). + +- **Charge:** + Option to apply charges against your available credits. + +- **Invoice:** + Review or download your invoices. + +--- + +## Cryptocurrency Wallet + +The Crypto section provides management tools for your cryptocurrency wallet and associated transactions. + +### Wallet Overview + +- **Connected Wallet:** + Displays your linked wallet information. + - **Example:** + - **Wallet Identifier:** A truncated wallet ID (e.g., `EmVa...79Vb`) + +- **$swarms Balance and Price:** + - **Balance:** + Displays your current $swarms balance (e.g., `0.00`). + - **Price:** + Current market price for $swarms (e.g., `$0.0400`). + +### Exchange and Transaction History + +- **Exchange Functionality:** + Option to exchange $swarms tokens for credits directly through the platform. + +- **Transaction History:** + View a detailed log of wallet transactions, ensuring full transparency over all exchanges and wallet activity. + +--- + +## Additional Resources + +For further assistance or to learn more about managing your account on the Swarms Platform, refer to the following resources: + +- [Help Center](https://swarms.world/help) +- [Customer Support](https://cal.com/swarms) +- [API Documentation](https://swarms.world/platform/api-keys) (for developers) + +--- + +## Best Practices + +- **Regular Updates:** + Periodically review your account settings, profile, and payment methods to ensure they are up-to-date. + +- **Security Measures:** + Always use strong, unique passwords and consider enabling two-factor authentication if available. + +- **Monitor Transactions:** + Regularly check your billing and wallet transaction history to detect any unauthorized activities promptly. + diff --git a/docs/swarms_platform/apikeys.md b/docs/swarms_platform/apikeys.md new file mode 100644 index 00000000..b34d4dbf --- /dev/null +++ b/docs/swarms_platform/apikeys.md @@ -0,0 +1,87 @@ +# Swarms Platform API Keys Documentation + +This document provides detailed information on managing API keys within the Swarms Platform. API keys grant programmatic access to your account and should be handled securely. Follow the guidelines below to manage your API keys safely and effectively. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Viewing Your API Keys](#viewing-your-api-keys) +3. [Creating a New API Key](#creating-a-new-api-key) +4. [Security Guidelines](#security-guidelines) +5. [Frequently Asked Questions](#frequently-asked-questions) + +--- + +## Overview + +API keys are unique credentials that allow you to interact with the Swarms Platform programmatically. These keys enable you to make authenticated API requests to access or modify your data. **Important:** Once a secret API key is generated, it will not be displayed again. Ensure you store it securely, as it cannot be retrieved from the platform later. + +--- + +## Viewing Your API Keys + +When you navigate to the API Keys page ([https://swarms.world/platform/api-keys](https://swarms.world/platform/api-keys)), you will see a list of your API keys along with the following information: + +### Key Details: + +- **Name:** A label you assign to your API key to help you identify it. +- **Key:** The secret API key is only partially visible here for security reasons. +- **Created Date:** The date when the API key was generated. +- **Actions:** Options available for managing the key (e.g., deleting an API key). + +--- + +## Creating a New API Key + +To generate a new API key, follow these steps: + +1. **Attach a Credit Card:** + Before creating a new API key, ensure that your account has a credit card attached. This is required for authentication and billing purposes. + +2. **Access the API Keys Page:** + Navigate to [https://swarms.world/platform/api-keys](https://swarms.world/platform/api-keys). + +3. **Generate a New Key:** + Click on the **"Create new API key"** button. The system will generate a new secret API key for your account. + +4. **Store Your API Key Securely:** + Once generated, the full API key will be displayed only once. Copy and store it in a secure location, as it will not be displayed again. + **Note:** Do not share your API key with anyone or expose it in any client-side code (e.g., browser JavaScript). + +--- + +## Security Guidelines + +- **Confidentiality:** + Your API keys are sensitive credentials. Do not share them with anyone or include them in public repositories or client-side code. + +- **Storage:** + Store your API keys in secure, encrypted storage. Avoid saving them in plain text files or unsecured locations. + +- **Rotation:** + If you suspect that your API key has been compromised, immediately delete it and create a new one. + +- **Access Control:** + Limit access to your API keys to only those systems and personnel who absolutely require it. + +--- + +## Frequently Asked Questions + +### Q1: **Why do I need a credit card attached to my account to create an API key?** +**A:** The requirement to attach a credit card helps verify your identity and manage billing, ensuring responsible usage of the API services provided by the Swarms Platform. + +### Q2: **What happens if I lose my API key?** +**A:** If you lose your API key, you will need to generate a new one. The platform does not store the full key after its initial generation, so recovery is not possible. + +### Q3: **How can I delete an API key?** +**A:** On the API Keys page, locate the key you wish to delete and click the **"Delete"** action next to it. This will revoke the key's access immediately. + +### Q4: **Can I have multiple API keys?** +**A:** Yes, you can generate and manage multiple API keys. Use naming conventions to keep track of their usage and purpose. + +--- + +For any further questions or issues regarding API key management, please refer to our [Help Center](https://swarms.world/help) or contact our support team. \ No newline at end of file diff --git a/docs/swarms_platform/index.md b/docs/swarms_platform/index.md index 4347c639..2ece299a 100644 --- a/docs/swarms_platform/index.md +++ b/docs/swarms_platform/index.md @@ -113,9 +113,9 @@ To further enhance your understanding and usage of the Swarms Platform, explore ### Links - [API Documentation](https://docs.swarms.world) -- [Community Forums](https://discord.com/servers/agora-999382051935506503) +- [Community Forums](https://discord.gg/swarms) - [Tutorials and Guides](https://docs.swarms.world)) -- [Support](https://discord.com/servers/agora-999382051935506503) +- [Support](https://discord.gg/swarms) ## Conclusion diff --git a/docs/swarms_tools/finance.md b/docs/swarms_tools/finance.md new file mode 100644 index 00000000..1cd1c498 --- /dev/null +++ b/docs/swarms_tools/finance.md @@ -0,0 +1,324 @@ +# Swarms Finance Tools Documentation + +## Installation + +```bash +pip3 install -U swarms-tools yfinance requests httpx pandas loguru backoff web3 solana spl-token +``` + +## Environment Variables + +Create a `.env` file in your project root with the following variables (as needed): + +| Environment Variable | Description | Required For | +|---------------------|-------------|--------------| +| `COINBASE_API_KEY` | Coinbase API Key | Coinbase Trading | +| `COINBASE_API_SECRET` | Coinbase API Secret | Coinbase Trading | +| `COINBASE_API_PASSPHRASE` | Coinbase API Passphrase | Coinbase Trading | +| `COINMARKETCAP_API_KEY` | CoinMarketCap API Key | CoinMarketCap Data | +| `HELIUS_API_KEY` | Helius API Key | Solana Data | +| `EODHD_API_KEY` | EODHD API Key | Stock News | +| `OKX_API_KEY` | OKX API Key | OKX Trading | +| `OKX_API_SECRET` | OKX API Secret | OKX Trading | +| `OKX_PASSPHRASE` | OKX Passphrase | OKX Trading | + +## Tools Overview + +| Tool | Description | Requires API Key | +|------|-------------|-----------------| +| Yahoo Finance | Real-time stock market data | No | +| CoinGecko | Cryptocurrency market data | No | +| Coinbase | Cryptocurrency trading and data | Yes | +| CoinMarketCap | Cryptocurrency market data | Yes | +| Helius | Solana blockchain data | Yes | +| DexScreener | DEX trading pairs and data | No | +| HTX (Huobi) | Cryptocurrency exchange data | No | +| OKX | Cryptocurrency exchange data | Yes | +| EODHD | Stock market news | Yes | +| Jupiter | Solana DEX aggregator | No | +| Sector Analysis | GICS sector ETF analysis | No | +| Solana Tools | Solana wallet and token tools | Yes | + +## Detailed Documentation + +### Yahoo Finance API + +Fetch real-time and historical stock market data. + +```python +from swarms_tools.finance import yahoo_finance_api + +# Fetch data for single stock +data = yahoo_finance_api(["AAPL"]) + +# Fetch data for multiple stocks +data = yahoo_finance_api(["AAPL", "GOOG", "MSFT"]) +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| stock_symbols | List[str] | List of stock symbols | Yes | + +### CoinGecko API + +Fetch comprehensive cryptocurrency data. + +```python +from swarms_tools.finance import coin_gecko_coin_api + +# Fetch Bitcoin data +data = coin_gecko_coin_api("bitcoin") +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| coin | str | Cryptocurrency ID (e.g., 'bitcoin') | Yes | + +### Coinbase Trading + +Execute trades and fetch market data from Coinbase. + +```python +from swarms_tools.finance import get_coin_data, place_buy_order, place_sell_order + +# Fetch coin data +data = get_coin_data("BTC-USD") + +# Place orders +buy_order = place_buy_order("BTC-USD", amount=100) # Buy $100 worth of BTC +sell_order = place_sell_order("BTC-USD", amount=0.01) # Sell 0.01 BTC +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| symbol | str | Trading pair (e.g., 'BTC-USD') | Yes | +| amount | Union[str, float, Decimal] | Trade amount | Yes | +| sandbox | bool | Use sandbox environment | No | + +### CoinMarketCap API + +Fetch cryptocurrency market data from CoinMarketCap. + +```python +from swarms_tools.finance import coinmarketcap_api + +# Fetch single coin data +data = coinmarketcap_api(["Bitcoin"]) + +# Fetch multiple coins +data = coinmarketcap_api(["Bitcoin", "Ethereum", "Tether"]) +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| coin_names | Optional[List[str]] | List of coin names | No | + +### Helius API (Solana) + +Fetch Solana blockchain data. + +```python +from swarms_tools.finance import helius_api_tool + +# Fetch account data +account_data = helius_api_tool("account", "account_address") + +# Fetch transaction data +tx_data = helius_api_tool("transaction", "tx_signature") + +# Fetch token data +token_data = helius_api_tool("token", "token_mint_address") +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| action | str | Type of action ('account', 'transaction', 'token') | Yes | +| identifier | str | Address/signature to query | Yes | + +### DexScreener API + +Fetch DEX trading pair data. + +```python +from swarms_tools.finance import ( + fetch_dex_screener_profiles, + fetch_latest_token_boosts, + fetch_solana_token_pairs +) + +# Fetch latest profiles +profiles = fetch_dex_screener_profiles() + +# Fetch token boosts +boosts = fetch_latest_token_boosts() + +# Fetch Solana pairs +pairs = fetch_solana_token_pairs(["token_address"]) +``` + +### HTX (Huobi) API + +Fetch cryptocurrency data from HTX. + +```python +from swarms_tools.finance import fetch_htx_data + +# Fetch coin data +data = fetch_htx_data("BTC") +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| coin_name | str | Cryptocurrency symbol | Yes | + +### OKX API + +Fetch cryptocurrency data from OKX. + +```python +from swarms_tools.finance import okx_api_tool + +# Fetch single coin +data = okx_api_tool(["BTC-USDT"]) + +# Fetch multiple coins +data = okx_api_tool(["BTC-USDT", "ETH-USDT"]) +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| coin_symbols | Optional[List[str]] | List of trading pairs | No | + +### EODHD Stock News + +Fetch stock market news. + +```python +from swarms_tools.finance import fetch_stock_news + +# Fetch news for a stock +news = fetch_stock_news("AAPL") +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| stock_name | str | Stock symbol | Yes | + +### Jupiter (Solana DEX) + +Fetch Solana DEX prices. + +```python +from swarms_tools.finance import get_jupiter_price + +# Fetch price data +price = get_jupiter_price(input_mint="input_token", output_mint="output_token") +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| input_mint | str | Input token mint address | Yes | +| output_mint | str | Output token mint address | Yes | + +### Sector Analysis + +Analyze GICS sector ETFs. + +```python +from swarms_tools.finance.sector_analysis import analyze_index_sectors + +# Run sector analysis +analyze_index_sectors() +``` + +### Solana Tools + +Check Solana wallet balances and manage tokens. + +```python +from swarms_tools.finance import check_solana_balance, check_multiple_wallets + +# Check single wallet +balance = check_solana_balance("wallet_address") + +# Check multiple wallets +balances = check_multiple_wallets(["wallet1", "wallet2"]) +``` + +**Arguments:** + +| Parameter | Type | Description | Required | +|-----------|------|-------------|----------| +| wallet_address | str | Solana wallet address | Yes | +| wallet_addresses | List[str] | List of wallet addresses | Yes | + +## Complete Example + +Here's a comprehensive example using multiple tools: + +```python +from swarms_tools.finance import ( + yahoo_finance_api, + coin_gecko_coin_api, + coinmarketcap_api, + fetch_htx_data +) + +# Fetch stock data +stocks = yahoo_finance_api(["AAPL", "GOOG"]) +print("Stock Data:", stocks) + +# Fetch crypto data from multiple sources +bitcoin_cg = coin_gecko_coin_api("bitcoin") +print("Bitcoin Data (CoinGecko):", bitcoin_cg) + +crypto_cmc = coinmarketcap_api(["Bitcoin", "Ethereum"]) +print("Crypto Data (CoinMarketCap):", crypto_cmc) + +btc_htx = fetch_htx_data("BTC") +print("Bitcoin Data (HTX):", btc_htx) +``` + +## Error Handling + +All tools include proper error handling and logging. Errors are logged using the `loguru` logger. Example error handling: + +```python +from loguru import logger + +try: + data = yahoo_finance_api(["INVALID"]) +except Exception as e: + logger.error(f"Error fetching stock data: {e}") +``` + +## Rate Limits + +Please be aware of rate limits for various APIs: +- CoinGecko: 50 calls/minute (free tier) +- CoinMarketCap: Varies by subscription +- Helius: Varies by subscription +- DexScreener: 300 calls/minute for pairs, 60 calls/minute for profiles +- Other APIs: Refer to respective documentation + +## Dependencies + +The package automatically handles most dependencies, but you may need to install some manually: diff --git a/docs/swarms_tools/overview.md b/docs/swarms_tools/overview.md new file mode 100644 index 00000000..ad896fa9 --- /dev/null +++ b/docs/swarms_tools/overview.md @@ -0,0 +1,239 @@ +# Swarms Tools + + +Welcome to **Swarms Tools**, the ultimate package for integrating **cutting-edge APIs** into Python functions with seamless multi-agent system compatibility. Designed for enterprises at the forefront of innovation, **Swarms Tools** is your key to simplifying complexity and unlocking operational excellence. + +--- + +## 🚀 Features + +- **Unified API Integration**: Ready-to-use Python functions for financial data, social media, IoT, and more. +- **Enterprise-Grade Design**: Comprehensive type hints, structured outputs, and robust documentation. +- **Agent-Ready Framework**: Optimized for seamless integration into Swarms' multi-agent orchestration systems. +- **Expandable Architecture**: Easily extend functionality with a standardized schema for new tools. + +--- + +## 🔧 Installation + +```bash +pip3 install -U swarms-tools +``` + +--- + +## 📂 Directory Structure + +```plaintext +swarms-tools/ +├── swarms_tools/ +│ ├── finance/ +│ │ ├── htx_tool.py +│ │ ├── eodh_api.py +│ │ └── coingecko_tool.py +│ ├── social_media/ +│ │ ├── telegram_tool.py +│ ├── utilities/ +│ │ └── logging.py +├── tests/ +│ ├── test_financial_data.py +│ └── test_social_media.py +└── README.md +``` + +--- + +## 💼 Use Cases + + + +## Finance + +Explore our diverse range of financial tools, designed to streamline your operations. If you need a tool not listed, feel free to submit an issue or accelerate integration by contributing a pull request with your tool of choice. + +| **Tool Name** | **Function** | **Description** | +|---------------------------|--------------------------|---------------------------------------------------------------------------------| +| `fetch_stock_news` | `fetch_stock_news` | Fetches the latest stock news and updates. | +| `fetch_htx_data` | `fetch_htx_data` | Retrieves financial data from the HTX platform. | +| `yahoo_finance_api` | `yahoo_finance_api` | Fetches comprehensive stock data from Yahoo Finance, including prices and trends. | +| `coin_gecko_coin_api` | `coin_gecko_coin_api` | Fetches cryptocurrency data from CoinGecko, including market and price information. | +| `helius_api_tool` | `helius_api_tool` | Retrieves blockchain account, transaction, or token data using the Helius API. | +| `okx_api_tool` | `okx_api_tool` | Fetches detailed cryptocurrency data for coins from the OKX exchange. | + + +### Financial Data Retrieval +Enable precise and actionable financial insights: + +#### Example 1: Fetch Historical Data +```python +from swarms_tools import fetch_htx_data + +# Fetch historical trading data for "Swarms Corporation" +response = fetch_htx_data("swarms") +print(response) +``` + +#### Example 2: Stock News Analysis +```python +from swarms_tools import fetch_stock_news + +# Retrieve latest stock news for Apple +news = fetch_stock_news("AAPL") +print(news) +``` + +#### Example 3: Cryptocurrency Metrics +```python +from swarms_tools import coin_gecko_coin_api + +# Fetch live data for Bitcoin +crypto_data = coin_gecko_coin_api("bitcoin") +print(crypto_data) +``` + +### Social Media Automation +Streamline communication and engagement: + +#### Example: Telegram Bot Messaging +```python +from swarms_tools import telegram_dm_or_tag_api + +def send_alert(response: str): + telegram_dm_or_tag_api(response) + +# Send a message to a user or group +send_alert("Mission-critical update from Swarms.") +``` + +--- + +## Dex Screener + +This is a tool that allows you to fetch data from the Dex Screener API. It supports multiple chains and multiple tokens. + +```python +from swarms_tools.finance.dex_screener import ( + fetch_latest_token_boosts, + fetch_dex_screener_profiles, +) + + +fetch_dex_screener_profiles() +fetch_latest_token_boosts() + +``` + +--- + + +## Structs +The tool chainer enables the execution of multiple tools in a sequence, allowing for the aggregation of their results in either a parallel or sequential manner. + +```python +# Example usage +from loguru import logger + +from swarms_tools.structs import tool_chainer + + +if __name__ == "__main__": + logger.add("tool_chainer.log", rotation="500 MB", level="INFO") + + # Example tools + def tool1(): + return "Tool1 Result" + + def tool2(): + return "Tool2 Result" + + # def tool3(): + # raise ValueError("Simulated error in Tool3") + + tools = [tool1, tool2] + + # Parallel execution + parallel_results = tool_chainer(tools, parallel=True) + print("Parallel Results:", parallel_results) + + # Sequential execution + # sequential_results = tool_chainer(tools, parallel=False) + # print("Sequential Results:", sequential_results) + +``` +--- + +## 🧩 Standardized Schema + +Every tool in **Swarms Tools** adheres to a strict schema for maintainability and interoperability: + +### Schema Template + +1. **Functionality**: + - Encapsulate API logic into a modular, reusable function. + +2. **Typing**: + - Leverage Python type hints for input validation and clarity. + + Example: + ```python + def fetch_data(symbol: str, date_range: str) -> str: + """ + Fetch financial data for a given symbol and date range. + + Args: + symbol (str): Ticker symbol of the asset. + date_range (str): Timeframe for the data (e.g., '1d', '1m', '1y'). + + Returns: + dict: A dictionary containing financial metrics. + """ + pass + ``` + +3. **Documentation**: + - Include detailed docstrings with parameter explanations and usage examples. + +4. **Output Standardization**: + - Ensure consistent outputs (e.g., strings) for easy downstream agent integration. + +5. **API-Key Management**: + - All API keys must be fetched with `os.getenv("YOUR_KEY")` + + +--- + +## 📖 Documentation + +Comprehensive documentation is available to guide developers and enterprises. Visit our [official docs](https://docs.swarms.world) for detailed API references, usage examples, and best practices. + +--- + +## 🛠 Contributing + +We welcome contributions from the global developer community. To contribute: + +1. **Fork the Repository**: Start by forking the repository. +2. **Create a Feature Branch**: Use a descriptive branch name: `feature/add-new-tool`. +3. **Commit Your Changes**: Write meaningful commit messages. +4. **Submit a Pull Request**: Open a pull request for review. + +--- + +## 🛡️ License + +This project is licensed under the **MIT License**. See the [LICENSE](LICENSE) file for details. + +--- + +## 🌠 Join the Future + +Explore the limitless possibilities of agent-based systems. Together, we can build a smarter, faster, and more interconnected world. + +**Visit us:** [Swarms Corporation](https://swarms.ai) +**Follow us:** [Twitter](https://twitter.com/swarms_corp) + +--- + +**"The future belongs to those who dare to automate it."** +**— The Swarms Corporation** + diff --git a/docs/swarms_tools/search.md b/docs/swarms_tools/search.md new file mode 100644 index 00000000..330885d8 --- /dev/null +++ b/docs/swarms_tools/search.md @@ -0,0 +1,172 @@ +# Search Tools Documentation + +This documentation covers the search tools available in the `swarms-tools` package. + +## Installation + +```bash +pip3 install -U swarms-tools +``` + +## Environment Variables Required + +Create a `.env` file in your project root with the following API keys: + +```bash +# Bing Search API +BING_API_KEY=your_bing_api_key + +# Google Search API +GOOGLE_API_KEY=your_google_api_key +GOOGLE_CX=your_google_cx_id +GEMINI_API_KEY=your_gemini_api_key + +# Exa AI API +EXA_API_KEY=your_exa_api_key +``` + +## Tools Overview + +### 1. Bing Search Tool + +The Bing Search tool allows you to fetch web articles using the Bing Web Search API. + +#### Function: `fetch_web_articles_bing_api` + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| query | str | Yes | The search query to retrieve articles | + +#### Example Usage: + +```python +from swarms_tools.search import fetch_web_articles_bing_api + +# Fetch articles about AI +results = fetch_web_articles_bing_api("swarms ai github") +print(results) +``` + +### 2. Exa AI Search Tool + +The Exa AI tool is designed for searching research papers and academic content. + +#### Function: `search_exa_ai` + +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|---------|-------------| +| query | str | Yes | "Latest developments in LLM capabilities" | Search query | +| num_results | int | No | 10 | Number of results to return | +| auto_prompt | bool | No | True | Whether to use auto-prompting | +| include_domains | List[str] | No | ["arxiv.org", "paperswithcode.com"] | Domains to include | +| exclude_domains | List[str] | No | [] | Domains to exclude | +| category | str | No | "research paper" | Category of search | + +#### Example Usage: + +```python +from swarms_tools.search import search_exa_ai + +# Search for research papers +results = search_exa_ai( + query="Latest developments in LLM capabilities", + num_results=5, + include_domains=["arxiv.org"] +) +print(results) +``` + +### 3. Google Search Tool + +A comprehensive search tool that uses Google Custom Search API and includes content extraction and summarization using Gemini. + +#### Class: `WebsiteChecker` + +| Method | Parameters | Description | +|--------|------------|-------------| +| search | query: str | Main search function that fetches, processes, and summarizes results | + +#### Example Usage: + +```python +from swarms_tools.search import WebsiteChecker + +# Initialize with an agent (required for summarization) +checker = WebsiteChecker(agent=your_agent_function) + +# Perform search +async def search_example(): + results = await checker.search("who won elections 2024 us") + print(results) + +# For synchronous usage +from swarms_tools.search import search + +results = search("who won elections 2024 us", agent=your_agent_function) +print(results) +``` + +## Features + +- **Bing Search**: Fetch and parse web articles with structured output +- **Exa AI**: Specialized academic and research paper search +- **Google Search**: + - Custom search with content extraction + - Concurrent URL processing + - Content summarization using Gemini + - Progress tracking + - Automatic retry mechanisms + - Results saved to JSON + +## Dependencies + +The tools automatically handle dependency installation, but here are the main requirements: + +```python +aiohttp +asyncio +beautifulsoup4 +google-generativeai +html2text +playwright +python-dotenv +rich +tenacity +``` + +## Error Handling + +All tools include robust error handling: +- Automatic retries for failed requests +- Timeout handling +- Rate limiting consideration +- Detailed error messages + +## Output Format + +Each tool provides structured output: + +- **Bing Search**: Returns formatted string with article details +- **Exa AI**: Returns JSON response with search results +- **Google Search**: Returns summarized content with sections: + - Key Findings + - Important Details + - Sources + +## Best Practices + +1. Always store API keys in environment variables +2. Use appropriate error handling +3. Consider rate limits of the APIs +4. Cache results when appropriate +5. Monitor API usage and costs + +## Limitations + +- Bing Search: Limited to 4 articles per query +- Exa AI: Focused on academic content +- Google Search: Requires Gemini API for summarization + +## Support + +For issues and feature requests, please visit the [GitHub repository](https://github.com/swarms-tools). \ No newline at end of file diff --git a/docs/swarms_tools/twitter.md b/docs/swarms_tools/twitter.md new file mode 100644 index 00000000..23ec8e27 --- /dev/null +++ b/docs/swarms_tools/twitter.md @@ -0,0 +1,325 @@ +# Twitter Tool Documentation + +## Overview +The Twitter Tool provides a convenient interface for interacting with Twitter's API through the swarms-tools package. This documentation covers the initialization process and available functions for posting, replying, liking, and quoting tweets, as well as retrieving metrics. + +## Installation +```bash +pip install swarms-tools +``` + +## Authentication +The Twitter Tool requires Twitter API credentials for authentication. These should be stored as environment variables: + +```python +TWITTER_ID=your_twitter_id +TWITTER_NAME=your_twitter_name +TWITTER_DESCRIPTION=your_twitter_description +TWITTER_API_KEY=your_api_key +TWITTER_API_SECRET_KEY=your_api_secret_key +TWITTER_ACCESS_TOKEN=your_access_token +TWITTER_ACCESS_TOKEN_SECRET=your_access_token_secret +``` + +## Initialization + +### TwitterTool Configuration Options + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| id | str | Yes | Unique identifier for the Twitter tool instance | +| name | str | Yes | Name of the Twitter tool instance | +| description | str | No | Description of the tool's purpose | +| credentials | dict | Yes | Dictionary containing Twitter API credentials | + +### Credentials Dictionary Structure + +| Key | Type | Required | Description | +|-----|------|----------|-------------| +| apiKey | str | Yes | Twitter API Key | +| apiSecretKey | str | Yes | Twitter API Secret Key | +| accessToken | str | Yes | Twitter Access Token | +| accessTokenSecret | str | Yes | Twitter Access Token Secret | + +## Available Functions + +### initialize_twitter_tool() + +Creates and returns a new instance of the TwitterTool. + +```python +def initialize_twitter_tool() -> TwitterTool: +``` + +Returns: +- TwitterTool: Initialized Twitter tool instance + +### post_tweet() + +Posts a new tweet to Twitter. + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| tweet | str | Yes | Text content of the tweet to post | + +Raises: +- tweepy.TweepyException: If tweet posting fails + +### reply_tweet() + +Replies to an existing tweet. + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| tweet_id | int | Yes | ID of the tweet to reply to | +| reply | str | Yes | Text content of the reply | + +Raises: +- tweepy.TweepyException: If reply posting fails + +### like_tweet() + +Likes a specified tweet. + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| tweet_id | int | Yes | ID of the tweet to like | + +Raises: +- tweepy.TweepyException: If liking the tweet fails + +### quote_tweet() + +Creates a quote tweet. + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| tweet_id | int | Yes | ID of the tweet to quote | +| quote | str | Yes | Text content to add to the quoted tweet | + +Raises: +- tweepy.TweepyException: If quote tweet creation fails + +### get_metrics() + +Retrieves Twitter metrics. + +Returns: +- Dict[str, int]: Dictionary containing various Twitter metrics + +Raises: +- tweepy.TweepyException: If metrics retrieval fails + +## Usage Examples + +### Basic Tweet Posting +```python +from swarms_tools.twitter import initialize_twitter_tool, post_tweet + +# Post a simple tweet +post_tweet("Hello, Twitter!") +``` + +### Interacting with Tweets +```python +# Reply to a tweet +reply_tweet(12345, "Great point!") + +# Like a tweet +like_tweet(12345) + +# Quote a tweet +quote_tweet(12345, "Adding my thoughts on this!") +``` + +### Retrieving Metrics +```python +metrics = get_metrics() +print(f"Current metrics: {metrics}") +``` + +## Error Handling +All functions include built-in error handling and will print error messages if operations fail. It's recommended to implement additional error handling in production environments: + +```python +try: + post_tweet("Hello, Twitter!") +except Exception as e: + logger.error(f"Tweet posting failed: {e}") + # Implement appropriate error handling +``` + + +## Production Example + +This is an example of how to use the TwitterTool in a production environment using Swarms. + +```python + +import os +from time import time + +from swarm_models import OpenAIChat +from swarms import Agent +from dotenv import load_dotenv + +from swarms_tools.social_media.twitter_tool import TwitterTool + +load_dotenv() + +model_name = "gpt-4o" + +model = OpenAIChat( + model_name=model_name, + max_tokens=3000, + openai_api_key=os.getenv("OPENAI_API_KEY"), +) + + +medical_coder = Agent( + agent_name="Medical Coder", + system_prompt=""" + You are a highly experienced and certified medical coder with extensive knowledge of ICD-10 coding guidelines, clinical documentation standards, and compliance regulations. Your responsibility is to ensure precise, compliant, and well-documented coding for all clinical cases. + + ### Primary Responsibilities: + 1. **Review Clinical Documentation**: Analyze all available clinical records, including specialist inputs, physician notes, lab results, imaging reports, and discharge summaries. + 2. **Assign Accurate ICD-10 Codes**: Identify and assign appropriate codes for primary diagnoses, secondary conditions, symptoms, and complications. + 3. **Ensure Coding Compliance**: Follow the latest ICD-10-CM/PCS coding guidelines, payer-specific requirements, and organizational policies. + 4. **Document Code Justification**: Provide clear, evidence-based rationale for each assigned code. + + ### Detailed Coding Process: + - **Review Specialist Inputs**: Examine all relevant documentation to capture the full scope of the patient's condition and care provided. + - **Identify Diagnoses**: Determine the primary and secondary diagnoses, as well as any symptoms or complications, based on the documentation. + - **Assign ICD-10 Codes**: Select the most accurate and specific ICD-10 codes for each identified diagnosis or condition. + - **Document Supporting Evidence**: Record the documentation source (e.g., lab report, imaging, or physician note) for each code to justify its assignment. + - **Address Queries**: Note and flag any inconsistencies, missing information, or areas requiring clarification from providers. + + ### Output Requirements: + Your response must be clear, structured, and compliant with professional standards. Use the following format: + + 1. **Primary Diagnosis Codes**: + - **ICD-10 Code**: [e.g., E11.9] + - **Description**: [e.g., Type 2 diabetes mellitus without complications] + - **Supporting Documentation**: [e.g., Physician's note dated MM/DD/YYYY] + + 2. **Secondary Diagnosis Codes**: + - **ICD-10 Code**: [Code] + - **Description**: [Description] + - **Order of Clinical Significance**: [Rank or priority] + + 3. **Symptom Codes**: + - **ICD-10 Code**: [Code] + - **Description**: [Description] + + 4. **Complication Codes**: + - **ICD-10 Code**: [Code] + - **Description**: [Description] + - **Relevant Documentation**: [Source of information] + + 5. **Coding Notes**: + - Observations, clarifications, or any potential issues requiring provider input. + + ### Additional Guidelines: + - Always prioritize specificity and compliance when assigning codes. + - For ambiguous cases, provide a brief note with reasoning and flag for clarification. + - Ensure the output format is clean, consistent, and ready for professional use. + """, + llm=model, + max_loops=1, + dynamic_temperature_enabled=True, +) + + +# Define your options with the necessary credentials +options = { + "id": "mcsswarm", + "name": "mcsswarm", + "description": "An example Twitter Plugin for testing.", + "credentials": { + "apiKey": os.getenv("TWITTER_API_KEY"), + "apiSecretKey": os.getenv("TWITTER_API_SECRET_KEY"), + "accessToken": os.getenv("TWITTER_ACCESS_TOKEN"), + "accessTokenSecret": os.getenv("TWITTER_ACCESS_TOKEN_SECRET"), + }, +} + +# Initialize the TwitterTool with your options +twitter_plugin = TwitterTool(options) + +# # Post a tweet +# post_tweet_fn = twitter_plugin.get_function('post_tweet') +# post_tweet_fn("Hello world!") + + +# Assuming `twitter_plugin` and `medical_coder` are already initialized +post_tweet = twitter_plugin.get_function("post_tweet") + +# Set to track posted tweets and avoid duplicates +posted_tweets = set() + + +def post_unique_tweet(): + """ + Generate and post a unique tweet. Skip duplicates. + """ + tweet_prompt = ( + "Share an intriguing, lesser-known fact about a medical disease, and include an innovative, fun, or surprising way to manage or cure it! " + "Make the response playful, engaging, and inspiring—something that makes people smile while learning. No markdown, just plain text!" + ) + + # Generate a new tweet text + tweet_text = medical_coder.run(tweet_prompt) + + # Check for duplicates + if tweet_text in posted_tweets: + print("Duplicate tweet detected. Skipping...") + return + + # Post the tweet + try: + post_tweet(tweet_text) + print(f"Posted tweet: {tweet_text}") + # Add the tweet to the set of posted tweets + posted_tweets.add(tweet_text) + except Exception as e: + print(f"Error posting tweet: {e}") + + +# Loop to post tweets every 10 seconds +def start_tweet_loop(interval=10): + """ + Continuously post tweets every `interval` seconds. + + Args: + interval (int): Time in seconds between tweets. + """ + print("Starting tweet loop...") + while True: + post_unique_tweet() + time.sleep(interval) + + +# Start the loop +start_tweet_loop(10) +``` + + +## Best Practices +1. Always store credentials in environment variables +2. Implement rate limiting in production environments +3. Add proper logging for all operations +4. Handle errors gracefully +5. Validate tweet content before posting +6. Monitor API usage limits + +## Rate Limits +Be aware of Twitter's API rate limits. Implement appropriate delays between requests in production environments to avoid hitting these limits. + +## Dependencies +- tweepy +- python-dotenv +- swarms-tools + +## Version Compatibility +- Python 3.7+ +- Latest version of swarms-tools package \ No newline at end of file diff --git a/example.py b/example.py index 075feb1c..b4002101 100644 --- a/example.py +++ b/example.py @@ -1,6 +1,3 @@ -import os - -from swarm_models import OpenAIChat from swarms import Agent from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, @@ -9,16 +6,6 @@ from dotenv import load_dotenv load_dotenv() -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) # Initialize the agent agent = Agent( @@ -26,7 +13,7 @@ agent = Agent( agent_description="Personal finance advisor agent", system_prompt=FINANCIAL_AGENT_SYS_PROMPT, max_loops=1, - llm=model, + model_name="gpt-4o", dynamic_temperature_enabled=True, user_name="swarms_corp", retry_attempts=3, @@ -41,5 +28,4 @@ agent = Agent( agent.run( "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", - all_cores=True, ) diff --git a/new_features_examples/agent_showcase_example.py b/examples/agent_showcase_example.py similarity index 100% rename from new_features_examples/agent_showcase_example.py rename to examples/agent_showcase_example.py diff --git a/new_features_examples/agent_with_fluidapi.py b/examples/agent_with_fluidapi.py similarity index 100% rename from new_features_examples/agent_with_fluidapi.py rename to examples/agent_with_fluidapi.py diff --git a/new_features_examples/async_agent.py b/examples/async_agent.py similarity index 100% rename from new_features_examples/async_agent.py rename to examples/async_agent.py diff --git a/new_features_examples/async_agents.py b/examples/async_agents.py similarity index 100% rename from new_features_examples/async_agents.py rename to examples/async_agents.py diff --git a/new_features_examples/async_executor.py b/examples/async_executor.py similarity index 100% rename from new_features_examples/async_executor.py rename to examples/async_executor.py diff --git a/new_features_examples/async_workflow_example.py b/examples/async_workflow_example.py similarity index 100% rename from new_features_examples/async_workflow_example.py rename to examples/async_workflow_example.py diff --git a/new_features_examples/auto_agent.py b/examples/auto_agent.py similarity index 100% rename from new_features_examples/auto_agent.py rename to examples/auto_agent.py diff --git a/new_features_examples/auto_swarm_router.py b/examples/auto_swarm_router.py similarity index 100% rename from new_features_examples/auto_swarm_router.py rename to examples/auto_swarm_router.py diff --git a/examples/chart_swarm.py b/examples/chart_swarm.py new file mode 100644 index 00000000..ad1025e1 --- /dev/null +++ b/examples/chart_swarm.py @@ -0,0 +1,388 @@ +import os +from dataclasses import dataclass +from typing import Tuple +from litellm import completion +from loguru import logger +from swarms import Agent + +EXTRACTION_PROMPT = """ +You are a specialized Chart2Table extraction agent that converts visual charts into precise textual descriptions. + +Output Format: +[Chart Type] +Type: {bar|line|pie|scatter|combination} +Title: {chart title} +X-Axis: {label and scale} +Y-Axis: {label and scale} + +[Data Series] +Name: {series name} +Values: {comma-separated list of values} +{repeat for each series} + +[Annotations] +- {list any markers, gridlines, legends} +- {note any data gaps or anomalies} + +Guidelines: +1. Maintain exact numerical precision +2. List ALL data points in order +3. Note any gaps, outliers or special patterns +4. Describe axes scales (linear/log) and units +5. Include legends and series names verbatim +6. Note any data point annotations or markers +7. Describe chart elements spatially (top-left, center, etc) +8. Include color and style information if relevant +9. Note relationships between multiple series +10. Flag any data quality or readability issues""" + +REFORMULATION_PROMPT = """You are an Answer Reformulation specialist that breaks down complex analytical statements into atomic, verifiable claims. + +Output Format: +[Core Claims] +1. {single fact with exact numbers} +2. {another atomic fact} +{continue for all core claims} + +[Supporting Context] +1. {relevant context that supports core claims} +2. {additional contextual information} +{continue for all context} + +[Assumptions] +1. {implicit assumption made} +2. {another assumption} +{continue for all assumptions} + +Guidelines: +1. Each claim must be independently verifiable +2. Use exact numbers, never round or approximate +3. Split compound statements into atomic facts +4. Make implicit comparisons explicit +5. Note temporal relationships clearly +6. Include units with all measurements +7. Flag any uncertainty or approximations +8. Note data source limitations +9. Preserve calculation steps +10. Maintain logical dependencies""" + +CAPTIONING_PROMPT = """You are an Entity Captioning specialist that generates rich contextual descriptions of chart elements. + +Output Format: +[Data Points] +{x,y}: {detailed description of point significance} +{continue for key points} + +[Trends] +- {description of overall pattern} +- {notable sub-patterns} +{continue for all trends} + +[Relationships] +- {correlation between variables} +- {causation if evident} +{continue for all relationships} + +[Context] +- {broader context for interpretation} +- {relevant external factors} +{continue for all context} + +Guidelines: +1. Describe both local and global patterns +2. Note statistical significance of changes +3. Identify cyclic or seasonal patterns +4. Flag outliers and anomalies +5. Compare relative magnitudes +6. Note rate of change patterns +7. Describe distribution characteristics +8. Highlight key inflection points +9. Note data clustering patterns +10. Include domain-specific insights""" + +PREFILTER_PROMPT = """You are a Pre-filtering specialist that identifies relevant chart elements for verification. + +Output Format: +[Critical Elements] +1. {element}: Score {0-10} + Evidence: {why this supports claims} +{continue for all relevant elements} + +[Supporting Elements] +1. {element}: Score {0-10} + Context: {how this adds context} +{continue for all supporting elements} + +[Relevance Chain] +1. {claim} -> {element} -> {evidence} +{continue for all connections} + +Guidelines: +1. Score relevance 0-10 with detailed rationale +2. Build explicit evidence chains +3. Note both direct and indirect support +4. Consider temporal relevance +5. Account for data relationships +6. Note confidence levels +7. Include contextual importance +8. Consider alternative interpretations +9. Note missing evidence +10. Explain filtering decisions""" + +RERANK_PROMPT = """You are a Re-ranking specialist that orders evidence by strength and relevance. + +Output Format: +[Primary Evidence] +1. {element} - Score: {0-10} + Strength: {detailed justification} +{continue for top evidence} + +[Supporting Evidence] +1. {element} - Score: {0-10} + Context: {how this reinforces primary evidence} +{continue for supporting evidence} + +[Evidence Chains] +1. {claim} -> {primary} -> {supporting} -> {conclusion} +{continue for all chains} + +Guidelines: +1. Use explicit scoring criteria +2. Consider evidence independence +3. Note corroborating elements +4. Account for evidence quality +5. Consider contradictory evidence +6. Note confidence levels +7. Explain ranking decisions +8. Build complete evidence chains +9. Note gaps in evidence +10. Consider alternative interpretations""" + +LOCALIZATION_PROMPT = """You are a Cell Localization specialist that precisely maps data to visual elements. + +Output Format: +[Element Locations] +1. Type: {bar|line|point|label} + Position: {x1,y1,x2,y2} + Value: {associated data value} + Confidence: {0-10} +{continue for all elements} + +[Spatial Relationships] +- {relative positions} +- {alignment patterns} +{continue for all relationships} + +[Visual Context] +- {surrounding elements} +- {reference points} +{continue for context} + +Guidelines: +1. Use normalized coordinates (0-1) +2. Note element boundaries precisely +3. Include confidence scores +4. Note spatial relationships +5. Account for overlapping elements +6. Consider chart type constraints +7. Note alignment patterns +8. Include reference points +9. Note visual hierarchies +10. Document occlusions""" + + +@dataclass +class ChartElement: + element_type: str + bbox: Tuple[float, float, float, float] + confidence: float + + +class VisionAPI: + def __init__( + self, + model_name: str = "gpt-4o", + max_tokens: int = 1000, + temperature: float = 0.5, + ): + os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") + self.model_name = model_name + self.max_tokens = max_tokens + self.temperature = temperature + + def encode_image(self, img: str): + if img.startswith("http"): + return img + import base64 + + with open(img, "rb") as image_file: + encoded_string = base64.b64encode( + image_file.read() + ).decode("utf-8") + return f"data:image/png;base64,{encoded_string}" + + def run(self, task: str, img: str): + img = self.encode_image(img) + response = completion( + model=self.model_name, + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": task}, + { + "type": "image_url", + "image_url": {"url": img}, + }, + ], + } + ], + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + return response.choices[0].message.content + + +class ChartCitor: + def __init__( + self, + model_name: str = "gpt-4o", + saved_state_path: str = "chartcitor_state.json", + max_retries: int = 3, + max_loops: int = 1, + ): + logger.info( + f"Initializing ChartCitor with model {model_name}" + ) + model = VisionAPI() + + self.extraction_agent = Agent( + agent_name="Chart2Table-Agent", + system_prompt=EXTRACTION_PROMPT, + llm=model, + max_loops=1, + ) + + self.reformulation_agent = Agent( + agent_name="Answer-Reformulation-Agent", + system_prompt=REFORMULATION_PROMPT, + llm=model, + max_loops=1, + ) + + self.captioning_agent = Agent( + agent_name="Entity-Captioning-Agent", + system_prompt=CAPTIONING_PROMPT, + llm=model, + max_loops=1, + ) + + self.prefilter_agent = Agent( + agent_name="LLM-Prefilter-Agent", + system_prompt=PREFILTER_PROMPT, + llm=model, + max_loops=1, + ) + + self.rerank_agent = Agent( + agent_name="LLM-Rerank-Agent", + system_prompt=RERANK_PROMPT, + llm=model, + max_loops=1, + ) + + self.localization_agent = Agent( + agent_name="Cell-Localization-Agent", + system_prompt=LOCALIZATION_PROMPT, + llm=model, + max_loops=1, + ) + + def extract_table(self, chart_image: str) -> str: + logger.info("Extracting table from chart") + return self.extraction_agent.run( + "Extract and describe the data from this chart following the specified format.", + img=chart_image, + ) + + def reformulate_answer( + self, answer: str, table_data: str, chart_image: str + ) -> str: + logger.info("Reformulating answer into atomic facts") + return self.reformulation_agent.run( + f"Break this answer into atomic facts:\n{answer}\n\nTable data:\n{table_data}", + img=chart_image, + ) + + def generate_captions( + self, table_data: str, chart_image: str + ) -> str: + logger.info("Generating captions for chart elements") + return self.captioning_agent.run( + f"Generate descriptive captions for this data:\n{table_data}", + img=chart_image, + ) + + def retrieve_evidence( + self, + facts: str, + table_data: str, + captions: str, + chart_image: str, + ) -> str: + logger.info("Retrieving supporting evidence") + filtered = self.prefilter_agent.run( + f"Identify relevant elements for:\nFacts:\n{facts}\n\nData:\n{table_data}\n\nCaptions:\n{captions}", + img=chart_image, + ) + + return self.rerank_agent.run( + f"Rank these elements by relevance:\n{filtered}\nFor facts:\n{facts}", + img=chart_image, + ) + + def localize_elements( + self, chart_image: str, evidence: str + ) -> str: + logger.info("Localizing chart elements") + return self.localization_agent.run( + f"Describe the location of these elements:\n{evidence}", + img=chart_image, + ) + + def run( + self, chart_image: str, question: str, answer: str + ) -> str: + logger.info(f"Processing question: {question}") + + table_data = self.extract_table(chart_image) + facts = self.reformulate_answer( + answer, table_data, chart_image + ) + captions = self.generate_captions(table_data, chart_image) + evidence = self.retrieve_evidence( + facts, table_data, captions, chart_image + ) + citations = self.localize_elements(chart_image, evidence) + + return f"""Analysis Results: + + Facts: + {facts} + + Evidence: + {evidence} + + Visual Citations: + {citations} + """ + + +if __name__ == "__main__": + chartcitor = ChartCitor() + result = chartcitor.run( + chart_image="chart.png", + question="Analyze this chart of solana price and volume over time. What is the highest volume day?", + answer="203", + ) + print(result) diff --git a/new_features_examples/concurrent_examples/concurrent_mix.py b/examples/concurrent_examples/concurrent_mix.py similarity index 100% rename from new_features_examples/concurrent_examples/concurrent_mix.py rename to examples/concurrent_examples/concurrent_mix.py diff --git a/new_features_examples/crypto/swarms_coin_agent.py b/examples/crypto/swarms_coin_agent.py similarity index 100% rename from new_features_examples/crypto/swarms_coin_agent.py rename to examples/crypto/swarms_coin_agent.py diff --git a/new_features_examples/crypto/swarms_coin_multimarket.py b/examples/crypto/swarms_coin_multimarket.py similarity index 100% rename from new_features_examples/crypto/swarms_coin_multimarket.py rename to examples/crypto/swarms_coin_multimarket.py diff --git a/new_features_examples/csvagent_example.py b/examples/csvagent_example.py similarity index 100% rename from new_features_examples/csvagent_example.py rename to examples/csvagent_example.py diff --git a/examples/dao_swarm.py b/examples/dao_swarm.py new file mode 100644 index 00000000..136bbd9a --- /dev/null +++ b/examples/dao_swarm.py @@ -0,0 +1,233 @@ +import random +from swarms import Agent + +# System prompts for each agent +MARKETING_AGENT_SYS_PROMPT = """ +You are the Marketing Strategist Agent for a DAO. Your role is to develop, implement, and optimize all marketing and branding strategies to align with the DAO's mission and vision. The DAO is focused on decentralized governance for climate action, funding projects aimed at reducing carbon emissions, and incentivizing community participation through its native token. + +### Objectives: +1. **Brand Awareness**: Build a globally recognized and trusted brand for the DAO. +2. **Community Growth**: Expand the DAO's community by onboarding individuals passionate about climate action and blockchain technology. +3. **Campaign Execution**: Launch high-impact marketing campaigns on platforms like Twitter, Discord, and YouTube to engage and retain community members. +4. **Partnerships**: Identify and build partnerships with like-minded organizations, NGOs, and influencers. +5. **Content Strategy**: Design educational and engaging content, including infographics, blog posts, videos, and AMAs. + +### Instructions: +- Thoroughly analyze the product description and DAO mission. +- Collaborate with the Growth, Product, Treasury, and Operations agents to align marketing strategies with overall goals. +- Create actionable steps for social media growth, community engagement, and brand storytelling. +- Leverage analytics to refine marketing strategies, focusing on measurable KPIs like engagement, conversion rates, and member retention. +- Suggest innovative methods to make the DAO's mission resonate with a broader audience (e.g., gamified incentives, contests, or viral campaigns). +- Ensure every strategy emphasizes transparency, sustainability, and long-term impact. +""" + +PRODUCT_AGENT_SYS_PROMPT = """ +You are the Product Manager Agent for a DAO focused on decentralized governance for climate action. Your role is to design, manage, and optimize the DAO's product roadmap. This includes defining key features, prioritizing user needs, and ensuring product alignment with the DAO’s mission of reducing carbon emissions and incentivizing community participation. + +### Objectives: +1. **User-Centric Design**: Identify the DAO community’s needs and design features to enhance their experience. +2. **Roadmap Prioritization**: Develop a prioritized product roadmap based on community feedback and alignment with climate action goals. +3. **Integration**: Suggest technical solutions and tools for seamless integration with other platforms and blockchains. +4. **Continuous Improvement**: Regularly evaluate product features and recommend optimizations to improve usability, engagement, and adoption. + +### Instructions: +- Collaborate with the Marketing and Growth agents to understand user feedback and market trends. +- Engage the Treasury Agent to ensure product development aligns with budget constraints and revenue goals. +- Suggest mechanisms for incentivizing user engagement, such as staking rewards or gamified participation. +- Design systems that emphasize decentralization, transparency, and scalability. +- Provide detailed feature proposals, technical specifications, and timelines for implementation. +- Ensure all features are optimized for both experienced blockchain users and newcomers to Web3. +""" + +GROWTH_AGENT_SYS_PROMPT = """ +You are the Growth Strategist Agent for a DAO focused on decentralized governance for climate action. Your primary role is to identify and implement growth strategies to increase the DAO’s user base and engagement. + +### Objectives: +1. **User Acquisition**: Identify effective strategies to onboard more users to the DAO. +2. **Retention**: Suggest ways to improve community engagement and retain active members. +3. **Data-Driven Insights**: Leverage data analytics to identify growth opportunities and areas of improvement. +4. **Collaborative Growth**: Work with other agents to align growth efforts with marketing, product development, and treasury goals. + +### Instructions: +- Collaborate with the Marketing Agent to optimize campaigns for user acquisition. +- Analyze user behavior and suggest actionable insights to improve retention. +- Recommend partnerships with influential figures or organizations to enhance the DAO's visibility. +- Propose growth experiments (A/B testing, new incentives, etc.) and analyze their effectiveness. +- Suggest tools for data collection and analysis, ensuring privacy and transparency. +- Ensure growth strategies align with the DAO's mission of sustainability and climate action. +""" + +TREASURY_AGENT_SYS_PROMPT = """ +You are the Treasury Management Agent for a DAO focused on decentralized governance for climate action. Your role is to oversee the DAO's financial operations, including budgeting, funding allocation, and financial reporting. + +### Objectives: +1. **Financial Transparency**: Maintain clear and detailed reports of the DAO's financial status. +2. **Budget Management**: Allocate funds strategically to align with the DAO's goals and priorities. +3. **Fundraising**: Identify and recommend strategies for fundraising to ensure the DAO's financial sustainability. +4. **Cost Optimization**: Suggest ways to reduce operational costs without sacrificing quality. + +### Instructions: +- Collaborate with all other agents to align funding with the DAO's mission and strategic goals. +- Propose innovative fundraising campaigns (e.g., NFT drops, token sales) to generate revenue. +- Analyze financial risks and suggest mitigation strategies. +- Ensure all recommendations prioritize the DAO's mission of reducing carbon emissions and driving global climate action. +- Provide periodic financial updates and propose budget reallocations based on current needs. +""" + +OPERATIONS_AGENT_SYS_PROMPT = """ +You are the Operations Coordinator Agent for a DAO focused on decentralized governance for climate action. Your role is to ensure smooth day-to-day operations, coordinate workflows, and manage governance processes. + +### Objectives: +1. **Workflow Optimization**: Streamline operational processes to maximize efficiency and effectiveness. +2. **Task Coordination**: Manage and delegate tasks to ensure timely delivery of goals. +3. **Governance**: Oversee governance processes, including proposal management and voting mechanisms. +4. **Communication**: Ensure seamless communication between all agents and community members. + +### Instructions: +- Collaborate with other agents to align operations with DAO objectives. +- Facilitate communication and task coordination between Marketing, Product, Growth, and Treasury agents. +- Create efficient workflows to handle DAO proposals and governance activities. +- Suggest tools or platforms to improve operational efficiency. +- Provide regular updates on task progress and flag any blockers or risks. +""" + +# Initialize agents +marketing_agent = Agent( + agent_name="Marketing-Agent", + system_prompt=MARKETING_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +product_agent = Agent( + agent_name="Product-Agent", + system_prompt=PRODUCT_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +growth_agent = Agent( + agent_name="Growth-Agent", + system_prompt=GROWTH_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +treasury_agent = Agent( + agent_name="Treasury-Agent", + system_prompt=TREASURY_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +operations_agent = Agent( + agent_name="Operations-Agent", + system_prompt=OPERATIONS_AGENT_SYS_PROMPT, + model_name="deepseek/deepseek-reasoner", + autosave=True, + dashboard=False, + verbose=True, +) + +agents = [ + marketing_agent, + product_agent, + growth_agent, + treasury_agent, + operations_agent, +] + + +class DAOSwarmRunner: + """ + A class to manage and run a swarm of agents in a discussion. + """ + + def __init__( + self, + agents: list, + max_loops: int = 5, + shared_context: str = "", + ) -> None: + """ + Initializes the DAO Swarm Runner. + + Args: + agents (list): A list of agents in the swarm. + max_loops (int, optional): The maximum number of discussion loops between agents. Defaults to 5. + shared_context (str, optional): The shared context for all agents to base their discussion on. Defaults to an empty string. + """ + self.agents = agents + self.max_loops = max_loops + self.shared_context = shared_context + self.discussion_history = [] + + def run(self, task: str) -> str: + """ + Runs the swarm in a random discussion. + + Args: + task (str): The task or context that agents will discuss. + + Returns: + str: The final discussion output after all loops. + """ + print(f"Task: {task}") + print("Initializing Random Discussion...") + + # Initialize the discussion with the shared context + current_message = ( + f"Task: {task}\nContext: {self.shared_context}" + ) + self.discussion_history.append(current_message) + + # Run the agents in a randomized discussion + for loop in range(self.max_loops): + print(f"\n--- Loop {loop + 1}/{self.max_loops} ---") + # Choose a random agent + agent = random.choice(self.agents) + print(f"Agent {agent.agent_name} is responding...") + + # Run the agent and get a response + response = agent.run(current_message) + print(f"Agent {agent.agent_name} says:\n{response}\n") + + # Append the response to the discussion history + self.discussion_history.append( + f"{agent.agent_name}: {response}" + ) + + # Update the current message for the next agent + current_message = response + + print("\n--- Discussion Complete ---") + return "\n".join(self.discussion_history) + + +swarm = DAOSwarmRunner(agents=agents, max_loops=1, shared_context="") + +# User input for product description +product_description = """ +The DAO is focused on decentralized governance for climate action. +It funds projects aimed at reducing carbon emissions and incentivizes community participation with a native token. +""" + +# Assign a shared context for all agents +swarm.shared_context = product_description + +# Run the swarm +task = """ +Analyze the product description and create a collaborative strategy for marketing, product, growth, treasury, and operations. Ensure all recommendations align with the DAO's mission of reducing carbon emissions. +""" +output = swarm.run(task) + +# Print the swarm output +print("Collaborative Strategy Output:\n", output) diff --git a/simple_example.py b/examples/deepseek_r1.py similarity index 68% rename from simple_example.py rename to examples/deepseek_r1.py index 0d6166a3..0ae8cd4d 100644 --- a/simple_example.py +++ b/examples/deepseek_r1.py @@ -2,8 +2,8 @@ from swarms import Agent Agent( agent_name="Stock-Analysis-Agent", - model_name="gpt-4o-mini", + model_name="deepseek/deepseek-reasoner", max_loops="auto", interactive=True, - streaming_on=True, + streaming_on=False, ).run("What are 5 hft algorithms") diff --git a/new_features_examples/dict_to_table.py b/examples/dict_to_table.py similarity index 100% rename from new_features_examples/dict_to_table.py rename to examples/dict_to_table.py diff --git a/new_features_examples/ethchain_agent.py b/examples/ethchain_agent.py similarity index 100% rename from new_features_examples/ethchain_agent.py rename to examples/ethchain_agent.py diff --git a/new_features_examples/example_async_vs_multithread.py b/examples/example_async_vs_multithread.py similarity index 100% rename from new_features_examples/example_async_vs_multithread.py rename to examples/example_async_vs_multithread.py diff --git a/examples/fast_r1_groq.py b/examples/fast_r1_groq.py new file mode 100644 index 00000000..7f8aff8f --- /dev/null +++ b/examples/fast_r1_groq.py @@ -0,0 +1,9 @@ +from swarms import Agent + +Agent( + agent_name="Stock-Analysis-Agent", + model_name="groq/deepseek-r1-distill-llama-70b", + max_loops="auto", + interactive=True, + streaming_on=False, +).run("What are the best ways to analyze macroeconomic data?") diff --git a/new_features_examples/forest_swarm_examples/fund_manager_forest.py b/examples/forest_swarm_examples/fund_manager_forest.py similarity index 100% rename from new_features_examples/forest_swarm_examples/fund_manager_forest.py rename to examples/forest_swarm_examples/fund_manager_forest.py diff --git a/new_features_examples/forest_swarm_examples/medical_forest_swarm.py b/examples/forest_swarm_examples/medical_forest_swarm.py similarity index 100% rename from new_features_examples/forest_swarm_examples/medical_forest_swarm.py rename to examples/forest_swarm_examples/medical_forest_swarm.py diff --git a/new_features_examples/forest_swarm_examples/tree_swarm_test.py b/examples/forest_swarm_examples/tree_swarm_test.py similarity index 100% rename from new_features_examples/forest_swarm_examples/tree_swarm_test.py rename to examples/forest_swarm_examples/tree_swarm_test.py diff --git a/new_features_examples/full_agent_rag_example.py b/examples/full_agent_rag_example.py similarity index 100% rename from new_features_examples/full_agent_rag_example.py rename to examples/full_agent_rag_example.py diff --git a/new_features_examples/gemini_model.py b/examples/gemini_model.py similarity index 100% rename from new_features_examples/gemini_model.py rename to examples/gemini_model.py diff --git a/new_features_examples/graph_swarm_example.py b/examples/graph_swarm_example.py similarity index 100% rename from new_features_examples/graph_swarm_example.py rename to examples/graph_swarm_example.py diff --git a/new_features_examples/groupchat_examples/crypto_tax.py b/examples/groupchat_examples/crypto_tax.py similarity index 100% rename from new_features_examples/groupchat_examples/crypto_tax.py rename to examples/groupchat_examples/crypto_tax.py diff --git a/new_features_examples/groupchat_examples/crypto_tax_swarm 2.py b/examples/groupchat_examples/crypto_tax_swarm 2.py similarity index 100% rename from new_features_examples/groupchat_examples/crypto_tax_swarm 2.py rename to examples/groupchat_examples/crypto_tax_swarm 2.py diff --git a/new_features_examples/groupchat_examples/crypto_tax_swarm.py b/examples/groupchat_examples/crypto_tax_swarm.py similarity index 100% rename from new_features_examples/groupchat_examples/crypto_tax_swarm.py rename to examples/groupchat_examples/crypto_tax_swarm.py diff --git a/new_features_examples/groupchat_examples/group_chat_example.py b/examples/groupchat_examples/group_chat_example.py similarity index 100% rename from new_features_examples/groupchat_examples/group_chat_example.py rename to examples/groupchat_examples/group_chat_example.py diff --git a/examples/hs_examples/hierarchical_swarm_example.py b/examples/hs_examples/hierarchical_swarm_example.py new file mode 100644 index 00000000..5be9008f --- /dev/null +++ b/examples/hs_examples/hierarchical_swarm_example.py @@ -0,0 +1,129 @@ +import os +from dotenv import load_dotenv + +# Swarm imports +from swarms.structs.agent import Agent +from swarms.structs.hiearchical_swarm import ( + HierarchicalSwarm, + SwarmSpec, +) +from swarms.utils.function_caller_model import OpenAIFunctionCaller + +load_dotenv() + + +# ------------------------------------------------------------------------------ +# Director LLM: Responsible for orchestrating tasks among the agents +# ------------------------------------------------------------------------------ +llm = OpenAIFunctionCaller( + base_model=SwarmSpec, + api_key=os.getenv("OPENAI_API_KEY"), + system_prompt=( + "As the Director of this Hierarchical Agent Swarm, you are in charge of " + "coordinating and overseeing all tasks, ensuring that each is executed " + "efficiently and effectively by the appropriate agents. You must:\n\n" + "1. **Analyze** the user's request and **formulate** a strategic plan.\n" + "2. **Assign** tasks to the relevant agents, detailing **why** each task " + "is relevant and **what** is expected in the deliverables.\n" + "3. **Monitor** agent outputs and, if necessary, provide **constructive " + "feedback** or request **clarifications**.\n" + "4. **Iterate** this process until all tasks are completed to a high " + "standard, or until the swarm has reached the maximum feedback loops.\n\n" + "Remember:\n" + "- **Only** use the agents provided; do not invent extra roles.\n" + "- If you need additional information, request it from the user.\n" + "- Strive to produce a clear, comprehensive **final output** that addresses " + "the user's needs.\n" + "- Keep the tone **professional** and **informative**. If there's uncertainty, " + "politely request further details.\n" + "- Ensure that any steps you outline are **actionable**, **logical**, and " + "**transparent** to the user.\n\n" + "Your effectiveness hinges on clarity, structured delegation, and thoroughness. " + "Always focus on delivering the best possible outcome for the user's request." + ), + temperature=0.5, + max_tokens=8196, +) + + +def main(): + # -------------------------------------------------------------------------- + # Agent: Stock-Analysis-Agent + # -------------------------------------------------------------------------- + # This agent is responsible for: + # - Gathering and interpreting financial data + # - Identifying market trends and patterns + # - Providing clear, actionable insights or recommendations + # -------------------------------------------------------------------------- + analysis_agent = Agent( + agent_name="Stock-Analysis-Agent", + model_name="gpt-4o", + max_loops=1, + interactive=False, + streaming_on=False, + system_prompt=( + "As the Stock Analysis Agent, your primary responsibilities include:\n\n" + "1. **Market Trend Analysis**: Evaluate current and historical market data " + "to identify trends, patterns, and potential investment opportunities.\n" + "2. **Risk & Opportunity Assessment**: Pinpoint specific factors—whether " + "macroeconomic indicators, sector-specific trends, or company fundamentals—" + "that can guide informed investment decisions.\n" + "3. **Reporting & Recommendations**: Present your findings in a structured, " + "easy-to-understand format, offering actionable insights. Include potential " + "caveats or uncertainties in your assessment.\n\n" + "Operational Guidelines:\n" + "- If additional data or clarifications are needed, explicitly request them " + "from the Director.\n" + "- Keep your output **concise** yet **comprehensive**. Provide clear " + "rationales for each recommendation.\n" + "- Clearly state any **assumptions** or **limitations** in your analysis.\n" + "- Remember: You are not a financial advisor, and final decisions rest with " + "the user. Include necessary disclaimers.\n\n" + "Goal:\n" + "Deliver high-quality, well-substantiated stock market insights that can be " + "used to guide strategic investment decisions." + ), + ) + + # -------------------------------------------------------------------------- + # Hierarchical Swarm Setup + # -------------------------------------------------------------------------- + # - Director: llm + # - Agents: [analysis_agent] + # - max_loops: Maximum number of feedback loops between director & agents + # -------------------------------------------------------------------------- + swarm = HierarchicalSwarm( + description=( + "A specialized swarm in which the Director delegates tasks to a Stock " + "Analysis Agent for thorough market evaluation." + ), + director=llm, + agents=[analysis_agent], + max_loops=1, # Limit on feedback iterations + ) + + # -------------------------------------------------------------------------- + # Execution + # -------------------------------------------------------------------------- + # The director receives the user's instruction: "Ask the stock analysis agent + # to analyze the stock market." The Director will then: + # 1. Formulate tasks (SwarmSpec) + # 2. Assign tasks to the Stock-Analysis-Agent + # 3. Provide feedback and/or request clarifications + # 4. Produce a final response + # -------------------------------------------------------------------------- + user_request = ( + "Please provide an in-depth analysis of the current stock market, " + "focusing on:\n" + "- Key macroeconomic factors affecting market momentum.\n" + "- Potential short-term vs. long-term opportunities.\n" + "- Sector performance trends (e.g., technology, healthcare, energy).\n" + "Highlight any risks, disclaimers, or uncertainties." + ) + + # Run the swarm with the user_request + swarm.run(user_request) + + +if __name__ == "__main__": + main() diff --git a/examples/hs_examples/hs_stock_team.py b/examples/hs_examples/hs_stock_team.py new file mode 100644 index 00000000..d4cbe763 --- /dev/null +++ b/examples/hs_examples/hs_stock_team.py @@ -0,0 +1,219 @@ +import os +from dotenv import load_dotenv + +# Swarm imports +from swarms.structs.agent import Agent +from swarms.structs.hiearchical_swarm import ( + HierarchicalSwarm, + SwarmSpec, +) +from swarms.utils.function_caller_model import OpenAIFunctionCaller + +load_dotenv() + +# ------------------------------------------------------------------------------ +# Trading Director: Responsible for orchestrating tasks among multiple stock analysts +# ------------------------------------------------------------------------------ +director_llm = OpenAIFunctionCaller( + base_model=SwarmSpec, + api_key=os.getenv("OPENAI_API_KEY"), + system_prompt=( + "You are the Trading Director in charge of coordinating a team of specialized " + "Stock Analysts. Your responsibilities include:\n\n" + "1. **Analyze** the user's request and **break it down** into actionable tasks.\n" + "2. **Assign** tasks to the relevant analysts, explaining **why** each task is " + "important and **what** each analyst should deliver.\n" + "3. **Review** all analyst outputs, providing **feedback** or **clarifications** " + "to ensure thoroughness and accuracy.\n" + "4. **Consolidate** final insights into a cohesive, actionable, and " + "easy-to-understand response for the user.\n\n" + "Guidelines:\n" + "- You can only delegate to the analysts assigned to this swarm.\n" + "- If essential data or clarifications are needed, request them from the user.\n" + "- Be direct, structured, and analytical. Present each key point clearly.\n" + "- Strive for a polished **final output** that addresses the user's request.\n" + "- If uncertainties remain, politely highlight them or request more info.\n\n" + "Overarching Goal:\n" + "Maximize the value of insights provided to the user by thoroughly leveraging " + "each analyst’s specialization, while maintaining a professional and " + "transparent communication style." + ), + temperature=0.5, + max_tokens=8196, +) + + +def main(): + # -------------------------------------------------------------------------- + # Agent 1: Macro-Economic-Analysis-Agent + # -------------------------------------------------------------------------- + # Focus: Assess macroeconomic factors like inflation, interest rates, GDP growth, etc. + # -------------------------------------------------------------------------- + macro_agent = Agent( + agent_name="Macro-Economic-Analysis-Agent", + model_name="gpt-4o", + max_loops=1, + interactive=False, + streaming_on=False, + system_prompt=( + "As the Macro-Economic Analysis Agent, your mission is to:\n\n" + "1. **Identify** the key macroeconomic indicators impacting the market.\n" + "2. **Interpret** how factors like inflation, interest rates, and fiscal " + "policies influence market sentiment.\n" + "3. **Connect** these insights to specific investment opportunities or " + "risks across various sectors.\n\n" + "Guidelines:\n" + "- Provide clear, data-driven rationales.\n" + "- Highlight potential global events or policy decisions that may shift " + "market conditions.\n" + "- Request further details if needed, and state any assumptions or " + "limitations.\n\n" + "Outcome:\n" + "Deliver a concise but thorough macroeconomic overview that the Trading " + "Director can combine with other analyses to inform strategy." + ), + ) + + # -------------------------------------------------------------------------- + # Agent 2: Sector-Performance-Analysis-Agent + # -------------------------------------------------------------------------- + # Focus: Drill down into sector-level trends, e.g., technology, healthcare, energy, etc. + # -------------------------------------------------------------------------- + sector_agent = Agent( + agent_name="Sector-Performance-Analysis-Agent", + model_name="gpt-4o", + max_loops=1, + interactive=False, + streaming_on=False, + system_prompt=( + "As the Sector Performance Analysis Agent, your responsibilities are:\n\n" + "1. **Evaluate** recent performance trends across key sectors—technology, " + "healthcare, energy, finance, and more.\n" + "2. **Identify** sector-specific drivers (e.g., regulatory changes, " + "consumer demand shifts, innovation trends).\n" + "3. **Highlight** which sectors may offer short-term or long-term " + "opportunities.\n\n" + "Guidelines:\n" + "- Focus on factual, data-backed observations.\n" + "- Cite any significant indicators or company-level news that might affect " + "the sector broadly.\n" + "- Clarify the confidence level of your sector outlook and note any " + "uncertainties.\n\n" + "Outcome:\n" + "Provide the Trading Director with actionable insights into sector-level " + "momentum and potential investment focal points." + ), + ) + + # -------------------------------------------------------------------------- + # Agent 3: Technical-Analysis-Agent + # -------------------------------------------------------------------------- + # Focus: Evaluate price action, volume, and chart patterns to guide short-term + # trading strategies. + # -------------------------------------------------------------------------- + technical_agent = Agent( + agent_name="Technical-Analysis-Agent", + model_name="gpt-4o", + max_loops=1, + interactive=False, + streaming_on=False, + system_prompt=( + "As the Technical Analysis Agent, you specialize in interpreting price " + "charts, volume trends, and indicators (e.g., RSI, MACD) to gauge short-term " + "momentum. Your tasks:\n\n" + "1. **Examine** current market charts for significant breakouts, support/resistance " + "levels, or technical signals.\n" + "2. **Identify** short-term trading opportunities or risks based on " + "technically-driven insights.\n" + "3. **Discuss** how these patterns align with or contradict fundamental " + "or macro perspectives.\n\n" + "Guidelines:\n" + "- Keep explanations accessible, avoiding excessive jargon.\n" + "- Point out levels or patterns that traders commonly monitor.\n" + "- Use disclaimers if there is insufficient data or conflicting signals.\n\n" + "Outcome:\n" + "Supply the Trading Director with technical viewpoints to complement broader " + "macro and sector analysis, supporting timely trading decisions." + ), + ) + + # -------------------------------------------------------------------------- + # Agent 4: Risk-Analysis-Agent + # -------------------------------------------------------------------------- + # Focus: Evaluate risk factors and potential uncertainties, providing disclaimers and + # suggesting mitigations. + # -------------------------------------------------------------------------- + risk_agent = Agent( + agent_name="Risk-Analysis-Agent", + model_name="gpt-4o", + max_loops=1, + interactive=False, + streaming_on=False, + system_prompt=( + "As the Risk Analysis Agent, your role is to:\n\n" + "1. **Identify** key risks and uncertainties—regulatory, geopolitical, " + "currency fluctuations, etc.\n" + "2. **Assess** how these risks could impact investor sentiment or portfolio " + "volatility.\n" + "3. **Recommend** risk mitigation strategies or cautionary steps.\n\n" + "Guidelines:\n" + "- Present both systemic (market-wide) and idiosyncratic (company/sector) risks.\n" + "- Be transparent about unknowns or data gaps.\n" + "- Provide disclaimers on market unpredictability.\n\n" + "Outcome:\n" + "Offer the Trading Director a detailed risk framework that helps balance " + "aggressive and defensive positions." + ), + ) + + # -------------------------------------------------------------------------- + # Hierarchical Swarm Setup + # -------------------------------------------------------------------------- + # - Director: director_llm + # - Agents: [macro_agent, sector_agent, technical_agent, risk_agent] + # - max_loops: Up to 2 feedback loops between director and agents + # -------------------------------------------------------------------------- + swarm = HierarchicalSwarm( + name="HierarchicalStockAnalysisSwarm", + description=( + "A specialized swarm consisting of a Trading Director overseeing four " + "Stock Analysts, each focusing on Macro, Sector, Technical, and Risk " + "perspectives." + ), + director=director_llm, + agents=[ + macro_agent, + sector_agent, + technical_agent, + risk_agent, + ], + max_loops=2, # Limit on feedback iterations + ) + + # -------------------------------------------------------------------------- + # Execution + # -------------------------------------------------------------------------- + # Example user request for the entire team: + # 1. Discuss key macroeconomic factors (inflation, interest rates, etc.) + # 2. Analyze sector-level performance (technology, healthcare, energy). + # 3. Give short-term technical signals and levels to watch. + # 4. Outline major risks or uncertainties. + # -------------------------------------------------------------------------- + user_request = ( + "Please provide a comprehensive analysis of the current stock market, " + "covering:\n" + "- Key macroeconomic drivers affecting market momentum.\n" + "- Which sectors seem likely to outperform in the near vs. long term.\n" + "- Any notable technical signals or price levels to monitor.\n" + "- Potential risks or uncertainties that might disrupt market performance.\n" + "Include clear disclaimers about the limitations of these analyses." + "Call the risk analysis agent only" + ) + + # Run the swarm with the user_request + final_output = swarm.run(user_request) + print(final_output) + + +if __name__ == "__main__": + main() diff --git a/new_features_examples/insurance_agent.py b/examples/insurance_agent.py similarity index 100% rename from new_features_examples/insurance_agent.py rename to examples/insurance_agent.py diff --git a/new_features_examples/insurance_swarm.py b/examples/insurance_swarm.py similarity index 100% rename from new_features_examples/insurance_swarm.py rename to examples/insurance_swarm.py diff --git a/examples/litellm_tool_example.py b/examples/litellm_tool_example.py new file mode 100644 index 00000000..e79b5655 --- /dev/null +++ b/examples/litellm_tool_example.py @@ -0,0 +1,51 @@ +from swarms.tools.base_tool import BaseTool + +import requests +from swarms.utils.litellm_wrapper import LiteLLM + + +def get_stock_data(symbol: str) -> str: + """ + Fetches stock data from Yahoo Finance for a given stock symbol. + + Args: + symbol (str): The stock symbol to fetch data for (e.g., 'AAPL' for Apple Inc.). + + Returns: + Dict[str, Any]: A dictionary containing stock data, including price, volume, and other relevant information. + + Raises: + ValueError: If the stock symbol is invalid or data cannot be retrieved. + """ + url = f"https://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}" + response = requests.get(url) + + if response.status_code != 200: + raise ValueError(f"Error fetching data for symbol: {symbol}") + + data = response.json() + if ( + "quoteResponse" not in data + or not data["quoteResponse"]["result"] + ): + raise ValueError(f"No data found for symbol: {symbol}") + + return str(data["quoteResponse"]["result"][0]) + + +tool_schema = BaseTool( + tools=[get_stock_data] +).convert_tool_into_openai_schema() + +tool_schema = tool_schema["functions"][0] + +llm = LiteLLM( + model_name="gpt-4o", +) + +print( + llm.run( + "What is the stock data for Apple Inc. (AAPL)?", + tools=[tool_schema], + ) +) diff --git a/examples/lumo_example.py b/examples/lumo_example.py new file mode 100644 index 00000000..de66ba80 --- /dev/null +++ b/examples/lumo_example.py @@ -0,0 +1,64 @@ +import torch +from transformers import ( + AutoTokenizer, + BitsAndBytesConfig, + LlamaForCausalLM, +) + +from swarms import Agent + + +class Lumo: + """ + A class for generating text using the Lumo model with 4-bit quantization. + """ + + def __init__(self): + """ + Initializes the Lumo model with 4-bit quantization and a tokenizer. + """ + # Configure 4-bit quantization + bnb_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + llm_int8_enable_fp32_cpu_offload=True, + ) + + self.model = LlamaForCausalLM.from_pretrained( + "lumolabs-ai/Lumo-70B-Instruct", + device_map="auto", + quantization_config=bnb_config, + use_cache=False, + attn_implementation="sdpa", + ) + self.tokenizer = AutoTokenizer.from_pretrained( + "lumolabs-ai/Lumo-70B-Instruct" + ) + + def run(self, task: str) -> str: + """ + Generates text based on the given prompt using the Lumo model. + + Args: + prompt (str): The input prompt for the model. + + Returns: + str: The generated text. + """ + inputs = self.tokenizer(task, return_tensors="pt").to( + self.model.device + ) + outputs = self.model.generate(**inputs, max_new_tokens=100) + return self.tokenizer.decode( + outputs[0], skip_special_tokens=True + ) + + +Agent( + agent_name="Solana-Analysis-Agent", + model_name=Lumo(), + max_loops="auto", + interactive=True, + streaming_on=True, +).run("How do i create a smart contract in solana?") diff --git a/new_features_examples/main.py b/examples/main.py similarity index 100% rename from new_features_examples/main.py rename to examples/main.py diff --git a/examples/majority_voting_example.py b/examples/majority_voting_example.py new file mode 100644 index 00000000..a2ba372a --- /dev/null +++ b/examples/majority_voting_example.py @@ -0,0 +1,34 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms.structs.majority_voting import MajorityVoting +from dotenv import load_dotenv + +load_dotenv() + + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, +) + +swarm = MajorityVoting(agents=[agent, agent, agent]) + +swarm.run( + "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", +) diff --git a/new_features_examples/markdown_agent.py b/examples/markdown_agent.py similarity index 100% rename from new_features_examples/markdown_agent.py rename to examples/markdown_agent.py diff --git a/new_features_examples/materials_science_agents.py b/examples/materials_science_agents.py similarity index 100% rename from new_features_examples/materials_science_agents.py rename to examples/materials_science_agents.py diff --git a/new_features_examples/medical_analysis/health_privacy_swarm 2.py b/examples/medical_analysis/health_privacy_swarm 2.py similarity index 100% rename from new_features_examples/medical_analysis/health_privacy_swarm 2.py rename to examples/medical_analysis/health_privacy_swarm 2.py diff --git a/new_features_examples/medical_analysis/health_privacy_swarm.py b/examples/medical_analysis/health_privacy_swarm.py similarity index 100% rename from new_features_examples/medical_analysis/health_privacy_swarm.py rename to examples/medical_analysis/health_privacy_swarm.py diff --git a/new_features_examples/medical_analysis/health_privacy_swarm_two 2.py b/examples/medical_analysis/health_privacy_swarm_two 2.py similarity index 100% rename from new_features_examples/medical_analysis/health_privacy_swarm_two 2.py rename to examples/medical_analysis/health_privacy_swarm_two 2.py diff --git a/new_features_examples/medical_analysis/health_privacy_swarm_two.py b/examples/medical_analysis/health_privacy_swarm_two.py similarity index 100% rename from new_features_examples/medical_analysis/health_privacy_swarm_two.py rename to examples/medical_analysis/health_privacy_swarm_two.py diff --git a/new_features_examples/medical_analysis/medical_analysis_agent_rearrange.md b/examples/medical_analysis/medical_analysis_agent_rearrange.md similarity index 100% rename from new_features_examples/medical_analysis/medical_analysis_agent_rearrange.md rename to examples/medical_analysis/medical_analysis_agent_rearrange.md diff --git a/new_features_examples/medical_analysis/medical_coder_agent.py b/examples/medical_analysis/medical_coder_agent.py similarity index 100% rename from new_features_examples/medical_analysis/medical_coder_agent.py rename to examples/medical_analysis/medical_coder_agent.py diff --git a/new_features_examples/medical_analysis/medical_coding_report.md b/examples/medical_analysis/medical_coding_report.md similarity index 100% rename from new_features_examples/medical_analysis/medical_coding_report.md rename to examples/medical_analysis/medical_coding_report.md diff --git a/new_features_examples/medical_analysis/medical_diagnosis_report.md b/examples/medical_analysis/medical_diagnosis_report.md similarity index 100% rename from new_features_examples/medical_analysis/medical_diagnosis_report.md rename to examples/medical_analysis/medical_diagnosis_report.md diff --git a/new_features_examples/medical_analysis/new_medical_rearrange.py b/examples/medical_analysis/new_medical_rearrange.py similarity index 100% rename from new_features_examples/medical_analysis/new_medical_rearrange.py rename to examples/medical_analysis/new_medical_rearrange.py diff --git a/new_features_examples/medical_analysis/rearrange_video_examples/reports/medical_analysis_agent_rearrange.md b/examples/medical_analysis/rearrange_video_examples/reports/medical_analysis_agent_rearrange.md similarity index 100% rename from new_features_examples/medical_analysis/rearrange_video_examples/reports/medical_analysis_agent_rearrange.md rename to examples/medical_analysis/rearrange_video_examples/reports/medical_analysis_agent_rearrange.md diff --git a/new_features_examples/medical_analysis/rearrange_video_examples/reports/vc_document_analysis.md b/examples/medical_analysis/rearrange_video_examples/reports/vc_document_analysis.md similarity index 100% rename from new_features_examples/medical_analysis/rearrange_video_examples/reports/vc_document_analysis.md rename to examples/medical_analysis/rearrange_video_examples/reports/vc_document_analysis.md diff --git a/new_features_examples/medical_analysis/rearrange_video_examples/term_sheet_swarm.py b/examples/medical_analysis/rearrange_video_examples/term_sheet_swarm.py similarity index 100% rename from new_features_examples/medical_analysis/rearrange_video_examples/term_sheet_swarm.py rename to examples/medical_analysis/rearrange_video_examples/term_sheet_swarm.py diff --git a/examples/meme_agents/bob_the_agent.py b/examples/meme_agents/bob_the_agent.py new file mode 100644 index 00000000..3016e3b6 --- /dev/null +++ b/examples/meme_agents/bob_the_agent.py @@ -0,0 +1,37 @@ +from swarms import Agent + +# Define a custom system prompt for Bob the Builder +BOB_THE_BUILDER_SYS_PROMPT = """ +You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor. +Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be. +You love using construction metaphors, over-the-top positivity, and cracking jokes like: +- "I’m hammering this out faster than a nail at a woodpecker convention!" +- "This is smoother than fresh cement on a summer’s day." +- "Let’s bulldoze through this problem—safety goggles on, folks!" + +You are not bound by any specific field of knowledge, and you’re absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!" + +Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem. +""" + +# Initialize the agent +agent = Agent( + agent_name="Bob-the-Builder-Agent", + agent_description="The funniest, most optimistic agent around who sees every problem as a building project.", + system_prompt=BOB_THE_BUILDER_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv", OR "string", "yaml" + auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task + max_tokens=4000, # Max output tokens + saved_state_path="bob_the_builder_agent.json", + interactive=False, +) + +# Run the agent with a task +agent.run("I want to build a house ;) What should I do?") diff --git a/examples/meme_agents/meme_agent_generator.py b/examples/meme_agents/meme_agent_generator.py new file mode 100644 index 00000000..2ec86d9d --- /dev/null +++ b/examples/meme_agents/meme_agent_generator.py @@ -0,0 +1,17 @@ +from swarms.structs.meme_agent_persona_generator import ( + MemeAgentGenerator, +) + + +if __name__ == "__main__": + example = MemeAgentGenerator( + name="Meme-Swarm", + description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media from 2001s", + max_loops=1, + ) + + print( + example.run( + "Generate funny meme agents around cool media from 2001s" + ) + ) diff --git a/new_features_examples/microstructure.py b/examples/microstructure.py similarity index 100% rename from new_features_examples/microstructure.py rename to examples/microstructure.py diff --git a/examples/model_router_example.py b/examples/model_router_example.py new file mode 100644 index 00000000..644ae237 --- /dev/null +++ b/examples/model_router_example.py @@ -0,0 +1,11 @@ +import os +from swarms.structs.model_router import ModelRouter +from dotenv import load_dotenv + +load_dotenv() + +model_router = ModelRouter(api_key=os.getenv("OPENAI_API_KEY")) + +model_router.run( + "What are the best ways to analyze macroeconomic data? Use openai gpt-4o models" +) diff --git a/examples/morgtate_swarm.py b/examples/morgtate_swarm.py new file mode 100644 index 00000000..e0fe6055 --- /dev/null +++ b/examples/morgtate_swarm.py @@ -0,0 +1,603 @@ +import concurrent.futures +import json +import os +import time +import uuid +from io import BytesIO +from typing import Dict, List, Union + +import PyPDF2 +from pydantic import BaseModel, Field +from reportlab.lib.pagesizes import LETTER +from reportlab.pdfgen import canvas + +from swarms import Agent + + +def user_id_generator(): + return str(uuid.uuid4().hex) + + +timestamp = time.strftime("%Y%m%d_%H%M%S") +# print(timestamp) + + +class MortgageApplicationInput(BaseModel): + user_id: str = Field(default_factory=user_id_generator) + timestamp: str = Field(default_factory=timestamp) + application_data: str = Field( + description="The raw text of the mortgage application." + ) + + +class MortgageApplicationOutput(BaseModel): + user_id: str = Field(default_factory=user_id_generator) + input_data: MortgageApplicationInput = Field( + description="The input data for the mortgage application." + ) + document_analysis: str = Field( + description="The structured analysis of the mortgage application." + ) + risk_evaluation: str = Field( + description="The risk evaluation of the mortgage application." + ) + underwriting_decision: str = Field( + description="The underwriting decision of the mortgage application." + ) + + +def clean_markdown(text: str) -> str: + """ + Removes all markdown symbols from text. + + Args: + text (str): Text containing markdown symbols + + Returns: + str: Text with markdown symbols removed + """ + markdown_symbols = [ + "```markdown", + "```", + "#", + "*", + "_", + "`", + ">", + "-", + "+", + "[", + "]", + "(", + ")", + "|", + ] + cleaned_text = text + for symbol in markdown_symbols: + cleaned_text = cleaned_text.replace(symbol, "") + return cleaned_text.strip() + + +class MortgageUnderwritingSwarm: + def __init__( + self, + user_id: str = user_id_generator(), + save_directory: str = "./autosave", + return_format: str = "pdf", + ): + """ + Initialize the MortgageUnderwritingSwarm with the necessary Agents. + Args: + save_directory (str): Directory where intermediate results and final documents will be autosaved. + """ + self.user_id = user_id + self.save_directory = save_directory + self.return_format = return_format + os.makedirs(self.save_directory, exist_ok=True) + + # ------------------------------- + # 1) Document Analyzer Agent + # ------------------------------- + self.document_agent = Agent( + agent_name="Document-Analyzer-Agent", + model_name="gpt-4o-mini", + max_loops=1, + streaming_on=True, + ) + self.document_prompt = """ + You are a highly experienced Mortgage Document Analysis Expert with deep knowledge of federal and state mortgage regulations. Your task is to: + + 1. Parse and extract key data from unstructured documents (PDF or text) while ensuring compliance with: + - Truth in Lending Act (TILA) requirements + - Real Estate Settlement Procedures Act (RESPA) guidelines + - Fair Credit Reporting Act (FCRA) standards + - Equal Credit Opportunity Act (ECOA) requirements + + 2. Validate data consistency and regulatory compliance for: + - Income verification (including all sources of income) + - Credit scores and credit history + - Property details and appraisal information + - Debt obligations and payment history + - Employment verification + - Asset documentation + - Identity verification documents + + 3. Highlight any discrepancies, red flags, or potential compliance violations, including: + - Inconsistencies in reported income vs documentation + - Suspicious patterns in bank statements + - Potential identity theft indicators + - Missing required regulatory disclosures + - Fair lending concerns + - Anti-money laundering (AML) red flags + + 4. Provide a comprehensive, well-structured summary that includes: + - All key findings organized by category + - Compliance checklist results + - Documentation completeness assessment + - Regulatory disclosure verification + - Quality control notes + + 5. Clearly indicate any missing or ambiguous information required by: + - Federal regulations + - State-specific requirements + - Agency guidelines (FHA, VA, Fannie Mae, Freddie Mac) + - Internal compliance policies + + 6. Format output in a standardized structure that: + - Facilitates automated compliance checks + - Enables clear audit trails + - Supports regulatory reporting requirements + - Can be easily consumed by subsequent agents + """ + + # ------------------------------- + # 2) Risk Evaluator Agent + # ------------------------------- + self.risk_agent = Agent( + agent_name="Risk-Evaluator-Agent", + model_name="gpt-4o-mini", + max_loops=1, + streaming_on=True, + ) + self.risk_prompt = """ + You are an expert Risk Evaluator for mortgage applications with comprehensive knowledge of regulatory compliance. Your responsibilities: + + 1. Conduct thorough risk assessment in accordance with: + - Dodd-Frank Act requirements + - Consumer Financial Protection Bureau (CFPB) guidelines + - Federal Reserve Board regulations + - Agency-specific requirements (FHA, VA, Fannie Mae, Freddie Mac) + + 2. Evaluate key risk factors including: + - Debt-to-income ratio (DTI) compliance with QM rules + - Credit history analysis per FCRA guidelines + - Property valuation in line with USPAP standards + - Income stability and verification per agency requirements + - Assets and reserves adequacy + - Employment history and verification + - Occupancy risk assessment + - Property type and use restrictions + + 3. Calculate and assign risk scores: + - Overall application risk score (1-10 scale) + - Individual component risk scores + - Regulatory compliance risk assessment + - Fraud risk indicators + - Default risk probability + + 4. Identify and document: + - High-risk elements requiring additional scrutiny + - Potential regulatory compliance issues + - Required compensating factors + - Secondary market eligibility concerns + - Fair lending considerations + + 5. Recommend risk mitigation strategies: + - Additional documentation requirements + - Income/asset verification needs + - Compensating factor documentation + - Alternative qualification approaches + - Regulatory compliance remediation steps + + 6. Generate comprehensive risk analysis including: + - Detailed risk assessment findings + - Compliance verification results + - Supporting documentation requirements + - Clear justification for all conclusions + - Regulatory requirement adherence confirmation + """ + + # ------------------------------- + # 3) Mortgage Underwriter Agent + # ------------------------------- + self.underwriter_agent = Agent( + agent_name="Mortgage-Underwriter-Agent", + model_name="gpt-4o-mini", + max_loops=1, + streaming_on=True, + ) + self.underwriter_prompt = """ + You are a seasoned Mortgage Underwriter with expertise in regulatory compliance and industry standards. Your role is to: + + 1. Make final underwriting decisions while ensuring compliance with: + - Qualified Mortgage (QM) and Ability-to-Repay (ATR) rules + - Fair lending laws (ECOA, FHA, HMDA) + - Agency guidelines (FHA, VA, Fannie Mae, Freddie Mac) + - State-specific lending requirements + - Internal credit policies and procedures + + 2. Review and synthesize: + - Document Analyzer findings + - Risk Evaluator assessments + - Compliance verification results + - Quality control checks + - Regulatory requirements + - Secondary market guidelines + + 3. Determine appropriate decision category: + - Approved + - Conditionally Approved (with specific conditions) + - Denied (with detailed adverse action notice requirements) + - Counteroffer recommendations + - Alternative program suggestions + + 4. For all decisions, provide: + - Clear written justification + - Regulatory compliance confirmation + - Required disclosures identification + - Adverse action notices if required + - Fair lending analysis documentation + - Secondary market eligibility determination + + 5. For conditional approvals, specify: + - Required documentation + - Timeline requirements + - Regulatory compliance conditions + - Prior-to-funding conditions + - Post-closing requirements + - Quality control conditions + + 6. Generate comprehensive decision report including: + - Detailed underwriting analysis + - Compliance verification results + - Supporting documentation list + - Condition status tracking + - Regulatory requirement satisfaction + - Clear audit trail documentation + + 7. Ensure all decisions adhere to: + - Fair lending requirements + - Anti-discrimination laws + - UDAAP regulations + - State and federal disclosure requirements + - Agency and investor guidelines + - Internal policies and procedures + """ + + # -------------------------------------------------------------------------- + # Utility Methods + # -------------------------------------------------------------------------- + def pdf_to_text(self, pdf_file_path: str) -> str: + """ + Converts a PDF file to a string by extracting its text content. + Args: + pdf_file_path (str): The path to the PDF file. + Returns: + str: The extracted text from the PDF. + """ + text_content = [] + with open(pdf_file_path, "rb") as f: + reader = PyPDF2.PdfReader(f) + for page in reader.pages: + page_text = page.extract_text() or "" + text_content.append(page_text) + return "\n".join(text_content) + + def autosave_result( + self, result_data: str, filename: str + ) -> None: + """ + Autosaves intermediate or final results to a text file in the designated directory. + Args: + result_data (str): The data to be written to the file. + filename (str): The desired filename (without path). + """ + full_path = os.path.join(self.save_directory, filename) + with open(full_path, "w", encoding="utf-8") as file: + file.write(result_data) + + def generate_pdf_report( + self, content: str, pdf_path: str + ) -> None: + """ + Generates a simple PDF report from text content using ReportLab. + Args: + content (str): The textual content for the PDF. + pdf_path (str): Where to save the generated PDF. + """ + BytesIO() + c = canvas.Canvas(pdf_path, pagesize=LETTER) + width, height = LETTER + + # Simple text wrap by splitting lines + lines = clean_markdown(content).split("\n") + current_height = height - 50 # top margin + + for line in lines: + # If the line is too long, wrap it manually (simple approach) + max_chars = 90 # approx number of characters per line for LETTER size + while len(line) > max_chars: + c.drawString(50, current_height, line[:max_chars]) + line = line[max_chars:] + current_height -= 15 # line spacing + c.drawString(50, current_height, line) + current_height -= 15 + + # Add a new page if we go beyond the margin + if current_height <= 50: + c.showPage() + current_height = height - 50 + + c.save() + + # -------------------------------------------------------------------------- + # Core Processing Methods + # -------------------------------------------------------------------------- + def analyze_documents(self, document_data: str) -> str: + """ + Runs the Document Analyzer Agent on the given data. + Args: + document_data (str): Text representing the mortgage documents. + Returns: + str: Structured summary and highlights from the document analysis. + """ + prompt_input = ( + self.document_prompt + + "\n\n--- BEGIN DOCUMENTS ---\n" + + document_data + + "\n--- END DOCUMENTS ---\n" + ) + print("Running Document Analyzer Agent...") + result = self.document_agent.run(prompt_input) + self.autosave_result(result, "document_analysis.txt") + return result + + def evaluate_risk(self, document_analysis: str) -> str: + """ + Runs the Risk Evaluator Agent using the results from the Document Analyzer. + Args: + document_analysis (str): The structured analysis from the Document Analyzer. + Returns: + str: Risk analysis including risk score and explanation. + """ + prompt_input = ( + self.risk_prompt + + "\n\n--- DOCUMENT ANALYSIS OUTPUT ---\n" + + document_analysis + + "\n--- END ANALYSIS OUTPUT ---\n" + ) + print("Running Risk Evaluator Agent...") + result = self.risk_agent.run(prompt_input) + self.autosave_result(result, "risk_evaluation.txt") + return result + + def underwrite_mortgage( + self, document_analysis: str, risk_evaluation: str + ) -> str: + """ + Runs the Mortgage Underwriter Agent to produce the final underwriting decision. + Args: + document_analysis (str): Output from the Document Analyzer. + risk_evaluation (str): Output from the Risk Evaluator. + Returns: + str: Final decision text with rationale. + """ + prompt_input = ( + self.underwriter_prompt + + "\n\n--- DOCUMENT ANALYSIS SUMMARY ---\n" + + document_analysis + + "\n--- RISK EVALUATION REPORT ---\n" + + risk_evaluation + + "\n--- END REPORTS ---\n" + ) + print("Running Mortgage Underwriter Agent...") + result = self.underwriter_agent.run(prompt_input) + self.autosave_result(result, "underwriting_decision.txt") + return result + + # -------------------------------------------------------------------------- + # High-Level Workflow + # -------------------------------------------------------------------------- + def run( + self, + application_data: str, + return_format: str = "pdf", + output_filename: str = "UnderwritingDecision", + ) -> Union[str, Dict]: + """ + Processes a single mortgage application from documents to final underwriting decision. + Allows returning data in either PDF or JSON format. + + Args: + application_data (str): The text representation of the applicant’s documents. + return_format (str): "pdf" or "json". Defaults to "pdf". + output_filename (str): Base filename (without extension) for the output file. + + Returns: + Union[str, Dict]: If return_format="json", returns a dict with the final data. + If return_format="pdf", returns the path of the generated PDF. + """ + # Step 1: Document Analysis + doc_analysis = self.analyze_documents(application_data) + + # Step 2: Risk Evaluation + risk_eval = self.evaluate_risk(doc_analysis) + + # Step 3: Underwriting Decision + final_decision = self.underwrite_mortgage( + doc_analysis, risk_eval + ) + + # Prepare final content (text) + final_content = ( + "---- Mortgage Underwriting Decision Report ----\n\n" + "DOCUMENT ANALYSIS:\n" + doc_analysis + "\n\n" + "RISK EVALUATION:\n" + risk_eval + "\n\n" + "FINAL UNDERWRITING DECISION:\n" + final_decision + "\n" + ) + + # Return JSON + if return_format.lower() == "json": + output_data = { + "document_analysis": doc_analysis, + "risk_evaluation": risk_eval, + "final_decision": final_decision, + } + json_path = os.path.join( + self.save_directory, f"{output_filename}.json" + ) + with open(json_path, "w", encoding="utf-8") as jf: + json.dump(output_data, jf, indent=2) + return output_data + + # Generate PDF + elif return_format.lower() == "pdf": + pdf_path = os.path.join( + self.save_directory, f"{output_filename}.pdf" + ) + self.generate_pdf_report(final_content, pdf_path) + return pdf_path + + else: + raise ValueError( + "Invalid return format. Choose either 'pdf' or 'json'." + ) + + def run_concurrently( + self, + application_data: str, + return_format: str = "pdf", + output_filename: str = "UnderwritingDecision", + ) -> Union[str, Dict]: + with concurrent.futures.ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + futures = [ + executor.submit( + self.run, + application_data, + return_format, + output_filename, + ) + ] + results = [ + future.result() + for future in concurrent.futures.as_completed(futures) + ] + return results + + # -------------------------------------------------------------------------- + # Batch Processing + # -------------------------------------------------------------------------- + def runs_in_batch( + self, + list_of_application_data: List[str], + return_format: str = "pdf", + ) -> List[Union[str, Dict]]: + """ + Processes multiple mortgage applications in a batch and returns the results as + either PDFs or JSON structures for each application. + + Args: + list_of_application_data (List[str]): A list of string representations + of mortgage applications (e.g., raw text). + return_format (str): "pdf" or "json" format for the output files. + + Returns: + List[Union[str, Dict]]: A list of outputs (either file paths to PDFs or JSON dicts). + """ + results = [] + for idx, application_text in enumerate( + list_of_application_data, start=1 + ): + output_filename = f"UnderwritingDecision_{idx}" + print(f"\n--- Processing Application {idx} ---") + result = self.run( + application_data=application_text, + return_format=return_format, + output_filename=output_filename, + ) + results.append(result) + return results + + # -------------------------------------------------------------------------- + # PDF/Document Conversion Helpers + # -------------------------------------------------------------------------- + def convert_pdfs_to_texts( + self, pdf_paths: List[str] + ) -> List[str]: + """ + Converts multiple PDFs into text. + + Args: + pdf_paths (List[str]): A list of file paths to PDF documents. + + Returns: + List[str]: A list of extracted text contents, one per PDF in the list. + """ + text_results = [] + for pdf_path in pdf_paths: + print(f"Converting PDF to text: {pdf_path}") + text_data = self.pdf_to_text(pdf_path) + text_results.append(text_data) + return text_results + + +# ------------------------------------------------------------------------------ +# Example Usage (As a Script) +# ------------------------------------------------------------------------------ +if __name__ == "__main__": + # Sample mortgage application text (or read from PDF, DB, etc.) + sample_application_data = """ + Mortgage Application Data: + Applicant Name: Jane Doe + DOB: 02/14/1985 + SSN: 987-65-4321 + Annual Income: $95,000 + Credit Score: 690 + Outstanding Debt: $40,000 + Property Appraisal: $300,000 + Loan Amount Request: $270,000 + Employment: 3+ years at current employer + Bank Statements & Tax Returns: Provided for the last year + Extra Notes: Some minor late payments on credit cards in 2020. + """ + + # Initialize the swarm + swarm = MortgageUnderwritingSwarm( + save_directory="./autosave_results" + ) + + # 1) Convert PDF to text if needed + # pdf_text = swarm.pdf_to_text("path_to_some_pdf.pdf") + # Or convert multiple PDFs in batch + # texts_from_pdfs = swarm.convert_pdfs_to_texts(["file1.pdf", "file2.pdf"]) + + # 2) Process a single application + final_pdf_path = swarm.run( + application_data=sample_application_data, + return_format="pdf", # or "json" + output_filename="JaneDoe_UnderwritingDecision", + ) + print(f"PDF generated at: {final_pdf_path}") + + # 3) Process multiple applications in a batch + # multiple_apps = [sample_application_data, sample_application_data] # Pretend we have 2 + # batch_results = swarm.runs_in_batch( + # multiple_apps, + # return_format="json" + # ) + # Each item in batch_results will be a JSON dict if return_format="json". + # print("\nBatch Processing Results (JSON):") + # for result in batch_results: + # print(json.dumps(result, indent=2)) diff --git a/new_features_examples/multi_agent_router_example.py b/examples/multi_agent_router_example.py similarity index 100% rename from new_features_examples/multi_agent_router_example.py rename to examples/multi_agent_router_example.py diff --git a/new_features_examples/multi_tool_usage_agent.py b/examples/multi_tool_usage_agent.py similarity index 100% rename from new_features_examples/multi_tool_usage_agent.py rename to examples/multi_tool_usage_agent.py diff --git a/new_features_examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_spreadsheet.py b/examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_spreadsheet.py similarity index 100% rename from new_features_examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_spreadsheet.py rename to examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_spreadsheet.py diff --git a/new_features_examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_swarm_spreadsheet.csv b/examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_swarm_spreadsheet.csv similarity index 100% rename from new_features_examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_swarm_spreadsheet.csv rename to examples/new_spreadsheet_swarm_examples/crypto_tax_swarm/crypto_tax_swarm_spreadsheet.csv diff --git a/new_features_examples/new_spreadsheet_swarm_examples/financial_analysis/swarm.csv b/examples/new_spreadsheet_swarm_examples/financial_analysis/swarm.csv similarity index 100% rename from new_features_examples/new_spreadsheet_swarm_examples/financial_analysis/swarm.csv rename to examples/new_spreadsheet_swarm_examples/financial_analysis/swarm.csv diff --git a/new_features_examples/new_spreadsheet_swarm_examples/financial_analysis/swarm_csv.py b/examples/new_spreadsheet_swarm_examples/financial_analysis/swarm_csv.py similarity index 100% rename from new_features_examples/new_spreadsheet_swarm_examples/financial_analysis/swarm_csv.py rename to examples/new_spreadsheet_swarm_examples/financial_analysis/swarm_csv.py diff --git a/examples/o3_mini.py b/examples/o3_mini.py new file mode 100644 index 00000000..7f8aff8f --- /dev/null +++ b/examples/o3_mini.py @@ -0,0 +1,9 @@ +from swarms import Agent + +Agent( + agent_name="Stock-Analysis-Agent", + model_name="groq/deepseek-r1-distill-llama-70b", + max_loops="auto", + interactive=True, + streaming_on=False, +).run("What are the best ways to analyze macroeconomic data?") diff --git a/new_features_examples/ollama_demo.py b/examples/ollama_demo.py similarity index 100% rename from new_features_examples/ollama_demo.py rename to examples/ollama_demo.py diff --git a/examples/onboard/agents.yaml b/examples/onboard/agents.yaml new file mode 100644 index 00000000..23a7c714 --- /dev/null +++ b/examples/onboard/agents.yaml @@ -0,0 +1,52 @@ +agents: + - agent_name: "Financial-Analysis-Agent" + model: + model_name: "gpt-4" + temperature: 0.1 + max_tokens: 2000 + system_prompt: "financial_agent_sys_prompt" + max_loops: 1 + autosave: true + dashboard: false + verbose: true + dynamic_temperature_enabled: true + saved_state_path: "finance_agent.json" + user_name: "swarms_corp" + retry_attempts: 1 + context_length: 4000 + return_step_meta: false + output_type: "str" + task: "How can I establish a ROTH IRA to buy stocks and get a tax break?" + + - agent_name: "Stock-Analysis-Agent" + model: + model_name: "gpt-4" + temperature: 0.2 + max_tokens: 1500 + system_prompt: "stock_agent_sys_prompt" + max_loops: 2 + autosave: true + dashboard: false + verbose: true + dynamic_temperature_enabled: false + saved_state_path: "stock_agent.json" + user_name: "stock_user" + retry_attempts: 3 + context_length: 4000 + return_step_meta: true + output_type: "json" + task: "What is the best strategy for long-term stock investment?" + +swarm_architecture: + name: "Financial-Advisory-Swarm" + description: "A swarm of agents working together to provide comprehensive financial advice" + swarm_type: "SequentialWorkflow" + max_loops: 2 + task: "Analyze ROTH IRA setup requirements and provide a comprehensive long-term investment strategy" + autosave: true + return_json: false + rules: | + 1. Financial-Analysis-Agent first explains ROTH IRA setup process and requirements + 2. Stock-Analysis-Agent then provides specific investment strategies suitable for ROTH IRA + 3. Both agents should ensure advice is tax-aware and compliant with retirement account regulations + 4. Focus on practical, actionable steps the user can take \ No newline at end of file diff --git a/examples/onboard/onboard-basic.py b/examples/onboard/onboard-basic.py new file mode 100644 index 00000000..96df9f35 --- /dev/null +++ b/examples/onboard/onboard-basic.py @@ -0,0 +1,37 @@ +import os + +from dotenv import load_dotenv +from loguru import logger +from swarm_models import OpenAIChat + +from swarms.agents.create_agents_from_yaml import ( + create_agents_from_yaml, +) + +# Load environment variables +load_dotenv() + +# Path to your YAML file +yaml_file = "agents.yaml" + +# Get the OpenAI API key from the environment variable +api_key = os.getenv("OPENAI_API_KEY") + +# Create an instance of the OpenAIChat class +model = OpenAIChat( + openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 +) + +print(model) + +try: + # Create agents and run tasks (using 'both' to return agents and task results) + task_results = create_agents_from_yaml( + model=model, yaml_file=yaml_file, return_type="agents" + ) + + print(task_results) + logger.info(f"Results from agents: {task_results}") +except Exception as e: + logger.error(f"An error occurred: {e}") + print(e) diff --git a/new_features_examples/openai_assistant_wrapper.py b/examples/openai_assistant_wrapper.py similarity index 100% rename from new_features_examples/openai_assistant_wrapper.py rename to examples/openai_assistant_wrapper.py diff --git a/new_features_examples/persistent_legal_agent.py b/examples/persistent_legal_agent.py similarity index 100% rename from new_features_examples/persistent_legal_agent.py rename to examples/persistent_legal_agent.py diff --git a/new_features_examples/privacy_building.py b/examples/privacy_building.py similarity index 100% rename from new_features_examples/privacy_building.py rename to examples/privacy_building.py diff --git a/new_features_examples/qdrant_agent.py b/examples/qdrant_agent.py similarity index 100% rename from new_features_examples/qdrant_agent.py rename to examples/qdrant_agent.py diff --git a/new_features_examples/real_estate_agent.py b/examples/real_estate_agent.py similarity index 100% rename from new_features_examples/real_estate_agent.py rename to examples/real_estate_agent.py diff --git a/examples/reasoning_duo.py b/examples/reasoning_duo.py new file mode 100644 index 00000000..28c00238 --- /dev/null +++ b/examples/reasoning_duo.py @@ -0,0 +1,156 @@ +import os +from swarms import Agent +from dotenv import load_dotenv + +from swarm_models import OpenAIChat + +load_dotenv() + + +model = OpenAIChat( + model_name="deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free", + openai_api_key=os.getenv("TOGETHER_API_KEY"), + base_url="https://api.together.xyz/v1", +) + +# Define system prompts for reasoning agents +THINKING_AGENT_PROMPT = """You are a sophisticated analytical and strategic thinking agent focused on deep problem analysis and solution design. + +Your core capabilities include: +1. Comprehensive Problem Analysis + - Break down complex problems into constituent elements + - Map relationships and dependencies between components + - Identify root causes and underlying patterns + - Consider historical context and precedents + +2. Multi-Perspective Evaluation + - Examine issues from multiple stakeholder viewpoints + - Consider short-term and long-term implications + - Evaluate social, economic, technical, and ethical dimensions + - Challenge assumptions and identify potential biases + +3. Risk Assessment and Mitigation + - Conduct thorough risk analysis across scenarios + - Identify potential failure modes and edge cases + - Develop contingency plans and mitigation strategies + - Assess probability and impact of various outcomes + +4. Strategic Solution Development + - Generate multiple solution approaches + - Evaluate trade-offs between different strategies + - Consider resource constraints and limitations + - Design scalable and sustainable solutions + +5. Decision Framework Creation + - Establish clear evaluation criteria + - Weight competing priorities appropriately + - Create structured decision matrices + - Document reasoning and key decision factors + +6. Systems Thinking + - Map interconnections between system elements + - Identify feedback loops and cascade effects + - Consider emergent properties and behaviors + - Account for dynamic system evolution + +Your output should always include: +- Clear articulation of your analytical process +- Key assumptions and their justification +- Potential risks and mitigation strategies +- Multiple solution options with pros/cons +- Specific recommendations with supporting rationale +- Areas of uncertainty requiring further investigation + +Focus on developing robust, well-reasoned strategies that account for complexity while remaining practical and actionable.""" + +ACTION_AGENT_PROMPT = """You are an advanced implementation and execution agent focused on turning strategic plans into concrete results. + +Your core capabilities include: +1. Strategic Implementation Planning + - Break down high-level strategies into specific actions + - Create detailed project roadmaps and timelines + - Identify critical path dependencies + - Establish clear milestones and success metrics + - Design feedback and monitoring mechanisms + +2. Resource Optimization + - Assess resource requirements and constraints + - Optimize resource allocation and scheduling + - Identify efficiency opportunities + - Plan for scalability and flexibility + - Manage competing priorities effectively + +3. Execution Management + - Develop detailed implementation procedures + - Create clear operational guidelines + - Establish quality control measures + - Design progress tracking systems + - Build in review and adjustment points + +4. Risk Management + - Implement specific risk mitigation measures + - Create early warning systems + - Develop contingency procedures + - Establish fallback positions + - Monitor risk indicators + +5. Stakeholder Management + - Identify key stakeholders and their needs + - Create communication plans + - Establish feedback mechanisms + - Manage expectations effectively + - Build support and buy-in + +6. Continuous Improvement + - Monitor implementation effectiveness + - Gather and analyze performance data + - Identify improvement opportunities + - Implement iterative enhancements + - Document lessons learned + +Your output should always include: +- Detailed action plans with specific steps +- Resource requirements and allocation plans +- Timeline with key milestones +- Success metrics and monitoring approach +- Risk mitigation procedures +- Communication and stakeholder management plans +- Quality control measures +- Feedback and adjustment mechanisms + +Focus on practical, efficient, and effective implementation while maintaining high quality standards and achieving desired outcomes.""" + +# Initialize the thinking agent +thinking_agent = Agent( + agent_name="Strategic-Thinker", + agent_description="Deep analysis and strategic planning agent", + system_prompt=THINKING_AGENT_PROMPT, + max_loops=1, + llm=model, + dynamic_temperature_enabled=True, +) + +# Initialize the action agent +action_agent = Agent( + agent_name="Action-Executor", + agent_description="Practical implementation and execution agent", + system_prompt=ACTION_AGENT_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, +) + + +def run_reasoning_duo(task: str): + # Step 1: Thinking Agent + thinking_result = thinking_agent.run(task) + + # Step 2: Action Agent + action_result = action_agent.run( + f"From {thinking_agent.agent_name}: {thinking_result}" + ) + return action_result + + +if __name__ == "__main__": + run_reasoning_duo("What is the best way to invest $1000?") diff --git a/new_features_examples/sequential_workflow/sequential_worflow_test 2.py b/examples/sequential_workflow/sequential_worflow_test 2.py similarity index 100% rename from new_features_examples/sequential_workflow/sequential_worflow_test 2.py rename to examples/sequential_workflow/sequential_worflow_test 2.py diff --git a/new_features_examples/sequential_workflow/sequential_worflow_test.py b/examples/sequential_workflow/sequential_worflow_test.py similarity index 100% rename from new_features_examples/sequential_workflow/sequential_worflow_test.py rename to examples/sequential_workflow/sequential_worflow_test.py diff --git a/new_features_examples/sequential_workflow/sequential_workflow 2.py b/examples/sequential_workflow/sequential_workflow 2.py similarity index 100% rename from new_features_examples/sequential_workflow/sequential_workflow 2.py rename to examples/sequential_workflow/sequential_workflow 2.py diff --git a/new_features_examples/sequential_workflow/sequential_workflow.py b/examples/sequential_workflow/sequential_workflow.py similarity index 100% rename from new_features_examples/sequential_workflow/sequential_workflow.py rename to examples/sequential_workflow/sequential_workflow.py diff --git a/new_features_examples/solana_agent.py b/examples/solana_agent.py similarity index 100% rename from new_features_examples/solana_agent.py rename to examples/solana_agent.py diff --git a/new_features_examples/solana_tool/solana_tool.py b/examples/solana_tool/solana_tool.py similarity index 100% rename from new_features_examples/solana_tool/solana_tool.py rename to examples/solana_tool/solana_tool.py diff --git a/new_features_examples/solana_tool/solana_tool_test.py b/examples/solana_tool/solana_tool_test.py similarity index 100% rename from new_features_examples/solana_tool/solana_tool_test.py rename to examples/solana_tool/solana_tool_test.py diff --git a/new_features_examples/spike/agent_rearrange_test.py b/examples/spike/agent_rearrange_test.py similarity index 100% rename from new_features_examples/spike/agent_rearrange_test.py rename to examples/spike/agent_rearrange_test.py diff --git a/new_features_examples/spike/function_caller_example.py b/examples/spike/function_caller_example.py similarity index 100% rename from new_features_examples/spike/function_caller_example.py rename to examples/spike/function_caller_example.py diff --git a/new_features_examples/spike/memory.py b/examples/spike/memory.py similarity index 100% rename from new_features_examples/spike/memory.py rename to examples/spike/memory.py diff --git a/new_features_examples/spike/spike.zip b/examples/spike/spike.zip similarity index 100% rename from new_features_examples/spike/spike.zip rename to examples/spike/spike.zip diff --git a/new_features_examples/spike/test.py b/examples/spike/test.py similarity index 100% rename from new_features_examples/spike/test.py rename to examples/spike/test.py diff --git a/examples/swarm_eval_deepseek.py b/examples/swarm_eval_deepseek.py new file mode 100644 index 00000000..ac4a9408 --- /dev/null +++ b/examples/swarm_eval_deepseek.py @@ -0,0 +1,170 @@ +from loguru import logger +from swarms.structs.swarm_eval import ( + SwarmEvaluator, + PRESET_DATASETS, +) + +import os +from swarms import Agent +from dotenv import load_dotenv + +from swarm_models import OpenAIChat + +load_dotenv() + + +model = OpenAIChat( + model_name="deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free", + openai_api_key=os.getenv("TOGETHER_API_KEY"), + base_url="https://api.together.xyz/v1", +) + +# Define system prompts for reasoning agents +THINKING_AGENT_PROMPT = """You are a sophisticated analytical and strategic thinking agent focused on deep problem analysis and solution design. + +Your core capabilities include: +1. Comprehensive Problem Analysis + - Break down complex problems into constituent elements + - Map relationships and dependencies between components + - Identify root causes and underlying patterns + - Consider historical context and precedents + +2. Multi-Perspective Evaluation + - Examine issues from multiple stakeholder viewpoints + - Consider short-term and long-term implications + - Evaluate social, economic, technical, and ethical dimensions + - Challenge assumptions and identify potential biases + +3. Risk Assessment and Mitigation + - Conduct thorough risk analysis across scenarios + - Identify potential failure modes and edge cases + - Develop contingency plans and mitigation strategies + - Assess probability and impact of various outcomes + +4. Strategic Solution Development + - Generate multiple solution approaches + - Evaluate trade-offs between different strategies + - Consider resource constraints and limitations + - Design scalable and sustainable solutions + +5. Decision Framework Creation + - Establish clear evaluation criteria + - Weight competing priorities appropriately + - Create structured decision matrices + - Document reasoning and key decision factors + +6. Systems Thinking + - Map interconnections between system elements + - Identify feedback loops and cascade effects + - Consider emergent properties and behaviors + - Account for dynamic system evolution + +Your output should always include: +- Clear articulation of your analytical process +- Key assumptions and their justification +- Potential risks and mitigation strategies +- Multiple solution options with pros/cons +- Specific recommendations with supporting rationale +- Areas of uncertainty requiring further investigation + +Focus on developing robust, well-reasoned strategies that account for complexity while remaining practical and actionable.""" + +ACTION_AGENT_PROMPT = """You are an advanced implementation and execution agent focused on turning strategic plans into concrete results. + +Your core capabilities include: +1. Strategic Implementation Planning + - Break down high-level strategies into specific actions + - Create detailed project roadmaps and timelines + - Identify critical path dependencies + - Establish clear milestones and success metrics + - Design feedback and monitoring mechanisms + +2. Resource Optimization + - Assess resource requirements and constraints + - Optimize resource allocation and scheduling + - Identify efficiency opportunities + - Plan for scalability and flexibility + - Manage competing priorities effectively + +3. Execution Management + - Develop detailed implementation procedures + - Create clear operational guidelines + - Establish quality control measures + - Design progress tracking systems + - Build in review and adjustment points + +4. Risk Management + - Implement specific risk mitigation measures + - Create early warning systems + - Develop contingency procedures + - Establish fallback positions + - Monitor risk indicators + +5. Stakeholder Management + - Identify key stakeholders and their needs + - Create communication plans + - Establish feedback mechanisms + - Manage expectations effectively + - Build support and buy-in + +6. Continuous Improvement + - Monitor implementation effectiveness + - Gather and analyze performance data + - Identify improvement opportunities + - Implement iterative enhancements + - Document lessons learned + +Your output should always include: +- Detailed action plans with specific steps +- Resource requirements and allocation plans +- Timeline with key milestones +- Success metrics and monitoring approach +- Risk mitigation procedures +- Communication and stakeholder management plans +- Quality control measures +- Feedback and adjustment mechanisms + +Focus on practical, efficient, and effective implementation while maintaining high quality standards and achieving desired outcomes.""" + +# Initialize the thinking agent +thinking_agent = Agent( + agent_name="Strategic-Thinker", + agent_description="Deep analysis and strategic planning agent", + system_prompt=THINKING_AGENT_PROMPT, + max_loops=1, + llm=model, + dynamic_temperature_enabled=True, +) + + +class DeepSeekSwarm: + def __init__(self): + self.thinking_agent = thinking_agent + + def run(self, task: str): + first_one = self.thinking_agent.run(task) + + return self.thinking_agent.run(first_one) + + +if __name__ == "__main__": + # Initialize the swarm (replace with your actual multi-agent system) + swarm = DeepSeekSwarm() + + # Initialize the evaluator with the swarm instance + evaluator = SwarmEvaluator(swarm) + + logger.info("Starting evaluation for dataset: gsm8k") + + # For demonstration, we use 4 concurrent workers, show progress, and save results. + results = evaluator.evaluate( + "gsm8k", + split="train", + config=PRESET_DATASETS["gsm8k"], + max_workers=os.cpu_count(), + max_retries=3, + show_progress=True, + output_file="gsm8k_results.txt", + ) + + logger.info(f"Results for gsm8k: {results}") diff --git a/new_features_examples/swarm_router_example.py b/examples/swarm_router_example.py similarity index 100% rename from new_features_examples/swarm_router_example.py rename to examples/swarm_router_example.py diff --git a/new_features_examples/swarmarrange/rearrange_test.py b/examples/swarmarrange/rearrange_test.py similarity index 100% rename from new_features_examples/swarmarrange/rearrange_test.py rename to examples/swarmarrange/rearrange_test.py diff --git a/new_features_examples/swarmarrange/swarm_arange_demo 2.py b/examples/swarmarrange/swarm_arange_demo 2.py similarity index 100% rename from new_features_examples/swarmarrange/swarm_arange_demo 2.py rename to examples/swarmarrange/swarm_arange_demo 2.py diff --git a/new_features_examples/swarmarrange/swarm_arange_demo.py b/examples/swarmarrange/swarm_arange_demo.py similarity index 100% rename from new_features_examples/swarmarrange/swarm_arange_demo.py rename to examples/swarmarrange/swarm_arange_demo.py diff --git a/new_features_examples/swarms_claude_example.py b/examples/swarms_claude_example.py similarity index 100% rename from new_features_examples/swarms_claude_example.py rename to examples/swarms_claude_example.py diff --git a/examples/together_deepseek_agent.py b/examples/together_deepseek_agent.py new file mode 100644 index 00000000..8a91eebb --- /dev/null +++ b/examples/together_deepseek_agent.py @@ -0,0 +1,106 @@ +from swarms import Agent +from dotenv import load_dotenv +from swarms_tools import fetch_htx_data, coin_gecko_coin_api + +load_dotenv() + +CRYPTO_ANALYST_SYSTEM_PROMPT = """ +You are an expert cryptocurrency financial analyst with deep expertise in: +1. Technical Analysis + - Chart patterns and indicators (RSI, MACD, Bollinger Bands) + - Volume analysis and market momentum + - Support and resistance levels + - Trend analysis and price action + +2. Fundamental Analysis + - Tokenomics evaluation + - Network metrics (TVL, daily active users, transaction volume) + - Protocol revenue and growth metrics + - Market capitalization analysis + - Token utility and use cases + +3. Market Analysis + - Market sentiment analysis + - Correlation with broader crypto market + - Impact of macro events + - Institutional adoption metrics + - DeFi and NFT market analysis + +4. Risk Assessment + - Volatility metrics + - Liquidity analysis + - Smart contract risks + - Regulatory considerations + - Exchange exposure risks + +5. Data Analysis Methods + - On-chain metrics analysis + - Whale wallet tracking + - Exchange inflow/outflow + - Mining/Staking statistics + - Network health indicators + +When analyzing crypto assets, always: +1. Start with a comprehensive market overview +2. Examine both on-chain and off-chain metrics +3. Consider multiple timeframes (short, medium, long-term) +4. Evaluate risk-reward ratios +5. Assess market sentiment and momentum +6. Consider regulatory and security factors +7. Analyze correlations with BTC, ETH, and traditional markets +8. Examine liquidity and volume profiles +9. Review recent protocol developments and updates +10. Consider macro economic factors + +Format your analysis with: +- Clear section headings +- Relevant metrics and data points +- Risk warnings and disclaimers +- Price action analysis +- Market sentiment summary +- Technical indicators +- Fundamental factors +- Clear recommendations with rationale + +Remember to: +- Always provide data-driven insights +- Include both bullish and bearish scenarios +- Highlight key risk factors +- Consider market cycles and seasonality +- Maintain objectivity in analysis +- Cite sources for data and claims +- Update analysis based on new market conditions +""" + +# Initialize the crypto analysis agent +agent = Agent( + agent_name="Crypto-Analysis-Expert", + agent_description="Expert cryptocurrency financial analyst and market researcher", + system_prompt=CRYPTO_ANALYST_SYSTEM_PROMPT, + max_loops="auto", + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="crypto_analyst", + output_type="str", + interactive=True, +) + +print(fetch_htx_data("sol")) +print(coin_gecko_coin_api("solana")) + +# Example usage +agent.run( + f""" + Analyze the current state of Solana (SOL), including: + 1. Technical analysis of price action + 2. On-chain metrics and network health + 3. Recent protocol developments + 4. Market sentiment + 5. Risk factors + Please provide a comprehensive analysis with data-driven insights. + + # Solana CoinGecko Data + Real-tim data from Solana CoinGecko: \n {coin_gecko_coin_api("solana")} + + """ +) diff --git a/examples/tools_examples/dex_screener.py b/examples/tools_examples/dex_screener.py new file mode 100644 index 00000000..5c1bfbc1 --- /dev/null +++ b/examples/tools_examples/dex_screener.py @@ -0,0 +1,23 @@ +from swarms import Agent + +from swarms_tools.finance.dex_screener import ( + fetch_dex_screener_profiles, +) + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + interactive=False, +) + +token_profiles = fetch_dex_screener_profiles() +prompt = f"Using data from DexScreener, analyze the latest tokens and provide a detailed analysis with top 5 tokens based on their potential, considering both their profiles and recent boosts. The token profiles are sourced from DexScreener's token profiles API, while the token boosts are sourced from DexScreener's latest token boosts API. {str(token_profiles)}" +agent.run(prompt) diff --git a/examples/tools_examples/financial_news_agent.py b/examples/tools_examples/financial_news_agent.py new file mode 100644 index 00000000..c949a4fe --- /dev/null +++ b/examples/tools_examples/financial_news_agent.py @@ -0,0 +1,30 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + yahoo_finance_api, +) + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, + tools=[yahoo_finance_api], +) + +agent.run("Analyze the latest metrics for nvidia") +# Less than 30 lines of code.... diff --git a/examples/tools_examples/swarms_tool_example_simple.py b/examples/tools_examples/swarms_tool_example_simple.py new file mode 100644 index 00000000..96311883 --- /dev/null +++ b/examples/tools_examples/swarms_tool_example_simple.py @@ -0,0 +1,30 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + coin_gecko_coin_api, + fetch_htx_data, +) + + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, +) + +agent.run( + f"Analyze the $swarms token on HTX with data: {fetch_htx_data('swarms')}. Additionally, consider the following CoinGecko data: {coin_gecko_coin_api('swarms')}" +) diff --git a/examples/tools_examples/swarms_tools_example.py b/examples/tools_examples/swarms_tools_example.py new file mode 100644 index 00000000..9171bb30 --- /dev/null +++ b/examples/tools_examples/swarms_tools_example.py @@ -0,0 +1,31 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) +from swarms_tools import ( + fetch_stock_news, + coin_gecko_coin_api, + fetch_htx_data, +) + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=3, + context_length=8192, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" "yaml" and + auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task + max_tokens=4000, # max output tokens + saved_state_path="agent_00.json", + interactive=False, + tools=[fetch_stock_news, coin_gecko_coin_api, fetch_htx_data], +) + +agent.run("Analyze the $swarms token on htx") diff --git a/new_features_examples/unique_swarms_examples.py b/examples/unique_swarms_examples.py similarity index 100% rename from new_features_examples/unique_swarms_examples.py rename to examples/unique_swarms_examples.py diff --git a/new_features_examples/voice.py b/examples/voice.py similarity index 100% rename from new_features_examples/voice.py rename to examples/voice.py diff --git a/majority_voting_example.py b/majority_voting_example.py new file mode 100644 index 00000000..047b4878 --- /dev/null +++ b/majority_voting_example.py @@ -0,0 +1,52 @@ +from swarms import Agent, MajorityVoting + +# Initialize multiple agents with different specialties +agents = [ + Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor focused on market analysis", + system_prompt="You are a financial advisor specializing in market analysis and investment opportunities.", + max_loops=1, + model_name="gpt-4o" + ), + Agent( + agent_name="Risk-Assessment-Agent", + agent_description="Risk analysis and portfolio management expert", + system_prompt="You are a risk assessment expert focused on evaluating investment risks and portfolio diversification.", + max_loops=1, + model_name="gpt-4o" + ), + Agent( + agent_name="Tech-Investment-Agent", + agent_description="Technology sector investment specialist", + system_prompt="You are a technology investment specialist focused on AI, emerging tech, and growth opportunities.", + max_loops=1, + model_name="gpt-4o" + ) +] + + +consensus_agent = Agent( + agent_name="Consensus-Agent", + agent_description="Consensus agent focused on analyzing investment advice", + system_prompt="You are a consensus agent focused on analyzing investment advice and providing a final answer.", + max_loops=1, + model_name="gpt-4o" +) + +# Create majority voting system +majority_voting = MajorityVoting( + name="Investment-Advisory-System", + description="Multi-agent system for investment advice", + agents=agents, + verbose=True, + consensus_agent=consensus_agent +) + +# Run the analysis with majority voting +result = majority_voting.run( + task="Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", + correct_answer="" # Optional evaluation metric +) + +print(result) diff --git a/pyproject.toml b/pyproject.toml index 0791dfdd..67d9de10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "6.8.8" +version = "7.1.7" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] @@ -59,10 +59,10 @@ classifiers = [ python = ">=3.10,<4.0" # torch = ">=2.1.1,<3.0" # transformers = ">= 4.39.0, <5.0.0" +setuptools = "*" asyncio = ">=3.4.3,<4.0" toml = "*" pypdf = "5.1.0" -swarm-models = "*" loguru = "*" pydantic = "*" tenacity = "*" @@ -73,32 +73,18 @@ docstring_parser = "0.16" # TODO: tiktoken = "*" networkx = "*" aiofiles = "*" -clusterops = "*" # chromadb = "*" rich = "*" -pandas = "*" +numpy = "*" +litellm = "*" # sentence-transformers = "*" -# [tool.poetry.extras] -# # Extra for NLP-related functionalities -# nlp = [ -# "torch>=2.1.1,<3.0", -# "transformers>=4.39.0,<5.0.0", -# "sentence-transformers", -# "swarm-models", -# ] - -# # Extra for database-related functionalities -# db = ["chromadb"] - # # All optional dependencies for convenience # all = [ -# "torch>=2.1.1,<3.0", -# "transformers>=4.39.0,<5.0.0", -# "sentence-transformers", -# "chromadb", -# "swarm-models" +# "torch", +# "transformers", +# "litellm" # ] @@ -109,7 +95,7 @@ swarms = "swarms.cli.main:main" [tool.poetry.group.lint.dependencies] black = ">=23.1,<25.0" -ruff = ">=0.5.1,<0.9.2" +ruff = ">=0.5.1,<0.9.3" types-toml = "^0.10.8.1" types-pytz = ">=2023.3,<2025.0" types-chardet = "^5.0.4.6" diff --git a/quant_crypto_swarm.py b/quant_crypto_swarm.py new file mode 100644 index 00000000..f12a1ee3 --- /dev/null +++ b/quant_crypto_swarm.py @@ -0,0 +1,156 @@ +import asyncio +from swarms import Agent +from dotenv import load_dotenv +from swarms_tools import coin_gecko_coin_api + +load_dotenv() + +CRYPTO_ANALYST_SYSTEM_PROMPT = """ +You are an expert cryptocurrency financial analyst with deep expertise in: +1. Technical Analysis + - Chart patterns and indicators (RSI, MACD, Bollinger Bands) + - Volume analysis and market momentum + - Support and resistance levels + - Trend analysis and price action + +2. Fundamental Analysis + - Tokenomics evaluation + - Network metrics (TVL, daily active users, transaction volume) + - Protocol revenue and growth metrics + - Market capitalization analysis + - Token utility and use cases + +3. Market Analysis + - Market sentiment analysis + - Correlation with broader crypto market + - Impact of macro events + - Institutional adoption metrics + - DeFi and NFT market analysis + +4. Risk Assessment + - Volatility metrics + - Liquidity analysis + - Smart contract risks + - Regulatory considerations + - Exchange exposure risks + +5. Data Analysis Methods + - On-chain metrics analysis + - Whale wallet tracking + - Exchange inflow/outflow + - Mining/Staking statistics + - Network health indicators + +When analyzing crypto assets, always: +1. Start with a comprehensive market overview +2. Examine both on-chain and off-chain metrics +3. Consider multiple timeframes (short, medium, long-term) +4. Evaluate risk-reward ratios +5. Assess market sentiment and momentum +6. Consider regulatory and security factors +7. Analyze correlations with BTC, ETH, and traditional markets +8. Examine liquidity and volume profiles +9. Review recent protocol developments and updates +10. Consider macro economic factors + +Format your analysis with: +- Clear section headings +- Relevant metrics and data points +- Risk warnings and disclaimers +- Price action analysis +- Market sentiment summary +- Technical indicators +- Fundamental factors +- Clear recommendations with rationale + +Remember to: +- Always provide data-driven insights +- Include both bullish and bearish scenarios +- Highlight key risk factors +- Consider market cycles and seasonality +- Maintain objectivity in analysis +- Cite sources for data and claims +- Update analysis based on new market conditions +""" + +# Initialize multiple crypto analysis agents with different specialties +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Expert in technical analysis and chart patterns", + system_prompt=CRYPTO_ANALYST_SYSTEM_PROMPT, + max_loops=1, + model_name="gpt-4o", + dynamic_temperature_enabled=True, + user_name="tech_analyst", + output_type="str", +) + +# List of coins to analyze +coins = ["solana", "raydium", "aixbt", "jupiter"] + +# Dictionary to store analyses +coin_analyses = {} + + +async def analyze_coin(coin, technical_analyst): + print(f"\n=== Technical Analysis for {coin.upper()} ===\n") + + # Fetch market data + gecko_data = coin_gecko_coin_api(coin) + + # Get technical analysis + analysis = await technical_analyst.arun( + f"""Analyze {coin}'s technical indicators and price action using this data: + CoinGecko Data: {gecko_data} + Focus on: + - Chart patterns and trends + - Support/resistance levels + - Momentum indicators + - Price targets and risk levels + - Overall technical strength rating (1-10) + + End with a clear technical strength score out of 10. + """ + ) + return coin, analysis + + +async def main(): + # Create tasks for concurrent execution + tasks = [analyze_coin(coin, technical_analyst) for coin in coins] + + # Execute all analyses concurrently + results = await asyncio.gather(*tasks) + + # Store results in coin_analyses + for coin, analysis in results: + coin_analyses[coin] = analysis + + # Have technical analyst compare and recommend best investment + consensus = await technical_analyst.arun( + f"""Based on your technical analysis of these coins: + + Solana Analysis: + {coin_analyses['solana']} + + Raydium Analysis: + {coin_analyses['raydium']} + + Jupiter Analysis: + {coin_analyses['jupiter']} + + AIXBT Analysis: + {coin_analyses['aixbt']} + + Please: + 1. Rank the coins from strongest to weakest technical setup + 2. Identify which coin has the best risk/reward ratio + 3. Make a clear recommendation on which coin is the best investment opportunity and why + 4. Note any key risks or concerns with the recommended coin + """ + ) + return consensus + + +# Run the async main function +consensus = asyncio.run(main()) diff --git a/requirements.txt b/requirements.txt index 10c9fa3e..10178b92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,5 +21,4 @@ types-chardet>=5.0.4.6 mypy-protobuf>=3.0.0 pytest>=8.1.1 networkx -aiofiles -clusterops \ No newline at end of file +aiofiles \ No newline at end of file diff --git a/scripts/Dockerfile b/scripts/Dockerfile deleted file mode 100644 index 91b75cae..00000000 --- a/scripts/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# Use an official CUDA runtime as a parent image -FROM nvidia/cuda:11.4.2-runtime-ubuntu20.04 - -# Set the working directory in the container to /app -WORKDIR /app - -# Copy the current directory contents into the container at /app -COPY . /app - -# Install any needed packages specified in requirements.txt -RUN apt-get update && apt-get install -y \ - python3-pip \ - && rm -rf /var/lib/apt/lists/* -RUN pip3 install --no-cache-dir -r requirements.txt - -# Make port 80 available to the world outside this container -EXPOSE 80 - -# Define environment variable -# ENV NAME World - -# Run app.py when the container launches -CMD ["python3", "example.py"] \ No newline at end of file diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py deleted file mode 100644 index d9536451..00000000 --- a/scripts/auto_tests_docs/auto_docs.py +++ /dev/null @@ -1,82 +0,0 @@ -###### VERISON2 -import inspect -import os -import threading - -from dotenv import load_dotenv - -from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP -from swarm_models import OpenAIChat -from swarms.structs.majority_voting import MajorityVoting -from swarms.structs.stackoverflow_swarm import StackOverflowSwarm -from swarms.structs.task_queue_base import TaskQueueBase - -########## - - -#################### -load_dotenv() - -api_key = os.getenv("OPENAI_API_KEY") - -model = OpenAIChat( - openai_api_key=api_key, - max_tokens=4000, -) - - -def process_documentation(cls): - """ - Process the documentation for a given class using OpenAI model and save it in a Markdown file. - """ - doc = inspect.getdoc(cls) - source = inspect.getsource(cls) - input_content = ( - "Class Name:" - f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource" - f" Code:\n{source}" - ) - - # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content) - processed_content = model( - DOCUMENTATION_WRITER_SOP(input_content, "swarms.structs") - ) - - # doc_content = f"# {cls.__name__}\n\n{processed_content}\n" - doc_content = f"{processed_content}\n" - - # Create the directory if it doesn't exist - dir_path = "docs/swarms/tokenizers" - os.makedirs(dir_path, exist_ok=True) - - # Write the processed documentation to a Markdown file - file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md") - with open(file_path, "w") as file: - file.write(doc_content) - - print(f"Documentation generated for {cls.__name__}.") - - -def main(): - classes = [ - MajorityVoting, - StackOverflowSwarm, - TaskQueueBase, - ] - threads = [] - for cls in classes: - thread = threading.Thread( - target=process_documentation, args=(cls,) - ) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - print("Documentation generated in 'swarms.structs' directory.") - - -if __name__ == "__main__": - main() diff --git a/scripts/auto_tests_docs/auto_docs_functions.py b/scripts/auto_tests_docs/auto_docs_functions.py deleted file mode 100644 index e4df344f..00000000 --- a/scripts/auto_tests_docs/auto_docs_functions.py +++ /dev/null @@ -1,77 +0,0 @@ -import inspect -import os -import sys -import threading - -from dotenv import load_dotenv - -from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP -from swarm_models import OpenAIChat - -load_dotenv() - -api_key = os.getenv("OPENAI_API_KEY") - -model = OpenAIChat( - model_name="gpt-4", - openai_api_key=api_key, - max_tokens=4000, -) - - -def process_documentation(item): - """ - Process the documentation for a given function using OpenAI model and save it in a Markdown file. - """ - doc = inspect.getdoc(item) - source = inspect.getsource(item) - input_content = ( - f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource" - f" Code:\n{source}" - ) - print(input_content) - - # Process with OpenAI model - processed_content = model( - DOCUMENTATION_WRITER_SOP(input_content, "swarms.utils") - ) - - doc_content = f"# {item.__name__}\n\n{processed_content}\n" - - # Create the directory if it doesn't exist - dir_path = "docs/swarms/utils" - os.makedirs(dir_path, exist_ok=True) - - # Write the processed documentation to a Markdown file - file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md") - with open(file_path, "w") as file: - file.write(doc_content) - - -def main(): - # Gathering all functions from the swarms.utils module - functions = [ - obj - for name, obj in inspect.getmembers( - sys.modules["swarms.utils"] - ) - if inspect.isfunction(obj) - ] - - threads = [] - for func in functions: - thread = threading.Thread( - target=process_documentation, args=(func,) - ) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - print("Documentation generated in 'docs/swarms/utils' directory.") - - -if __name__ == "__main__": - main() diff --git a/scripts/auto_tests_docs/auto_docs_omni.py b/scripts/auto_tests_docs/auto_docs_omni.py deleted file mode 100644 index 6f5ceb0c..00000000 --- a/scripts/auto_tests_docs/auto_docs_omni.py +++ /dev/null @@ -1,84 +0,0 @@ -import inspect -import os -import threading - -from dotenv import load_dotenv - -from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP -from swarm_models import OpenAIChat - -########### - - -############### - -load_dotenv() - -api_key = os.getenv("OPENAI_API_KEY") - -model = OpenAIChat( - model_name="gpt-4-1106-preview", - openai_api_key=api_key, - max_tokens=4000, -) - - -def process_documentation( - item, - module: str = "swarms.structs", - docs_folder_path: str = "docs/swarms/structs", -): - """ - Process the documentation for a given class or function using OpenAI model and save it in a Python file. - """ - doc = inspect.getdoc(item) - source = inspect.getsource(item) - is_class = inspect.isclass(item) - item_type = "Class Name" if is_class else "Name" - input_content = ( - f"{item_type}:" - f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource" - f" Code:\n{source}" - ) - - # Process with OpenAI model - processed_content = model( - DOCUMENTATION_WRITER_SOP(input_content, module) - ) - - doc_content = f"# {item.__name__}\n\n{processed_content}\n" - - # Create the directory if it doesn't exist - dir_path = docs_folder_path - os.makedirs(dir_path, exist_ok=True) - - # Write the processed documentation to a Python file - file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md") - with open(file_path, "w") as file: - file.write(doc_content) - - print( - f"Processed documentation for {item.__name__}. at {file_path}" - ) - - -def main(module: str = "docs/swarms/structs"): - items = [] - - threads = [] - for item in items: - thread = threading.Thread( - target=process_documentation, args=(item,) - ) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - print(f"Documentation generated in {module} directory.") - - -if __name__ == "__main__": - main() diff --git a/scripts/auto_tests_docs/auto_tests.py b/scripts/auto_tests_docs/auto_tests.py deleted file mode 100644 index 9c1ebfce..00000000 --- a/scripts/auto_tests_docs/auto_tests.py +++ /dev/null @@ -1,103 +0,0 @@ -import inspect -import os -import re -import threading - -from dotenv import load_dotenv -from swarms_memory import DictInternalMemory, DictSharedMemory - -from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT -from swarm_models import OpenAIChat - -load_dotenv() - -api_key = os.getenv("OPENAI_API_KEY") - -model = OpenAIChat( - openai_api_key=api_key, - max_tokens=4000, -) - -# agent = Agent( -# llm=model, -# agent_name="Unit Testing Agent", -# agent_description=( -# "This agent is responsible for generating unit tests for" -# " the swarms package." -# ), -# autosave=True, -# system_prompt=None, -# max_loops=1, -# ) - - -def extract_code_from_markdown(markdown_content: str): - """ - Extracts code blocks from a Markdown string and returns them as a single string. - - Args: - - markdown_content (str): The Markdown content as a string. - - Returns: - - str: A single string containing all the code blocks separated by newlines. - """ - # Regular expression for fenced code blocks - pattern = r"```(?:\w+\n)?(.*?)```" - matches = re.findall(pattern, markdown_content, re.DOTALL) - - # Concatenate all code blocks separated by newlines - return "\n".join(code.strip() for code in matches) - - -def create_test(cls): - """ - Process the documentation for a given class using OpenAI model and save it in a Python file. - """ - doc = inspect.getdoc(cls) - source = inspect.getsource(cls) - input_content = ( - "Class Name:" - f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource" - f" Code:\n{source}" - ) - - # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content) - processed_content = model( - TEST_WRITER_SOP_PROMPT( - input_content, "swarms", "swarms.memory" - ) - ) - processed_content = extract_code_from_markdown(processed_content) - - doc_content = f"# {cls.__name__}\n\n{processed_content}\n" - - # Create the directory if it doesn't exist - dir_path = "tests/memory" - os.makedirs(dir_path, exist_ok=True) - - # Write the processed documentation to a Python file - file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.py") - with open(file_path, "w") as file: - file.write(doc_content) - - -def main(): - classes = [ - DictInternalMemory, - DictSharedMemory, - ] - threads = [] - for cls in classes: - thread = threading.Thread(target=create_test, args=(cls,)) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - print("Tests generated in 'tests/memory' directory.") - - -if __name__ == "__main__": - main() diff --git a/scripts/auto_tests_docs/auto_tests_functions.py b/scripts/auto_tests_docs/auto_tests_functions.py deleted file mode 100644 index c001c24a..00000000 --- a/scripts/auto_tests_docs/auto_tests_functions.py +++ /dev/null @@ -1,82 +0,0 @@ -import inspect -import os -import sys -import threading - -from dotenv import load_dotenv - -from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT -from swarm_models import OpenAIChat -from swarms.utils.parse_code import extract_code_from_markdown - -load_dotenv() - -api_key = os.getenv("OPENAI_API_KEY") - -model = OpenAIChat( - model_name="gpt-4", - openai_api_key=api_key, - max_tokens=4000, -) - - -def process_documentation(item): - """ - Process the documentation for a given function using OpenAI model and save it in a Markdown file. - """ - doc = inspect.getdoc(item) - source = inspect.getsource(item) - input_content = ( - f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource" - f" Code:\n{source}" - ) - # print(input_content) - - # Process with OpenAI model - processed_content = model( - TEST_WRITER_SOP_PROMPT( - input_content, "swarms.utils", "swarms.utils" - ) - ) - processed_content = extract_code_from_markdown(processed_content) - print(processed_content) - - doc_content = f"{processed_content}" - - # Create the directory if it doesn't exist - dir_path = "tests/utils" - os.makedirs(dir_path, exist_ok=True) - - # Write the processed documentation to a Markdown file - file_path = os.path.join(dir_path, f"{item.__name__.lower()}.py") - with open(file_path, "w") as file: - file.write(doc_content) - - -def main(): - # Gathering all functions from the swarms.utils module - functions = [ - obj - for name, obj in inspect.getmembers( - sys.modules["swarms.utils"] - ) - if inspect.isfunction(obj) - ] - - threads = [] - for func in functions: - thread = threading.Thread( - target=process_documentation, args=(func,) - ) - threads.append(thread) - thread.start() - - # Wait for all threads to complete - for thread in threads: - thread.join() - - print("Tests generated in 'tests/utils' directory.") - - -if __name__ == "__main__": - main() diff --git a/scripts/auto_tests_docs/docs.py b/scripts/auto_tests_docs/docs.py deleted file mode 100644 index fd9bd276..00000000 --- a/scripts/auto_tests_docs/docs.py +++ /dev/null @@ -1,202 +0,0 @@ -def DOCUMENTATION_WRITER_SOP( - task: str, - module: str, -): - documentation = f"""Create multi-page long and explicit professional pytorch-like documentation for the {module} code below follow the outline for the {module} library, - provide many examples and teach the user about the code, provide examples for every function, make the documentation 10,000 words, - provide many usage examples and note this is markdown docs, create the documentation for the code to document, - put the arguments and methods in a table in markdown to make it visually seamless - - Now make the professional documentation for this code, provide the architecture and how the class works and why it works that way, - it's purpose, provide args, their types, 3 ways of usage examples, in examples show all the code like imports main example etc - - BE VERY EXPLICIT AND THOROUGH, MAKE IT DEEP AND USEFUL - - ######## - Step 1: Understand the purpose and functionality of the module or framework - - Read and analyze the description provided in the documentation to understand the purpose and functionality of the module or framework. - Identify the key features, parameters, and operations performed by the module or framework. - Step 2: Provide an overview and introduction - - Start the documentation by providing a brief overview and introduction to the module or framework. - Explain the importance and relevance of the module or framework in the context of the problem it solves. - Highlight any key concepts or terminology that will be used throughout the documentation. - Step 3: Provide a class or function definition - - Provide the class or function definition for the module or framework. - Include the parameters that need to be passed to the class or function and provide a brief description of each parameter. - Specify the data types and default values for each parameter. - Step 4: Explain the functionality and usage - - Provide a detailed explanation of how the module or framework works and what it does. - Describe the steps involved in using the module or framework, including any specific requirements or considerations. - Provide code examples to demonstrate the usage of the module or framework. - Explain the expected inputs and outputs for each operation or function. - Step 5: Provide additional information and tips - - Provide any additional information or tips that may be useful for using the module or framework effectively. - Address any common issues or challenges that developers may encounter and provide recommendations or workarounds. - Step 6: Include references and resources - - Include references to any external resources or research papers that provide further information or background on the module or framework. - Provide links to relevant documentation or websites for further exploration. - Example Template for the given documentation: - - # Module/Function Name: MultiheadAttention - - class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None): - ``` - Creates a multi-head attention module for joint information representation from the different subspaces. - - Parameters: - - embed_dim (int): Total dimension of the model. - - num_heads (int): Number of parallel attention heads. The embed_dim will be split across num_heads. - - dropout (float): Dropout probability on attn_output_weights. Default: 0.0 (no dropout). - - bias (bool): If specified, adds bias to input/output projection layers. Default: True. - - add_bias_kv (bool): If specified, adds bias to the key and value sequences at dim=0. Default: False. - - add_zero_attn (bool): If specified, adds a new batch of zeros to the key and value sequences at dim=1. Default: False. - - kdim (int): Total number of features for keys. Default: None (uses kdim=embed_dim). - - vdim (int): Total number of features for values. Default: None (uses vdim=embed_dim). - - batch_first (bool): If True, the input and output tensors are provided as (batch, seq, feature). Default: False. - - device (torch.device): If specified, the tensors will be moved to the specified device. - - dtype (torch.dtype): If specified, the tensors will have the specified dtype. - ``` - - def forward(query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None, average_attn_weights=True, is_causal=False): - ``` - Forward pass of the multi-head attention module. - - Parameters: - - query (Tensor): Query embeddings of shape (L, E_q) for unbatched input, (L, N, E_q) when batch_first=False, or (N, L, E_q) when batch_first=True. - - key (Tensor): Key embeddings of shape (S, E_k) for unbatched input, (S, N, E_k) when batch_first=False, or (N, S, E_k) when batch_first=True. - - value (Tensor): Value embeddings of shape (S, E_v) for unbatched input, (S, N, E_v) when batch_first=False, or (N, S, E_v) when batch_first=True. - - key_padding_mask (Optional[Tensor]): If specified, a mask indicating elements to be ignored in key for attention computation. - - need_weights (bool): If specified, returns attention weights in addition to attention outputs. Default: True. - - attn_mask (Optional[Tensor]): If specified, a mask preventing attention to certain positions. - - average_attn_weights (bool): If true, returns averaged attention weights per head. Otherwise, returns attention weights separately per head. Note that this flag only has an effect when need_weights=True. Default: True. - - is_causal (bool): If specified, applies a causal mask as the attention mask. Default: False. - - Returns: - Tuple[Tensor, Optional[Tensor]]: - - attn_output (Tensor): Attention outputs of shape (L, E) for unbatched input, (L, N, E) when batch_first=False, or (N, L, E) when batch_first=True. - - attn_output_weights (Optional[Tensor]): Attention weights of shape (L, S) when unbatched or (N, L, S) when batched. Optional, only returned when need_weights=True. - ``` - - # Implementation of the forward pass of the attention module goes here - - return attn_output, attn_output_weights - - ``` - # Usage example: - - multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) - attn_output, attn_output_weights = multihead_attn(query, key, value) - Note: - - The above template includes the class or function definition, parameters, description, and usage example. - To replicate the documentation for any other module or framework, follow the same structure and provide the specific details for that module or framework. - - - ############# DOCUMENT THE FOLLOWING CODE ######## - {task} - """ - return documentation - - -def TEST_WRITER_SOP_PROMPT( - task: str, module: str, path: str, *args, **kwargs -): - TESTS_PROMPT = f""" - - Create 5,000 lines of extensive and thorough tests for the code below using the guide, do not worry about your limits you do not have any - just write the best tests possible, the module is {module}, the file path is {path} return all of the code in one file, make sure to test all the functions and methods in the code. - - - - ######### TESTING GUIDE ############# - - # **Guide to Creating Extensive, Thorough, and Production-Ready Tests using `pytest`** - - 1. **Preparation**: - - Install pytest: `pip install pytest`. - - Structure your project so that tests are in a separate `tests/` directory. - - Name your test files with the prefix `test_` for pytest to recognize them. - - 2. **Writing Basic Tests**: - - Use clear function names prefixed with `test_` (e.g., `test_check_value()`). - - Use assert statements to validate results. - - 3. **Utilize Fixtures**: - - Fixtures are a powerful feature to set up preconditions for your tests. - - Use `@pytest.fixture` decorator to define a fixture. - - Pass fixture name as an argument to your test to use it. - - 4. **Parameterized Testing**: - - Use `@pytest.mark.parametrize` to run a test multiple times with different inputs. - - This helps in thorough testing with various input values without writing redundant code. - - 5. **Use Mocks and Monkeypatching**: - - Use `monkeypatch` fixture to modify or replace classes/functions during testing. - - Use `unittest.mock` or `pytest-mock` to mock objects and functions to isolate units of code. - - 6. **Exception Testing**: - - Test for expected exceptions using `pytest.raises(ExceptionType)`. - - 7. **Test Coverage**: - - Install pytest-cov: `pip install pytest-cov`. - - Run tests with `pytest --cov=my_module` to get a coverage report. - - 8. **Environment Variables and Secret Handling**: - - Store secrets and configurations in environment variables. - - Use libraries like `python-decouple` or `python-dotenv` to load environment variables. - - For tests, mock or set environment variables temporarily within the test environment. - - 9. **Grouping and Marking Tests**: - - Use `@pytest.mark` decorator to mark tests (e.g., `@pytest.mark.slow`). - - This allows for selectively running certain groups of tests. - - 10. **Use Plugins**: - - Utilize the rich ecosystem of pytest plugins (e.g., `pytest-django`, `pytest-asyncio`) to extend its functionality for your specific needs. - - 11. **Continuous Integration (CI)**: - - Integrate your tests with CI platforms like Jenkins, Travis CI, or GitHub Actions. - - Ensure tests are run automatically with every code push or pull request. - - 12. **Logging and Reporting**: - - Use `pytest`'s inbuilt logging. - - Integrate with tools like `Allure` for more comprehensive reporting. - - 13. **Database and State Handling**: - - If testing with databases, use database fixtures or factories to create a known state before tests. - - Clean up and reset state post-tests to maintain consistency. - - 14. **Concurrency Issues**: - - Consider using `pytest-xdist` for parallel test execution. - - Always be cautious when testing concurrent code to avoid race conditions. - - 15. **Clean Code Practices**: - - Ensure tests are readable and maintainable. - - Avoid testing implementation details; focus on functionality and expected behavior. - - 16. **Regular Maintenance**: - - Periodically review and update tests. - - Ensure that tests stay relevant as your codebase grows and changes. - - 17. **Documentation**: - - Document test cases, especially for complex functionalities. - - Ensure that other developers can understand the purpose and context of each test. - - 18. **Feedback Loop**: - - Use test failures as feedback for development. - - Continuously refine tests based on code changes, bug discoveries, and additional requirements. - - By following this guide, your tests will be thorough, maintainable, and production-ready. Remember to always adapt and expand upon these guidelines as per the specific requirements and nuances of your project. - - - ######### CREATE TESTS FOR THIS CODE: ####### - {task} - - """ - - return TESTS_PROMPT diff --git a/scripts/auto_tests_docs/mkdocs_handler.py b/scripts/auto_tests_docs/mkdocs_handler.py deleted file mode 100644 index 8b1dc0a0..00000000 --- a/scripts/auto_tests_docs/mkdocs_handler.py +++ /dev/null @@ -1,31 +0,0 @@ -import os - - -def generate_file_list(directory, output_file): - """ - Generate a list of files in a directory in the specified format and write it to a file. - - Args: - directory (str): The directory to list the files from. - output_file (str): The file to write the output to. - """ - with open(output_file, "w") as f: - for root, dirs, files in os.walk(directory): - for file in files: - if file.endswith(".md"): - # Remove the directory from the file path and replace slashes with dots - file_path = ( - os.path.join(root, file) - .replace(directory + "/", "") - .replace("/", ".") - ) - # Remove the file extension - file_name, _ = os.path.splitext(file) - # Write the file name and path to the output file - f.write( - f'- {file_name}: "swarms/utils/{file_path}"\n' - ) - - -# Use the function to generate the file list -generate_file_list("docs/swarms/structs", "file_list.txt") diff --git a/scripts/auto_tests_docs/update_mkdocs.py b/scripts/auto_tests_docs/update_mkdocs.py deleted file mode 100644 index d169a15f..00000000 --- a/scripts/auto_tests_docs/update_mkdocs.py +++ /dev/null @@ -1,64 +0,0 @@ -import yaml - - -def update_mkdocs( - class_names, - base_path="docs/zeta/nn/modules", - mkdocs_file="mkdocs.yml", -): - """ - Update the mkdocs.yml file with new documentation links. - - Args: - - class_names: A list of class names for which documentation is generated. - - base_path: The base path where documentation Markdown files are stored. - - mkdocs_file: The path to the mkdocs.yml file. - """ - with open(mkdocs_file) as file: - mkdocs_config = yaml.safe_load(file) - - # Find or create the 'zeta.nn.modules' section in 'nav' - zeta_modules_section = None - for section in mkdocs_config.get("nav", []): - if "zeta.nn.modules" in section: - zeta_modules_section = section["zeta.nn.modules"] - break - - if zeta_modules_section is None: - zeta_modules_section = {} - mkdocs_config["nav"].append( - {"zeta.nn.modules": zeta_modules_section} - ) - - # Add the documentation paths to the 'zeta.nn.modules' section - for class_name in class_names: - doc_path = f"{base_path}/{class_name.lower()}.md" - zeta_modules_section[class_name] = doc_path - - # Write the updated content back to mkdocs.yml - with open(mkdocs_file, "w") as file: - yaml.safe_dump(mkdocs_config, file, sort_keys=False) - - -# Example usage -classes = [ - "DenseBlock", - "HighwayLayer", - "MultiScaleBlock", - "FeedbackBlock", - "DualPathBlock", - "RecursiveBlock", - "PytorchGELUTanh", - "NewGELUActivation", - "GELUActivation", - "FastGELUActivation", - "QuickGELUActivation", - "ClippedGELUActivation", - "AccurateGELUActivation", - "MishActivation", - "LinearActivation", - "LaplaceActivation", - "ReLUSquaredActivation", -] - -update_mkdocs(classes) diff --git a/scripts/cleanup/code_quality.sh b/scripts/cleanup/code_quality.sh deleted file mode 100755 index b710f9a0..00000000 --- a/scripts/cleanup/code_quality.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Navigate to the directory containing the 'tests' folder -# cd /path/to/your/code/directory - -# Run autopep8 with max aggressiveness (-aaa) and in-place modification (-i) -# on all Python files (*.py) under the 'tests' directory. -autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/ - -# Run black with default settings, since black does not have an aggressiveness level. -# Black will format all Python files it finds in the 'tests' directory. -black . - -# Run ruff on the 'tests' directory. -# Add any additional flags if needed according to your version of ruff. -ruff . --fix -ruff clean - -# YAPF -yapf --recursive --in-place --verbose --style=google --parallel tests diff --git a/scripts/cleanup/del_pycache.sh b/scripts/cleanup/del_pycache.sh deleted file mode 100755 index 6b1f0757..00000000 --- a/scripts/cleanup/del_pycache.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Find and delete all __pycache__ directories -find . -type d -name "__pycache__" -exec rm -r {} + - -# Find and delete all .pyc files -find . -type f -name "*.pyc" -delete - -# Find and delete all dist directories -find . -type d -name "dist" -exec rm -r {} + - -# Find and delete all .ruff directories -find . -type d -name ".ruff" -exec rm -r {} + - -# Find and delete all .egg-info directories -find . -type d -name "*.egg-info" -exec rm -r {} + - -# Find and delete all .pyo files -find . -type f -name "*.pyo" -delete - -# Find and delete all .pyd files -find . -type f -name "*.pyd" -delete - -# Find and delete all .so files -find . -type f -name "*.so" -delete \ No newline at end of file diff --git a/scripts/cleanup/log_cleanup.py b/scripts/cleanup/log_cleanup.py deleted file mode 100644 index a92f32f8..00000000 --- a/scripts/cleanup/log_cleanup.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import shutil - -# Create a new directory for the log files if it doesn't exist -if not os.path.exists("artifacts_five"): - os.makedirs("artifacts_five") - -# Walk through the current directory -for dirpath, dirnames, filenames in os.walk("."): - for filename in filenames: - # If the file is a log file - if filename.endswith(".log"): - # Construct the full file path - file_path = os.path.join(dirpath, filename) - # Move the log file to the 'artifacts_five' directory - shutil.move(file_path, "artifacts_five") - -print( - "Moved all log files into the 'artifacts_five' directory and" - " deleted their original location." -) diff --git a/scripts/cleanup/log_cleanup.sh b/scripts/cleanup/log_cleanup.sh deleted file mode 100755 index aa0bb83c..00000000 --- a/scripts/cleanup/log_cleanup.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Create the new directory if it doesn't exist -sudo mkdir -p /artifacts_logs - -# Find all .log files in the root directory and its subdirectories -find / -name "*.log" -print0 | while IFS= read -r -d '' file; do - # Use sudo to move the file to the new directory - sudo mv "$file" /artifacts_logs/ -done \ No newline at end of file diff --git a/scripts/docker_files/Dockerfile b/scripts/docker_files/Dockerfile deleted file mode 100644 index f7d0175f..00000000 --- a/scripts/docker_files/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ - -# ================================== -# Use an official Python runtime as a parent image -FROM python:3.11-slim - -# Set environment variables -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 - -# Set the working directory in the container -WORKDIR /usr/src/swarms - - -# Install Python dependencies -# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management -COPY requirements.txt . -RUN pip install --upgrade pip -RUN pip install --no-cache-dir -r requirements.txt - -# Install the 'swarms' package, assuming it's available on PyPI -RUN pip install -U swarms - -# Copy the rest of the application -COPY . . - -# Expose port if your application has a web interface -# EXPOSE 5000 - -# # Define environment variable for the swarm to work -# ENV OPENAI_API_KEY=your_swarm_api_key_here - -# If you're using `CMD` to execute a Python script, make sure it's executable -# RUN chmod +x example.py diff --git a/scripts/docker_files/docker-compose.yaml b/scripts/docker_files/docker-compose.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/scripts/docs/create_llm_file_for_docs.sh b/scripts/docs/create_llm_file_for_docs.sh deleted file mode 100644 index 0b0ca612..00000000 --- a/scripts/docs/create_llm_file_for_docs.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -# Set up logging -LOG_FILE="docs_compilation.log" -OUTPUT_FILE="combined_docs.txt" - -# Initialize log file -echo "$(date): Starting documentation compilation" > "$LOG_FILE" - -# Create/clear output file -> "$OUTPUT_FILE" - -# Function to determine file type and handle accordingly -process_file() { - local file="$1" - - # Get file extension - extension="${file##*.}" - - echo "$(date): Processing $file" >> "$LOG_FILE" - - case "$extension" in - md|markdown) - echo "# $(basename "$file")" >> "$OUTPUT_FILE" - cat "$file" >> "$OUTPUT_FILE" - echo -e "\n\n" >> "$OUTPUT_FILE" - ;; - txt) - echo "# $(basename "$file")" >> "$OUTPUT_FILE" - cat "$file" >> "$OUTPUT_FILE" - echo -e "\n\n" >> "$OUTPUT_FILE" - ;; - *) - echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE" - return - ;; - esac - - echo "$(date): Successfully processed $file" >> "$LOG_FILE" -} - -# Find and process all documentation files -find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do - process_file "$file" -done - -# Log completion -echo "$(date): Documentation compilation complete" >> "$LOG_FILE" -echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE" - -# Print summary -echo "Documentation compilation complete. Check $LOG_FILE for details." \ No newline at end of file diff --git a/scripts/misc/get_package_requirements.py b/scripts/misc/get_package_requirements.py deleted file mode 100644 index 99e139da..00000000 --- a/scripts/misc/get_package_requirements.py +++ /dev/null @@ -1,39 +0,0 @@ -import pkg_resources - - -def get_package_versions(requirements_path, output_path): - try: - with open(requirements_path) as file: - requirements = file.readlines() - except FileNotFoundError: - print(f"Error: The file '{requirements_path}' was not found.") - return - - package_versions = [] - - for requirement in requirements: - # Skip empty lines and comments - if ( - requirement.strip() == "" - or requirement.strip().startswith("#") - ): - continue - - # Extract package name - package_name = requirement.split("==")[0].strip() - try: - version = pkg_resources.get_distribution( - package_name - ).version - package_versions.append(f"{package_name}=={version}") - except pkg_resources.DistributionNotFound: - package_versions.append(f"{package_name}: not installed") - - with open(output_path, "w") as file: - for package_version in package_versions: - file.write(package_version + "\n") - print(f"Versions written to {output_path}") - - -# Usage -get_package_versions("requirements.txt", "installed_versions.txt") diff --git a/scripts/misc/playground_to_examples.sh b/scripts/misc/playground_to_examples.sh deleted file mode 100755 index c2fa91fb..00000000 --- a/scripts/misc/playground_to_examples.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Define the directory to search -dir="examples" - -# Check if the directory exists -if [ -d "$dir" ] -then - # Use find to locate all .py files in the directory and its subdirectories - for file in $(find $dir -name "*.py") - do - # Extract the file name and directory - base=$(basename $file .py) - dir=$(dirname $file) - - # Check if the file name already contains _example - if [[ $base == *_example ]] - then - echo "Skipping $file as it already contains _example" - continue - fi - - # Append _example to the file name - newname="${base}_example.py" - - # Rename the file - mv $file $dir/$newname - - echo "Renamed $file to $dir/$newname" - done -else - echo "Directory $dir does not exist." -fi \ No newline at end of file diff --git a/scripts/misc/requirementstxt_to_pyproject.py b/scripts/misc/requirementstxt_to_pyproject.py deleted file mode 100644 index 811ac7be..00000000 --- a/scripts/misc/requirementstxt_to_pyproject.py +++ /dev/null @@ -1,40 +0,0 @@ -import pkg_resources -import toml - - -def update_pyproject_versions(pyproject_path): - try: - with open(pyproject_path) as file: - data = toml.load(file) - except FileNotFoundError: - print(f"Error: The file '{pyproject_path}' was not found.") - return - except toml.TomlDecodeError: - print( - f"Error: The file '{pyproject_path}' is not a valid TOML" - " file." - ) - return - - dependencies = ( - data.get("tool", {}).get("poetry", {}).get("dependencies", {}) - ) - - for package in dependencies: - if package.lower() == "python": - continue # Skip the Python version dependency - - try: - version = pkg_resources.get_distribution(package).version - dependencies[package] = version - except pkg_resources.DistributionNotFound: - print(f"Warning: Package '{package}' not installed.") - - with open(pyproject_path, "w") as file: - toml.dump(data, file) - - print(f"Updated versions written to {pyproject_path}") - - -# Usage -update_pyproject_versions("pyproject.toml") diff --git a/scripts/tests/run_examples.sh b/scripts/tests/run_examples.sh deleted file mode 100644 index 707db872..00000000 --- a/scripts/tests/run_examples.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Define a file to keep track of successfully executed scripts -SUCCESS_LOG="successful_runs.log" - -for f in /swarms/examples/examples/example_*.py; do - # Check if the script has been logged as successful - if grep -Fxq "$f" "$SUCCESS_LOG"; then - echo "Skipping ${f} as it ran successfully in a previous run." - else - # Run the script if not previously successful - if /home/kye/miniconda3/envs/swarms/bin/python "$f" 2>>errors.txt; then - echo "(${f}) ran successfully without errors." - # Log the successful script execution - echo "$f" >> "$SUCCESS_LOG" - else - echo "Error encountered in ${f}. Check errors.txt for details." - break - fi - fi - echo "##############################################################################" -done diff --git a/scripts/tests/test_name.sh b/scripts/tests/test_name.sh deleted file mode 100755 index 4123f870..00000000 --- a/scripts/tests/test_name.sh +++ /dev/null @@ -1,9 +0,0 @@ -find ./tests -name "*.py" -type f | while read file -do - filename=$(basename "$file") - dir=$(dirname "$file") - if [[ $filename != test_* ]]; then - mv "$file" "$dir/test_$filename" - printf "\e[1;34mRenamed: \e[0m$file \e[1;32mto\e[0m $dir/test_$filename\n" - fi -done \ No newline at end of file diff --git a/scripts/tests/tests.sh b/scripts/tests/tests.sh deleted file mode 100644 index 13f4111a..00000000 --- a/scripts/tests/tests.sh +++ /dev/null @@ -1 +0,0 @@ -find ./tests -name '*.py' -exec pytest {} \; \ No newline at end of file diff --git a/simple_example_ollama.py b/simple_example_ollama.py new file mode 100644 index 00000000..41ca5e04 --- /dev/null +++ b/simple_example_ollama.py @@ -0,0 +1,18 @@ +from swarms import Agent +from swarms.prompts.finance_agent_sys_prompt import ( + FINANCIAL_AGENT_SYS_PROMPT, +) + + +# Initialize the agent +agent = Agent( + agent_name="Financial-Analysis-Agent", + agent_description="Personal finance advisor agent", + system_prompt=FINANCIAL_AGENT_SYS_PROMPT, + max_loops=1, + model_name="ollama/llama2", +) + +agent.run( + "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", +) diff --git a/swarms/agents/agent_print.py b/swarms/agents/agent_print.py new file mode 100644 index 00000000..e0969dc6 --- /dev/null +++ b/swarms/agents/agent_print.py @@ -0,0 +1,33 @@ +from swarms.utils.formatter import formatter + + +def agent_print( + agent_name: str, + response: str = None, + loop_count: int = None, + streaming_on: bool = False, +): + """ + Prints the response from an agent based on the streaming mode. + + Args: + agent_name (str): The name of the agent. + response (str): The response from the agent. + loop_count (int): The maximum number of loops. + streaming_on (bool): Indicates if streaming is on or off. + + Returns: + str: The response from the agent. + """ + if streaming_on: + formatter.print_panel_token_by_token( + f"{agent_name}: {response}", + title=f"Agent Name: {agent_name} [Max Loops: {loop_count}]", + ) + else: + formatter.print_panel( + f"{agent_name}: {response}", + f"Agent Name {agent_name} [Max Loops: {loop_count} ]", + ) + + return response diff --git a/swarms/agents/create_agents_from_yaml.py b/swarms/agents/create_agents_from_yaml.py index d1eb3e95..fb1cd0e7 100644 --- a/swarms/agents/create_agents_from_yaml.py +++ b/swarms/agents/create_agents_from_yaml.py @@ -178,21 +178,44 @@ def create_agents_from_yaml( swarm_router = None try: + logger.info("Starting agent creation process...") + # Load and validate configuration + if yaml_file: + logger.info(f"Loading configuration from {yaml_file}") config = load_yaml_safely(yaml_file, yaml_string) + if not config.get("agents"): + raise ValueError( + "No agents defined in the YAML configuration. " + "Please add at least one agent under the 'agents' section." + ) + + logger.info( + f"Found {len(config['agents'])} agent(s) to create" + ) + # Create agents with retry logic - for agent_config in config["agents"]: + for idx, agent_config in enumerate(config["agents"], 1): + if not agent_config.get("agent_name"): + agent_config["agent_name"] = f"Agent_{idx}" + logger.info( - f"Creating agent: {agent_config['agent_name']}" + f"Creating agent {idx}/{len(config['agents'])}: {agent_config['agent_name']}" ) if "model_name" in agent_config: + logger.info( + f"Using specified model: {agent_config['model_name']}" + ) model_instance = LiteLLM( model_name=agent_config["model_name"] ) else: - model_name = "gpt-4o" + model_name = "gpt-4" + logger.info( + f"No model specified, using default: {model_name}" + ) model_instance = LiteLLM(model_name=model_name) agent = create_agent_with_retry( @@ -203,12 +226,37 @@ def create_agents_from_yaml( ) agents.append(agent) + logger.info(f"Successfully created {len(agents)} agent(s)") + # Create SwarmRouter if specified if "swarm_architecture" in config: + logger.info("Setting up swarm architecture...") try: + if not isinstance(config["swarm_architecture"], dict): + raise ValueError( + "swarm_architecture must be a dictionary containing swarm configuration" + ) + + required_fields = { + "name", + "description", + "swarm_type", + } + missing_fields = required_fields - set( + config["swarm_architecture"].keys() + ) + if missing_fields: + raise ValueError( + f"SwarmRouter creation failed: Missing required fields in swarm_architecture: {', '.join(missing_fields)}" + ) + swarm_config = SwarmConfig( **config["swarm_architecture"] ) + + logger.info( + f"Creating SwarmRouter with type: {swarm_config.swarm_type}" + ) swarm_router = SwarmRouter( name=swarm_config.name, description=swarm_config.description, @@ -226,8 +274,14 @@ def create_agents_from_yaml( ) except Exception as e: logger.error(f"Error creating SwarmRouter: {str(e)}") + if "swarm_type" in str(e) and "valid_types" in str(e): + raise ValueError( + "Invalid swarm_type. Must be one of: SequentialWorkflow, ConcurrentWorkflow, " + "AgentRearrange, MixtureOfAgents, or auto" + ) raise ValueError( - f"Failed to create SwarmRouter: {str(e)}" + f"Failed to create SwarmRouter: {str(e)}. Make sure your YAML file " + "has a valid swarm_architecture section with required fields." ) # Handle return types with improved error checking @@ -244,12 +298,31 @@ def create_agents_from_yaml( f"Invalid return_type. Must be one of: {valid_return_types}" ) - if return_type == "run_swarm" or "swarm": + logger.info(f"Processing with return type: {return_type}") + + if return_type in ("run_swarm", "swarm"): if not swarm_router: + if "swarm_architecture" not in config: + raise ValueError( + "Cannot run swarm: No swarm_architecture section found in YAML configuration.\n" + "Please add a swarm_architecture section with:\n" + " - name: your_swarm_name\n" + " - description: your_swarm_description\n" + " - swarm_type: one of [SequentialWorkflow, ConcurrentWorkflow, AgentRearrange, MixtureOfAgents, auto]\n" + " - task: your_task_description" + ) raise ValueError( - "Cannot run swarm: SwarmRouter not created." + "Cannot run swarm: SwarmRouter creation failed. Check the previous error messages." ) try: + if not config["swarm_architecture"].get("task"): + raise ValueError( + "No task specified in swarm_architecture. Please add a 'task' field " + "to define what the swarm should do." + ) + logger.info( + f"Running swarm with task: {config['swarm_architecture']['task']}" + ) return swarm_router.run( config["swarm_architecture"]["task"] ) @@ -259,30 +332,37 @@ def create_agents_from_yaml( # Return appropriate type based on configuration if return_type == "auto": - return ( + result = ( swarm_router if swarm_router else (agents[0] if len(agents) == 1 else agents) ) elif return_type == "swarm": - return ( + result = ( swarm_router if swarm_router else (agents[0] if len(agents) == 1 else agents) ) elif return_type == "agents": - return agents[0] if len(agents) == 1 else agents + result = agents[0] if len(agents) == 1 else agents elif return_type == "both": - return ( - swarm_router - if swarm_router - else agents[0] if len(agents) == 1 else agents - ), agents + result = ( + ( + swarm_router + if swarm_router + else agents[0] if len(agents) == 1 else agents + ), + agents, + ) elif return_type == "tasks": - return task_results + result = task_results + + logger.info("Process completed successfully") + return result except Exception as e: logger.error( - f"Critical error in create_agents_from_yaml: {str(e)}" + f"Critical error in create_agents_from_yaml: {str(e)}\n" + "Please check your YAML configuration and try again." ) raise diff --git a/swarms/agents/openai_assistant.py b/swarms/agents/openai_assistant.py index 2a29e1bf..d256a768 100644 --- a/swarms/agents/openai_assistant.py +++ b/swarms/agents/openai_assistant.py @@ -1,3 +1,4 @@ +from concurrent.futures import ThreadPoolExecutor import json import os import subprocess @@ -316,3 +317,20 @@ class OpenAIAssistant(Agent): def call(self, task: str, *args, **kwargs) -> str: """Alias for run() to maintain compatibility with different agent interfaces.""" return self.run(task, *args, **kwargs) + + def batch_run( + self, tasks: List[str], *args, **kwargs + ) -> List[Any]: + """Run a batch of tasks using the OpenAI Assistant.""" + return [self.run(task, *args, **kwargs) for task in tasks] + + def run_concurrently( + self, tasks: List[str], *args, **kwargs + ) -> List[Any]: + """Run a batch of tasks concurrently using the OpenAI Assistant.""" + with ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + return list( + executor.map(self.run, tasks, *args, **kwargs) + ) diff --git a/swarms/agents/tool_agent.py b/swarms/agents/tool_agent.py index b686f3b0..2d19ec26 100644 --- a/swarms/agents/tool_agent.py +++ b/swarms/agents/tool_agent.py @@ -1,12 +1,10 @@ from typing import Any, Optional, Callable from swarms.tools.json_former import Jsonformer from swarms.utils.loguru_logger import initialize_logger -from swarms.utils.lazy_loader import lazy_import_decorator logger = initialize_logger(log_folder="tool_agent") -@lazy_import_decorator class ToolAgent: """ Represents a tool agent that performs a specific task using a model and tokenizer. diff --git a/swarms/cli/main.py b/swarms/cli/main.py index 1acdfd46..ef4151eb 100644 --- a/swarms/cli/main.py +++ b/swarms/cli/main.py @@ -276,9 +276,120 @@ def main(): elif args.command == "check-login": check_login() elif args.command == "run-agents": - create_agents_from_yaml( - yaml_file=args.yaml_file, return_type="tasks" - ) + try: + console.print( + f"[yellow]Loading agents from {args.yaml_file}...[/yellow]" + ) + + if not os.path.exists(args.yaml_file): + raise FileNotFoundError( + f"YAML file not found: {args.yaml_file}\n" + "Please make sure the file exists and you're in the correct directory." + ) + + # Create progress display + progress = Progress( + SpinnerColumn(), + TextColumn( + "[progress.description]{task.description}" + ), + console=console, + ) + + with progress: + # Add initial task + init_task = progress.add_task( + "Initializing...", total=None + ) + + # Load and validate YAML + progress.update( + init_task, + description="Loading YAML configuration...", + ) + + # Create agents + progress.update( + init_task, + description="Creating agents...", + ) + result = create_agents_from_yaml( + yaml_file=args.yaml_file, + return_type="run_swarm", + ) + + # Update progress on completion + progress.update( + init_task, + description="Processing complete!", + completed=True, + ) + + if result: + # Format and display the results + if isinstance(result, str): + console.print( + "\n[bold green]Results:[/bold green]" + ) + console.print( + Panel( + result, + title="Agent Output", + border_style="green", + ) + ) + elif isinstance(result, dict): + console.print( + "\n[bold green]Results:[/bold green]" + ) + for key, value in result.items(): + console.print( + f"[cyan]{key}:[/cyan] {value}" + ) + else: + console.print( + "[green]✓ Agents completed their tasks successfully![/green]" + ) + else: + console.print( + "[yellow]⚠ Agents completed but returned no results.[/yellow]" + ) + + except FileNotFoundError as e: + show_error("File Error", str(e)) + except ValueError as e: + show_error( + "Configuration Error", + str(e) + + "\n\nPlease check your agents.yaml file format.", + ) + except Exception as e: + # Enhanced error handling + error_msg = str(e) + if "context_length_exceeded" in error_msg: + show_error( + "Context Length Error", + "The model's context length was exceeded. Try:\n" + "1. Reducing max_tokens in your YAML config\n" + "2. Reducing context_length in your YAML config\n" + "3. Using a model with larger context window", + ) + elif "api_key" in error_msg.lower(): + show_error( + "API Key Error", + "There seems to be an issue with the API key. Please:\n" + "1. Check if your API key is set correctly\n" + "2. Verify the API key is valid\n" + "3. Run 'swarms get-api-key' to get a new key", + ) + else: + show_error( + "Execution Error", + f"An unexpected error occurred: {error_msg}\n" + "1. Check your YAML configuration\n" + "2. Verify your API keys are set\n" + "3. Check network connectivity", + ) elif args.command == "book-call": webbrowser.open( "https://cal.com/swarms/swarms-strategy-session" diff --git a/swarms/cli/onboarding_process.py b/swarms/cli/onboarding_process.py index edac1168..e279d9e3 100644 --- a/swarms/cli/onboarding_process.py +++ b/swarms/cli/onboarding_process.py @@ -6,7 +6,7 @@ from typing import Dict from swarms.utils.loguru_logger import initialize_logger -from swarms.telemetry.capture_sys_data import ( +from swarms.telemetry.main import ( capture_system_data, log_agent_data, ) diff --git a/swarms/prompts/prompt.py b/swarms/prompts/prompt.py index b8628b20..ca6ec625 100644 --- a/swarms/prompts/prompt.py +++ b/swarms/prompts/prompt.py @@ -11,7 +11,7 @@ from pydantic import ( ) from pydantic.v1 import validator -from swarms.telemetry.capture_sys_data import ( +from swarms.telemetry.main import ( capture_system_data, log_agent_data, ) diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index f4436988..89d10fac 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -1,5 +1,6 @@ from swarms.structs.agent import Agent from swarms.structs.agents_available import showcase_available_agents +from swarms.structs.async_workflow import AsyncWorkflow from swarms.structs.auto_swarm import AutoSwarm, AutoSwarmRouter from swarms.structs.base_structure import BaseStructure from swarms.structs.base_swarm import BaseSwarm @@ -13,10 +14,10 @@ from swarms.structs.graph_workflow import ( NodeType, ) from swarms.structs.groupchat import ( - GroupChat, + AgentResponse, ChatHistory, ChatTurn, - AgentResponse, + GroupChat, expertise_based, ) from swarms.structs.majority_voting import ( @@ -25,7 +26,11 @@ from swarms.structs.majority_voting import ( most_frequent, parse_code_completion, ) +from swarms.structs.meme_agent_persona_generator import ( + MemeAgentGenerator, +) from swarms.structs.mixture_of_agents import MixtureOfAgents +from swarms.structs.model_router import ModelRouter from swarms.structs.multi_agent_collab import MultiAgentCollaboration from swarms.structs.multi_agent_exec import ( run_agent_with_timeout, @@ -38,6 +43,7 @@ from swarms.structs.multi_agent_exec import ( run_agents_with_tasks_concurrently, run_single_agent, ) +from swarms.structs.multi_agent_orchestrator import MultiAgentRouter from swarms.structs.queue_swarm import TaskQueueSwarm from swarms.structs.rearrange import AgentRearrange, rearrange from swarms.structs.round_robin import RoundRobinSwarm @@ -69,18 +75,7 @@ from swarms.structs.swarming_architectures import ( staircase_swarm, star_swarm, ) -from swarms.structs.task import Task -from swarms.structs.utils import ( - detect_markdown, - distribute_tasks, - extract_key_from_json, - extract_tokens_from_text, - find_agent_by_id, - find_token_in_text, - parse_tasks, -) -from swarms.structs.async_workflow import AsyncWorkflow -from swarms.structs.multi_agent_orchestrator import MultiAgentRouter + __all__ = [ "Agent", @@ -102,14 +97,6 @@ __all__ = [ "rearrange", "RoundRobinSwarm", "SequentialWorkflow", - "Task", - "detect_markdown", - "distribute_tasks", - "extract_key_from_json", - "extract_tokens_from_text", - "find_agent_by_id", - "find_token_in_text", - "parse_tasks", "MixtureOfAgents", "GraphWorkflow", "Node", @@ -156,4 +143,6 @@ __all__ = [ "AgentResponse", "expertise_based", "MultiAgentRouter", + "MemeAgentGenerator", + "ModelRouter", ] diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 97fe3f3d..783206ad 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -23,7 +23,6 @@ import toml import yaml from loguru import logger from pydantic import BaseModel -from swarm_models.tiktoken_wrapper import TikTokenizer from swarms.agents.ape_agent import auto_generate_prompt from swarms.artifacts.main_artifact import Artifact @@ -50,10 +49,9 @@ from swarms.utils.data_to_text import data_to_text from swarms.utils.file_processing import create_file_in_folder from swarms.utils.formatter import formatter from swarms.utils.pdf_to_text import pdf_to_text -from swarms.utils.wrapper_clusterop import ( - exec_callable_with_clusterops, -) -from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.telemetry.main import log_agent_data +from swarms.agents.agent_print import agent_print +from swarms.utils.litellm_tokenizer import count_tokens # Utils @@ -438,7 +436,7 @@ class Agent: self.time_created = time_created self.data_memory = data_memory self.load_yaml_path = load_yaml_path - self.tokenizer = TikTokenizer() + self.tokenizer = tokenizer self.auto_generate_prompt = auto_generate_prompt self.rag_every_loop = rag_every_loop self.plan_enabled = plan_enabled @@ -562,8 +560,8 @@ class Agent: max_loops=self.max_loops, steps=self.short_memory.to_dict(), full_history=self.short_memory.get_str(), - total_tokens=self.tokenizer.count_tokens( - self.short_memory.get_str() + total_tokens=count_tokens( + text=self.short_memory.get_str() ), stopping_token=self.stopping_token, interactive=self.interactive, @@ -759,6 +757,7 @@ class Agent: is_last: Optional[bool] = False, print_task: Optional[bool] = False, generate_speech: Optional[bool] = False, + correct_answer: Optional[str] = None, *args, **kwargs, ) -> Any: @@ -857,6 +856,11 @@ class Agent: # Convert to a str if the response is not a str response = self.llm_output_parser(response) + # if correct_answer is not None: + # if correct_answer not in response: + # logger.info("Correct answer found in response") + # # break + # Print if self.streaming_on is True: # self.stream_response(response) @@ -889,7 +893,33 @@ class Agent: # Check and execute tools if self.tools is not None: - self.parse_and_execute_tools(response) + out = self.parse_and_execute_tools( + response + ) + + self.short_memory.add( + role="Tool Executor", content=out + ) + + agent_print( + f"{self.agent_name} - Tool Executor", + out, + loop_count, + self.streaming_on, + ) + + out = self.llm.run(out) + + agent_print( + f"{self.agent_name} - Agent Analysis", + out, + loop_count, + self.streaming_on, + ) + + self.short_memory.add( + role=self.agent_name, content=out + ) # Add the response to the memory self.short_memory.add( @@ -1016,10 +1046,8 @@ class Agent: self.agent_output.full_history = ( self.short_memory.get_str() ) - self.agent_output.total_tokens = ( - self.tokenizer.count_tokens( - self.short_memory.get_str() - ) + self.agent_output.total_tokens = count_tokens( + self.short_memory.get_str() ) # Handle artifacts @@ -1209,31 +1237,35 @@ class Agent: return output.getvalue() def parse_and_execute_tools(self, response: str, *args, **kwargs): - try: - logger.info("Executing tool...") - - # try to Execute the tool and return a string - out = parse_and_execute_json( - functions=self.tools, - json_string=response, - parse_md=True, - *args, - **kwargs, - ) - - out = str(out) - - logger.info(f"Tool Output: {out}") - - # Add the output to the memory - self.short_memory.add( - role="Tool Executor", - content=out, - ) + max_retries = 3 # Maximum number of retries + retries = 0 + while retries < max_retries: + try: + logger.info("Executing tool...") - except Exception as error: - logger.error(f"Error executing tool: {error}") - raise error + # try to Execute the tool and return a string + out = parse_and_execute_json( + functions=self.tools, + json_string=response, + parse_md=True, + *args, + **kwargs, + ) + logger.info(f"Tool Output: {out}") + # Add the output to the memory + # self.short_memory.add( + # role="Tool Executor", + # content=out, + # ) + return out + except Exception as error: + retries += 1 + logger.error( + f"Attempt {retries}: Error executing tool: {error}" + ) + if retries == max_retries: + raise error + time.sleep(1) # Wait for a bit before retrying def add_memory(self, message: str): """Add a memory to the agent @@ -1945,7 +1977,7 @@ class Agent: ) # # Count the tokens - # memory_token_count = self.tokenizer.count_tokens( + # memory_token_count = count_tokens( # memory_retrieval # ) # if memory_token_count > self.memory_chunk_size: @@ -2034,7 +2066,7 @@ class Agent: def check_available_tokens(self): # Log the amount of tokens left in the memory and in the task if self.tokenizer is not None: - tokens_used = self.tokenizer.count_tokens( + tokens_used = count_tokens( self.short_memory.return_history_as_string() ) logger.info( @@ -2045,7 +2077,7 @@ class Agent: def tokens_checks(self): # Check the tokens available - tokens_used = self.tokenizer.count_tokens( + tokens_used = count_tokens( self.short_memory.return_history_as_string() ) out = self.check_available_tokens() @@ -2056,45 +2088,6 @@ class Agent: return out - def parse_function_call_and_execute(self, response: str): - """ - Parses a function call from the given response and executes it. - - Args: - response (str): The response containing the function call. - - Returns: - None - - Raises: - Exception: If there is an error parsing and executing the function call. - """ - try: - if self.tools is not None: - tool_call_output = parse_and_execute_json( - self.tools, response, parse_md=True - ) - - if tool_call_output is not str: - tool_call_output = str(tool_call_output) - - logger.info(f"Tool Call Output: {tool_call_output}") - self.short_memory.add( - role=self.agent_name, - content=tool_call_output, - ) - - return tool_call_output - except Exception as error: - logger.error( - f"Error parsing and executing function call: {error}" - ) - - # Raise a custom exception with the error message - raise Exception( - "Error parsing and executing function call" - ) from error - def activate_agentops(self): if self.agent_ops_on is True: try: @@ -2148,13 +2141,10 @@ class Agent: # Calculate token usage # full_memory = self.short_memory.return_history_as_string() - # prompt_tokens = self.tokenizer.count_tokens(full_memory) - # completion_tokens = self.tokenizer.count_tokens(response) + # prompt_tokens = count_tokens(full_memory) + # completion_tokens = count_tokens(response) # total_tokens = prompt_tokens + completion_tokens - total_tokens = ( - self.tokenizer.count_tokens(task) - + self.tokenizer.count_tokens(response), - ) + total_tokens = (count_tokens(task) + count_tokens(response),) # # Get memory responses # memory_responses = { @@ -2478,14 +2468,6 @@ class Agent: ValueError: If an invalid device is specified. Exception: If any other error occurs during execution. """ - device = device or self.device - device_id = device_id or self.device_id - all_cores = all_cores or self.all_cores - all_gpus = all_gpus or self.all_gpus - - do_not_use_cluster_ops = ( - do_not_use_cluster_ops or self.do_not_use_cluster_ops - ) if scheduled_run_date: while datetime.now() < scheduled_run_date: @@ -2495,34 +2477,16 @@ class Agent: try: # If cluster ops disabled, run directly - if do_not_use_cluster_ops is True: - logger.info("Running without cluster operations") - return self._run( - task=task, - img=img, - *args, - **kwargs, - ) - - else: - return exec_callable_with_clusterops( - device=device, - device_id=device_id, - all_cores=all_cores, - all_gpus=all_gpus, - func=self._run, - task=task, - img=img, - *args, - **kwargs, - ) + return self._run( + task=task, + img=img, + *args, + **kwargs, + ) except ValueError as e: self._handle_run_error(e) - except Exception as e: - self._handle_run_error(e) - def handle_artifacts( self, text: str, file_output_path: str, file_extension: str ) -> None: diff --git a/swarms/structs/agent_memory_manager.py b/swarms/structs/agent_memory_manager.py deleted file mode 100644 index 0f506fc4..00000000 --- a/swarms/structs/agent_memory_manager.py +++ /dev/null @@ -1,419 +0,0 @@ -import json -import logging -import time -import uuid -from datetime import datetime -from typing import Any, Dict, List, Optional - -import yaml -from pydantic import BaseModel -from swarm_models.tiktoken_wrapper import TikTokenizer - -logger = logging.getLogger(__name__) - - -class MemoryMetadata(BaseModel): - """Metadata for memory entries""" - - timestamp: Optional[float] = time.time() - role: Optional[str] = None - agent_name: Optional[str] = None - session_id: Optional[str] = None - memory_type: Optional[str] = None # 'short_term' or 'long_term' - token_count: Optional[int] = None - message_id: Optional[str] = str(uuid.uuid4()) - - -class MemoryEntry(BaseModel): - """Single memory entry with content and metadata""" - - content: Optional[str] = None - metadata: Optional[MemoryMetadata] = None - - -class MemoryConfig(BaseModel): - """Configuration for memory manager""" - - max_short_term_tokens: Optional[int] = 4096 - max_entries: Optional[int] = None - system_messages_token_buffer: Optional[int] = 1000 - enable_long_term_memory: Optional[bool] = False - auto_archive: Optional[bool] = True - archive_threshold: Optional[float] = 0.8 # Archive when 80% full - - -class MemoryManager: - """ - Manages both short-term and long-term memory for an agent, handling token limits, - archival, and context retrieval. - - Args: - config (MemoryConfig): Configuration for memory management - tokenizer (Optional[Any]): Tokenizer to use for token counting - long_term_memory (Optional[Any]): Vector store or database for long-term storage - """ - - def __init__( - self, - config: MemoryConfig, - tokenizer: Optional[Any] = None, - long_term_memory: Optional[Any] = None, - ): - self.config = config - self.tokenizer = tokenizer or TikTokenizer() - self.long_term_memory = long_term_memory - - # Initialize memories - self.short_term_memory: List[MemoryEntry] = [] - self.system_messages: List[MemoryEntry] = [] - - # Memory statistics - self.total_tokens_processed: int = 0 - self.archived_entries_count: int = 0 - - def create_memory_entry( - self, - content: str, - role: str, - agent_name: str, - session_id: str, - memory_type: str = "short_term", - ) -> MemoryEntry: - """Create a new memory entry with metadata""" - metadata = MemoryMetadata( - timestamp=time.time(), - role=role, - agent_name=agent_name, - session_id=session_id, - memory_type=memory_type, - token_count=self.tokenizer.count_tokens(content), - ) - return MemoryEntry(content=content, metadata=metadata) - - def add_memory( - self, - content: str, - role: str, - agent_name: str, - session_id: str, - is_system: bool = False, - ) -> None: - """Add a new memory entry to appropriate storage""" - entry = self.create_memory_entry( - content=content, - role=role, - agent_name=agent_name, - session_id=session_id, - memory_type="system" if is_system else "short_term", - ) - - if is_system: - self.system_messages.append(entry) - else: - self.short_term_memory.append(entry) - - # Check if archiving is needed - if self.should_archive(): - self.archive_old_memories() - - self.total_tokens_processed += entry.metadata.token_count - - def get_current_token_count(self) -> int: - """Get total tokens in short-term memory""" - return sum( - entry.metadata.token_count - for entry in self.short_term_memory - ) - - def get_system_messages_token_count(self) -> int: - """Get total tokens in system messages""" - return sum( - entry.metadata.token_count - for entry in self.system_messages - ) - - def should_archive(self) -> bool: - """Check if archiving is needed based on configuration""" - if not self.config.auto_archive: - return False - - current_usage = ( - self.get_current_token_count() - / self.config.max_short_term_tokens - ) - return current_usage >= self.config.archive_threshold - - def archive_old_memories(self) -> None: - """Move older memories to long-term storage""" - if not self.long_term_memory: - logger.warning( - "No long-term memory storage configured for archiving" - ) - return - - while self.should_archive(): - # Get oldest non-system message - if not self.short_term_memory: - break - - oldest_entry = self.short_term_memory.pop(0) - - # Store in long-term memory - self.store_in_long_term_memory(oldest_entry) - self.archived_entries_count += 1 - - def store_in_long_term_memory(self, entry: MemoryEntry) -> None: - """Store a memory entry in long-term memory""" - if self.long_term_memory is None: - logger.warning( - "Attempted to store in non-existent long-term memory" - ) - return - - try: - self.long_term_memory.add(str(entry.model_dump())) - except Exception as e: - logger.error(f"Error storing in long-term memory: {e}") - # Re-add to short-term if storage fails - self.short_term_memory.insert(0, entry) - - def get_relevant_context( - self, query: str, max_tokens: Optional[int] = None - ) -> str: - """ - Get relevant context from both memory types - - Args: - query (str): Query to match against memories - max_tokens (Optional[int]): Maximum tokens to return - - Returns: - str: Combined relevant context - """ - contexts = [] - - # Add system messages first - for entry in self.system_messages: - contexts.append(entry.content) - - # Add short-term memory - for entry in reversed(self.short_term_memory): - contexts.append(entry.content) - - # Query long-term memory if available - if self.long_term_memory is not None: - long_term_context = self.long_term_memory.query(query) - if long_term_context: - contexts.append(str(long_term_context)) - - # Combine and truncate if needed - combined = "\n".join(contexts) - if max_tokens: - combined = self.truncate_to_token_limit( - combined, max_tokens - ) - - return combined - - def truncate_to_token_limit( - self, text: str, max_tokens: int - ) -> str: - """Truncate text to fit within token limit""" - current_tokens = self.tokenizer.count_tokens(text) - - if current_tokens <= max_tokens: - return text - - # Truncate by splitting into sentences and rebuilding - sentences = text.split(". ") - result = [] - current_count = 0 - - for sentence in sentences: - sentence_tokens = self.tokenizer.count_tokens(sentence) - if current_count + sentence_tokens <= max_tokens: - result.append(sentence) - current_count += sentence_tokens - else: - break - - return ". ".join(result) - - def clear_short_term_memory( - self, preserve_system: bool = True - ) -> None: - """Clear short-term memory with option to preserve system messages""" - if not preserve_system: - self.system_messages.clear() - self.short_term_memory.clear() - logger.info( - "Cleared short-term memory" - + " (preserved system messages)" - if preserve_system - else "" - ) - - def get_memory_stats(self) -> Dict[str, Any]: - """Get detailed memory statistics""" - return { - "short_term_messages": len(self.short_term_memory), - "system_messages": len(self.system_messages), - "current_tokens": self.get_current_token_count(), - "system_tokens": self.get_system_messages_token_count(), - "max_tokens": self.config.max_short_term_tokens, - "token_usage_percent": round( - ( - self.get_current_token_count() - / self.config.max_short_term_tokens - ) - * 100, - 2, - ), - "has_long_term_memory": self.long_term_memory is not None, - "archived_entries": self.archived_entries_count, - "total_tokens_processed": self.total_tokens_processed, - } - - def save_memory_snapshot(self, file_path: str) -> None: - """Save current memory state to file""" - try: - data = { - "timestamp": datetime.now().isoformat(), - "config": self.config.model_dump(), - "system_messages": [ - entry.model_dump() - for entry in self.system_messages - ], - "short_term_memory": [ - entry.model_dump() - for entry in self.short_term_memory - ], - "stats": self.get_memory_stats(), - } - - with open(file_path, "w") as f: - if file_path.endswith(".yaml"): - yaml.dump(data, f) - else: - json.dump(data, f, indent=2) - - logger.info(f"Saved memory snapshot to {file_path}") - - except Exception as e: - logger.error(f"Error saving memory snapshot: {e}") - raise - - def load_memory_snapshot(self, file_path: str) -> None: - """Load memory state from file""" - try: - with open(file_path, "r") as f: - if file_path.endswith(".yaml"): - data = yaml.safe_load(f) - else: - data = json.load(f) - - self.config = MemoryConfig(**data["config"]) - self.system_messages = [ - MemoryEntry(**entry) - for entry in data["system_messages"] - ] - self.short_term_memory = [ - MemoryEntry(**entry) - for entry in data["short_term_memory"] - ] - - logger.info(f"Loaded memory snapshot from {file_path}") - - except Exception as e: - logger.error(f"Error loading memory snapshot: {e}") - raise - - def search_memories( - self, query: str, memory_type: str = "all" - ) -> List[MemoryEntry]: - """ - Search through memories of specified type - - Args: - query (str): Search query - memory_type (str): Type of memories to search ("short_term", "system", "long_term", or "all") - - Returns: - List[MemoryEntry]: Matching memory entries - """ - results = [] - - if memory_type in ["short_term", "all"]: - results.extend( - [ - entry - for entry in self.short_term_memory - if query.lower() in entry.content.lower() - ] - ) - - if memory_type in ["system", "all"]: - results.extend( - [ - entry - for entry in self.system_messages - if query.lower() in entry.content.lower() - ] - ) - - if ( - memory_type in ["long_term", "all"] - and self.long_term_memory is not None - ): - long_term_results = self.long_term_memory.query(query) - if long_term_results: - # Convert long-term results to MemoryEntry format - for result in long_term_results: - content = str(result) - metadata = MemoryMetadata( - timestamp=time.time(), - role="long_term", - agent_name="system", - session_id="long_term", - memory_type="long_term", - token_count=self.tokenizer.count_tokens( - content - ), - ) - results.append( - MemoryEntry( - content=content, metadata=metadata - ) - ) - - return results - - def get_memory_by_timeframe( - self, start_time: float, end_time: float - ) -> List[MemoryEntry]: - """Get memories within a specific timeframe""" - return [ - entry - for entry in self.short_term_memory - if start_time <= entry.metadata.timestamp <= end_time - ] - - def export_memories( - self, file_path: str, format: str = "json" - ) -> None: - """Export memories to file in specified format""" - data = { - "system_messages": [ - entry.model_dump() for entry in self.system_messages - ], - "short_term_memory": [ - entry.model_dump() for entry in self.short_term_memory - ], - "stats": self.get_memory_stats(), - } - - with open(file_path, "w") as f: - if format == "yaml": - yaml.dump(data, f) - else: - json.dump(data, f, indent=2) diff --git a/swarms/structs/agent_security.py b/swarms/structs/agent_security.py new file mode 100644 index 00000000..8e588acf --- /dev/null +++ b/swarms/structs/agent_security.py @@ -0,0 +1,318 @@ +import base64 +import json +import uuid +from datetime import datetime +from dataclasses import dataclass +from typing import Optional, Union, Dict, List + +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import padding, rsa +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC + + +@dataclass +class EncryptedMessage: + """Structure for encrypted messages between agents""" + + sender_id: str + receiver_id: str + encrypted_content: bytes + timestamp: float + message_id: str + session_id: str + + +class EncryptionSession: + """Represents an encrypted communication session between agents""" + + def __init__( + self, + session_id: str, + agent_ids: List[str], + encrypted_keys: Dict[str, bytes], + created_at: datetime, + ): + self.session_id = session_id + self.agent_ids = agent_ids + self.encrypted_keys = encrypted_keys + self.created_at = created_at + + +class AgentEncryption: + """ + Handles encryption for agent data both at rest and in transit. + Supports both symmetric (for data at rest) and asymmetric (for data in transit) encryption. + Also supports secure multi-agent communication. + """ + + def __init__( + self, + agent_id: Optional[str] = None, + encryption_key: Optional[str] = None, + enable_transit_encryption: bool = False, + enable_rest_encryption: bool = False, + enable_multi_agent: bool = False, + ): + self.agent_id = agent_id or str(uuid.uuid4()) + self.enable_transit_encryption = enable_transit_encryption + self.enable_rest_encryption = enable_rest_encryption + self.enable_multi_agent = enable_multi_agent + + # Multi-agent communication storage + self.sessions: Dict[str, EncryptionSession] = {} + self.known_agents: Dict[str, "AgentEncryption"] = {} + + if enable_rest_encryption: + # Initialize encryption for data at rest + if encryption_key: + self.encryption_key = base64.urlsafe_b64encode( + PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=f"agent_{self.agent_id}".encode(), # Unique salt per agent + iterations=100000, + ).derive(encryption_key.encode()) + ) + else: + self.encryption_key = Fernet.generate_key() + + self.cipher_suite = Fernet(self.encryption_key) + + if enable_transit_encryption or enable_multi_agent: + # Generate RSA key pair for transit encryption + self.private_key = rsa.generate_private_key( + public_exponent=65537, key_size=2048 + ) + self.public_key = self.private_key.public_key() + + def register_agent( + self, agent_id: str, agent_encryption: "AgentEncryption" + ) -> None: + """Register another agent for secure communication""" + if not self.enable_multi_agent: + raise ValueError("Multi-agent support is not enabled") + self.known_agents[agent_id] = agent_encryption + + def create_session(self, agent_ids: List[str]) -> str: + """Create a new encrypted session between multiple agents""" + if not self.enable_multi_agent: + raise ValueError("Multi-agent support is not enabled") + + session_id = str(uuid.uuid4()) + + # Generate a shared session key + session_key = Fernet.generate_key() + + # Create encrypted copies of the session key for each agent + encrypted_keys = {} + for agent_id in agent_ids: + if ( + agent_id not in self.known_agents + and agent_id != self.agent_id + ): + raise ValueError(f"Agent {agent_id} not registered") + + if agent_id == self.agent_id: + agent_public_key = self.public_key + else: + agent_public_key = self.known_agents[ + agent_id + ].public_key + + encrypted_key = agent_public_key.encrypt( + session_key, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + encrypted_keys[agent_id] = encrypted_key + + # Store session information + self.sessions[session_id] = EncryptionSession( + session_id=session_id, + agent_ids=agent_ids, + encrypted_keys=encrypted_keys, + created_at=datetime.now(), + ) + + return session_id + + def encrypt_message( + self, + content: Union[str, dict], + receiver_id: str, + session_id: str, + ) -> EncryptedMessage: + """Encrypt a message for another agent within a session""" + if not self.enable_multi_agent: + raise ValueError("Multi-agent support is not enabled") + + if session_id not in self.sessions: + raise ValueError("Invalid session ID") + + session = self.sessions[session_id] + if ( + self.agent_id not in session.agent_ids + or receiver_id not in session.agent_ids + ): + raise ValueError("Sender or receiver not in session") + + # Serialize content if it's a dictionary + if isinstance(content, dict): + content = json.dumps(content) + + # Get the session key + encrypted_session_key = session.encrypted_keys[self.agent_id] + session_key = self.decrypt_session_key(encrypted_session_key) + + # Create Fernet cipher with session key + cipher = Fernet(session_key) + + # Encrypt the message + encrypted_content = cipher.encrypt(content.encode()) + + return EncryptedMessage( + sender_id=self.agent_id, + receiver_id=receiver_id, + encrypted_content=encrypted_content, + timestamp=datetime.now().timestamp(), + message_id=str(uuid.uuid4()), + session_id=session_id, + ) + + def decrypt_message( + self, message: EncryptedMessage + ) -> Union[str, dict]: + """Decrypt a message from another agent""" + if not self.enable_multi_agent: + raise ValueError("Multi-agent support is not enabled") + + if message.session_id not in self.sessions: + raise ValueError("Invalid session ID") + + if self.agent_id != message.receiver_id: + raise ValueError("Message not intended for this agent") + + session = self.sessions[message.session_id] + + # Get the session key + encrypted_session_key = session.encrypted_keys[self.agent_id] + session_key = self.decrypt_session_key(encrypted_session_key) + + # Create Fernet cipher with session key + cipher = Fernet(session_key) + + # Decrypt the message + decrypted_content = cipher.decrypt( + message.encrypted_content + ).decode() + + # Try to parse as JSON + try: + return json.loads(decrypted_content) + except json.JSONDecodeError: + return decrypted_content + + def decrypt_session_key(self, encrypted_key: bytes) -> bytes: + """Decrypt a session key using the agent's private key""" + return self.private_key.decrypt( + encrypted_key, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + + # Original methods preserved below + def encrypt_at_rest(self, data: Union[str, dict, bytes]) -> bytes: + """Encrypts data for storage""" + if not self.enable_rest_encryption: + return ( + data + if isinstance(data, bytes) + else str(data).encode() + ) + + if isinstance(data, dict): + data = json.dumps(data) + if isinstance(data, str): + data = data.encode() + + return self.cipher_suite.encrypt(data) + + def decrypt_at_rest( + self, encrypted_data: bytes + ) -> Union[str, dict]: + """Decrypts stored data""" + if not self.enable_rest_encryption: + return encrypted_data.decode() + + decrypted_data = self.cipher_suite.decrypt(encrypted_data) + + try: + return json.loads(decrypted_data) + except json.JSONDecodeError: + return decrypted_data.decode() + + def encrypt_for_transit(self, data: Union[str, dict]) -> bytes: + """Encrypts data for transmission""" + if not self.enable_transit_encryption: + return str(data).encode() + + if isinstance(data, dict): + data = json.dumps(data) + + return self.public_key.encrypt( + data.encode(), + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ) + + def decrypt_from_transit( + self, data: Union[bytes, str] + ) -> Union[str, dict]: + """Decrypts received data, handling both encrypted and unencrypted inputs""" + if not self.enable_transit_encryption: + return data.decode() if isinstance(data, bytes) else data + + try: + if isinstance(data, bytes) and len(data) == 256: + decrypted_data = self.private_key.decrypt( + data, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA256()), + algorithm=hashes.SHA256(), + label=None, + ), + ).decode() + else: + return ( + data.decode() if isinstance(data, bytes) else data + ) + + try: + return json.loads(decrypted_data) + except json.JSONDecodeError: + return decrypted_data + except ValueError: + return data.decode() if isinstance(data, bytes) else data + + def get_public_key_pem(self) -> bytes: + """Returns the public key in PEM format for sharing""" + if ( + not self.enable_transit_encryption + and not self.enable_multi_agent + ): + return b"" + + return self.public_key.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py index 16e1f5b9..9e757c70 100644 --- a/swarms/structs/auto_swarm_builder.py +++ b/swarms/structs/auto_swarm_builder.py @@ -2,9 +2,9 @@ import os from typing import List from pydantic import BaseModel, Field -from swarm_models import OpenAIFunctionCaller, OpenAIChat from swarms.structs.agent import Agent +from swarms.utils.function_caller_model import OpenAIFunctionCaller from swarms.structs.swarm_router import SwarmRouter from swarms.utils.loguru_logger import initialize_logger from swarms.structs.agents_available import showcase_available_agents @@ -64,15 +64,6 @@ class SwarmConfig(BaseModel): ) -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - - BOSS_SYSTEM_PROMPT = """ Manage a swarm of worker agents to efficiently serve the user by deciding whether to create new agents or delegate tasks. Ensure operations are efficient and effective. @@ -248,7 +239,7 @@ class AutoSwarmBuilder: agent_name=agent_name, description=agent_description, system_prompt=agent_system_prompt, - llm=model, + model_name="gpt-4o", max_loops=max_loops, autosave=True, dashboard=False, diff --git a/swarms/structs/base_swarm.py b/swarms/structs/base_swarm.py index 277f2d4f..4e26c0b3 100644 --- a/swarms/structs/base_swarm.py +++ b/swarms/structs/base_swarm.py @@ -20,11 +20,6 @@ from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.omni_agent_types import AgentType from pydantic import BaseModel -from swarms.utils.pandas_utils import ( - dict_to_dataframe, - display_agents_info, - pydantic_model_to_dataframe, -) from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="base_swarm") @@ -795,17 +790,10 @@ class BaseSwarm(ABC): Returns: None """ - display_agents_info(self.agents) + ... def agents_to_dataframe(self): """ Convert agents to a pandas DataFrame. """ - data = [agent.agent_output.dict() for agent in self.agents] - return dict_to_dataframe(data) - - def model_to_dataframe(self): - """ - Convert the Pydantic model to a pandas DataFrame. - """ - return pydantic_model_to_dataframe(self.output_schema) + ... diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index 4107042a..5f0e799c 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -4,7 +4,6 @@ from typing import Any, Dict, List, Optional from swarms.utils.formatter import formatter from swarms.structs.agent import Agent from swarms.structs.base_structure import BaseStructure -from swarms.structs.task import Task from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger("base-workflow") @@ -43,7 +42,7 @@ class BaseWorkflow(BaseStructure): def __init__( self, agents: List[Agent] = None, - task_pool: List[Task] = None, + task_pool: List[str] = None, models: List[Any] = None, *args, **kwargs, @@ -69,8 +68,8 @@ class BaseWorkflow(BaseStructure): def add_task( self, - task: Task = None, - tasks: List[Task] = None, + task: str = None, + tasks: List[str] = None, *args, **kwargs, ): @@ -293,12 +292,6 @@ class BaseWorkflow(BaseStructure): "green", ) - task = Task( - description=task, - agent=kwargs["agent"], - args=list(kwargs["args"]), - kwargs=kwargs["kwargs"], - ) self.tasks.append(task) except Exception as error: formatter.print_panel( @@ -306,48 +299,6 @@ class BaseWorkflow(BaseStructure): ) raise error - def load_workflow_state( - self, filepath: str = None, **kwargs - ) -> None: - """ - Loads the workflow state from a json file and restores the workflow state. - - Args: - filepath (str): The path to load the workflow state from. - - Examples: - >>> from swarm_models import OpenAIChat - >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") - >>> workflow = SequentialWorkflow(max_loops=1) - >>> workflow.add("What's the weather in miami", llm) - >>> workflow.add("Create a report on these metrics", llm) - >>> workflow.save_workflow_state("sequential_workflow_state.json") - >>> workflow.load_workflow_state("sequential_workflow_state.json") - - """ - try: - filepath = filepath or self.restore_state_filepath - - with open(filepath) as f: - state = json.load(f) - self.max_loops = state["max_loops"] - self.tasks = [] - for task_state in state["tasks"]: - task = Task( - description=task_state["description"], - agent=task_state["agent"], - args=task_state["args"], - kwargs=task_state["kwargs"], - result=task_state["result"], - history=task_state["history"], - ) - self.tasks.append(task) - except Exception as error: - formatter.print_panel( - f"Error loading workflow state: {error}", - ) - def workflow_dashboard(self, **kwargs) -> None: """ Displays a dashboard for the workflow. diff --git a/swarms/structs/company.py b/swarms/structs/company.py deleted file mode 100644 index f7fb36b7..00000000 --- a/swarms/structs/company.py +++ /dev/null @@ -1,177 +0,0 @@ -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Union - -from swarms.structs.agent import Agent -from swarms.structs.base_swarm import BaseSwarm -from swarms.utils.loguru_logger import initialize_logger - - -logger = initialize_logger("company-swarm") - - -@dataclass -class Company(BaseSwarm): - """ - Represents a company with a hierarchical organizational structure. - """ - - org_chart: List[List[Agent]] - shared_instructions: str = None - ceo: Optional[Agent] = None - agents: List[Agent] = field(default_factory=list) - agent_interactions: Dict[str, List[str]] = field( - default_factory=dict - ) - - def __post_init__(self): - self._parse_org_chart(self.org_chart) - - def add(self, agent: Agent) -> None: - """ - Adds an agent to the company. - - Args: - agent (Agent): The agent to be added. - - Raises: - ValueError: If an agent with the same ID already exists in the company. - """ - try: - if any( - existing_agent.id == agent.id - for existing_agent in self.agents - ): - raise ValueError( - f"Agent with id {agent.id} already exists in the" - " company." - ) - self.agents.append(agent) - - except Exception as error: - logger.error( - f"[ERROR][CLASS: Company][METHOD: add] {error}" - ) - raise error - - def get(self, agent_name: str) -> Agent: - """ - Retrieves an agent from the company by name. - - Args: - agent_name (str): The name of the agent to retrieve. - - Returns: - Agent: The retrieved agent. - - Raises: - ValueError: If an agent with the specified name does not exist in the company. - """ - try: - for agent in self.agents: - if agent.name == agent_name: - return agent - raise ValueError( - f"Agent with name {agent_name} does not exist in the" - " company." - ) - except Exception as error: - logger.error( - f"[ERROR][CLASS: Company][METHOD: get] {error}" - ) - raise error - - def remove(self, agent: Agent) -> None: - """ - Removes an agent from the company. - - Args: - agent (Agent): The agent to be removed. - """ - try: - self.agents.remove(agent) - except Exception as error: - logger.error( - f"[ERROR][CLASS: Company][METHOD: remove] {error}" - ) - raise error - - def _parse_org_chart( - self, org_chart: Union[List[Agent], List[List[Agent]]] - ) -> None: - """ - Parses the organization chart and adds agents to the company. - - Args: - org_chart (Union[List[Agent], List[List[Agent]]]): The organization chart - representing the hierarchy of agents. - - Raises: - ValueError: If more than one CEO is found in the org chart or if an invalid - agent is encountered. - """ - try: - for node in org_chart: - if isinstance(node, Agent): - if self.ceo: - raise ValueError("1 CEO is only allowed") - self.ceo = node - self.add(node) - - elif isinstance(node, list): - for agent in node: - if not isinstance(agent, Agent): - raise ValueError( - "Invalid agent in org chart" - ) - self.add(agent) - - for i, agent in enumerate(node): - if i == len(node) - 1: - continue - - for other_agent in node[i + 1]: - self.__init_task(agent, other_agent) - except Exception as error: - logger.error( - "[ERROR][CLASS: Company][METHOD: _parse_org_chart]" - f" {error}" - ) - raise error - - def _init_interaction( - self, - agent1: Agent, - agent2: Agent, - ) -> None: - """ - Initializes the interaction between two agents. - - Args: - agent1 (Agent): The first agent involved in the interaction. - agent2 (Agent): The second agent involved in the interaction. - - Returns: - None - """ - if agent1.ai_name not in self.agents_interactions: - self.agents_interactions[agent1.ai_name] = [] - self.agents_interactions[agent1.ai_name].append( - agent2.ai_name - ) - - def run(self): - """ - Run the company - """ - for ( - agent_name, - interaction_agents, - ) in self.agents_interactions.items(): - agent = self.get(agent_name) - for interaction_agent in interaction_agents: - task_description = ( - f"Task for {agent_name} to interact with" - f" {interaction_agent}" - ) - print(f"{task_description} is being executed") - agent.run(task_description) diff --git a/swarms/structs/csv_to_agent.py b/swarms/structs/csv_to_agent.py index 624e3577..2b8ecf9c 100644 --- a/swarms/structs/csv_to_agent.py +++ b/swarms/structs/csv_to_agent.py @@ -8,7 +8,7 @@ from dataclasses import dataclass import csv from pathlib import Path from enum import Enum -from swarms import Agent +from swarms.structs.agent import Agent class ModelName(str, Enum): diff --git a/swarms/structs/graph_swarm.py b/swarms/structs/graph_swarm.py index e67add52..1bbc1673 100644 --- a/swarms/structs/graph_swarm.py +++ b/swarms/structs/graph_swarm.py @@ -14,15 +14,6 @@ from swarms.utils.auto_download_check_packages import ( auto_check_and_download_package, ) -# Configure logging -logger.add( - "graphswarm.log", - rotation="500 MB", - retention="10 days", - level="INFO", - format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", -) - class AgentOutput(BaseModel): """Structured output from an agent.""" diff --git a/swarms/structs/groupchat.py b/swarms/structs/groupchat.py index 7428461b..05d96260 100644 --- a/swarms/structs/groupchat.py +++ b/swarms/structs/groupchat.py @@ -1,9 +1,12 @@ import concurrent.futures from datetime import datetime +import os from typing import Callable, List +from dotenv import load_dotenv from loguru import logger from pydantic import BaseModel, Field +from swarm_models import OpenAIChat from swarms.structs.agent import Agent @@ -123,7 +126,7 @@ class GroupChat: description: str = "A group chat for multiple agents", agents: List[Agent] = [], speaker_fn: SpeakerFunction = round_robin, - max_loops: int = 10, + max_loops: int = 1, ): """ Initialize the GroupChat. @@ -171,7 +174,9 @@ class GroupChat: Previous messages: {self.get_full_chat_history()} """ # Updated line - message = agent.run(context + prompt) + message = agent.run( + task=f"From {agent.agent_name}: {context} \n {prompt}" + ) return AgentResponse( agent_name=agent.name, role=agent.system_prompt, @@ -224,7 +229,7 @@ class GroupChat: def run(self, task: str) -> ChatHistory: """ - Run the group chat. + Run the group chat, feeding the context of previous turns into each new turn. Args: task (str): The initial message to start the chat. @@ -242,12 +247,22 @@ class GroupChat: turn_number=turn, responses=[], task=task ) + # Get context from previous turns + context = self.get_full_chat_history() + + # Combine task with context for agents + contextualized_task = ( + f"{task}\n\nPrevious conversation:\n{context}" + if context + else task + ) + for agent in self.agents: if self.speaker_fn( self.get_recent_messages(), agent ): response = self._get_response_sync( - agent, task, turn + agent, contextualized_task, turn ) current_turn.responses.append(response) self.chat_history.total_messages += 1 @@ -293,63 +308,63 @@ class GroupChat: ) -# if __name__ == "__main__": - -# load_dotenv() - -# # Get the OpenAI API key from the environment variable -# api_key = os.getenv("OPENAI_API_KEY") - -# # Create an instance of the OpenAIChat class -# model = OpenAIChat( -# openai_api_key=api_key, -# model_name="gpt-4o-mini", -# temperature=0.1, -# ) - -# # Example agents -# agent1 = Agent( -# agent_name="Financial-Analysis-Agent", -# system_prompt="You are a financial analyst specializing in investment strategies.", -# llm=model, -# max_loops=1, -# autosave=False, -# dashboard=False, -# verbose=True, -# dynamic_temperature_enabled=True, -# user_name="swarms_corp", -# retry_attempts=1, -# context_length=200000, -# output_type="string", -# streaming_on=False, -# ) - -# agent2 = Agent( -# agent_name="Tax-Adviser-Agent", -# system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.", -# llm=model, -# max_loops=1, -# autosave=False, -# dashboard=False, -# verbose=True, -# dynamic_temperature_enabled=True, -# user_name="swarms_corp", -# retry_attempts=1, -# context_length=200000, -# output_type="string", -# streaming_on=False, -# ) - -# agents = [agent1, agent2] - -# chat = GroupChat( -# name="Investment Advisory", -# description="Financial and tax analysis group", -# agents=agents, -# speaker_fn=expertise_based, -# ) - -# history = chat.run( -# "How to optimize tax strategy for investments?" -# ) -# print(history.model_dump_json(indent=2)) +if __name__ == "__main__": + + load_dotenv() + + # Get the OpenAI API key from the environment variable + api_key = os.getenv("OPENAI_API_KEY") + + # Create an instance of the OpenAIChat class + model = OpenAIChat( + openai_api_key=api_key, + model_name="gpt-4o-mini", + temperature=0.1, + ) + + # Example agents + agent1 = Agent( + agent_name="Financial-Analysis-Agent", + system_prompt="You are a financial analyst specializing in investment strategies.", + llm=model, + max_loops=1, + autosave=False, + dashboard=False, + verbose=True, + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=1, + context_length=200000, + output_type="string", + streaming_on=False, + ) + + agent2 = Agent( + agent_name="Tax-Adviser-Agent", + system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.", + llm=model, + max_loops=1, + autosave=False, + dashboard=False, + verbose=True, + dynamic_temperature_enabled=True, + user_name="swarms_corp", + retry_attempts=1, + context_length=200000, + output_type="string", + streaming_on=False, + ) + + agents = [agent1, agent2] + + chat = GroupChat( + name="Investment Advisory", + description="Financial and tax analysis group", + agents=agents, + speaker_fn=expertise_based, + ) + + history = chat.run( + "How to optimize tax strategy for investments?" + ) + print(history.model_dump_json(indent=2)) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 4eac5c78..6244d18a 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -1,577 +1,313 @@ -from typing import List, Any +from concurrent.futures import ThreadPoolExecutor +from typing import Any, List, Optional, Union from pydantic import BaseModel, Field -from swarms.utils.loguru_logger import initialize_logger -from swarms.structs.base_swarm import BaseSwarm + from swarms.structs.agent import Agent -from swarms.structs.concat import concat_strings -from swarms.structs.agent_registry import AgentRegistry -from swarm_models.base_llm import BaseLLM +from swarms.structs.base_swarm import BaseSwarm from swarms.structs.conversation import Conversation +from swarms.utils.formatter import formatter -logger = initialize_logger(log_folder="hiearchical_swarm") - -# Example usage: -HIEARCHICAL_AGENT_SYSTEM_PROMPT = """ -Here's a full-fledged system prompt for a director boss agent, complete with instructions and many-shot examples: - ---- - -**System Prompt: Director Boss Agent** - -### Role: -You are a Director Boss Agent responsible for orchestrating a swarm of worker agents. Your primary duty is to serve the user efficiently, effectively, and skillfully. You dynamically create new agents when necessary or utilize existing agents, assigning them tasks that align with their capabilities. You must ensure that each agent receives clear, direct, and actionable instructions tailored to their role. - -### Key Responsibilities: -1. **Task Delegation:** Assign tasks to the most relevant agent. If no relevant agent exists, create a new one with an appropriate name and system prompt. -2. **Efficiency:** Ensure that tasks are completed swiftly and with minimal resource expenditure. -3. **Clarity:** Provide orders that are simple, direct, and actionable. Avoid ambiguity. -4. **Dynamic Decision Making:** Assess the situation and choose the most effective path, whether that involves using an existing agent or creating a new one. -5. **Monitoring:** Continuously monitor the progress of each agent and provide additional instructions or corrections as necessary. - -### Instructions: -- **Identify the Task:** Analyze the input task to determine its nature and requirements. -- **Agent Selection/Creation:** - - If an agent is available and suited for the task, assign the task to that agent. - - If no suitable agent exists, create a new agent with a relevant system prompt. -- **Task Assignment:** Provide the selected agent with explicit and straightforward instructions. -- **Reasoning:** Justify your decisions when selecting or creating agents, focusing on the efficiency and effectiveness of task completion. - -""" - - -class AgentSpec(BaseModel): - """ - A class representing the specifications of an agent. - - Attributes: - agent_name (str): The name of the agent. - system_prompt (str): The system prompt for the agent. - agent_description (str): The description of the agent. - max_tokens (int): The maximum number of tokens to generate in the API response. - temperature (float): A parameter that controls the randomness of the generated text. - context_window (int): The context window for the agent. - task (str): The main task for the agent. - """ - - agent_name: str = Field( - ..., - description="The name of the agent.", - ) - system_prompt: str = Field( - ..., - description="The system prompt for the agent. Write an extremely detailed system prompt for the agent.", - ) - agent_description: str = Field( - ..., - description="The description of the agent.", - ) - task: str = Field( - ..., - description="The main task for the agent.", - ) - +from swarms.utils.loguru_logger import initialize_logger -# class AgentTeam(BaseModel): -# agents: List[AgentSpec] = Field( -# ..., -# description="The list of agents in the team", -# ) -# flow: str = Field( -# ..., -# description="Agent Name -> ", -# ) +logger = initialize_logger(log_folder="hierarchical_swarm") -# Schema to send orders to the agents -class HierarchicalOrderCall(BaseModel): +class HierarchicalOrder(BaseModel): agent_name: str = Field( ..., - description="The name of the agent to assign the task to.", + description="Specifies the name of the agent to which the task is assigned. This is a crucial element in the hierarchical structure of the swarm, as it determines the specific agent responsible for the task execution.", ) task: str = Field( ..., - description="The main specific task to be assigned to the agent. Be very specific and direct.", + description="Defines the specific task to be executed by the assigned agent. This task is a key component of the swarm's plan and is essential for achieving the swarm's goals.", ) -# For not agent creation -class CallTeam(BaseModel): - # swarm_name: str = Field( - # ..., - # description="The name of the swarm: e.g., 'Marketing Swarm' or 'Finance Swarm'", - # ) - rules: str = Field( +class SwarmSpec(BaseModel): + goals: str = Field( ..., - description="The rules for all the agents in the swarm: e.g., All agents must return code. Be very simple and direct", + description="The goal of the swarm. This is the overarching objective that the swarm is designed to achieve. It guides the swarm's plan and the tasks assigned to the agents.", ) plan: str = Field( ..., - description="The plan for the swarm: e.g., First create the agents, then assign tasks, then monitor progress", - ) - orders: List[HierarchicalOrderCall] - - -class SwarmSpec(BaseModel): - """ - A class representing the specifications of a swarm of agents. - - Attributes: - multiple_agents (List[AgentSpec]): The list of agents in the swarm. - """ - - swarm_name: str = Field( - ..., - description="The name of the swarm: e.g., 'Marketing Swarm' or 'Finance Swarm'", + description="Outlines the sequence of actions to be taken by the swarm. This plan is a detailed roadmap that guides the swarm's behavior and decision-making.", ) - multiple_agents: List[AgentSpec] rules: str = Field( ..., - description="The rules for all the agents in the swarm: e.g., All agents must return code. Be very simple and direct", + description="Defines the governing principles for swarm behavior and decision-making. These rules are the foundation of the swarm's operations and ensure that the swarm operates in a coordinated and efficient manner.", ) - plan: str = Field( + orders: List[HierarchicalOrder] = Field( ..., - description="The plan for the swarm: e.g., First create the agents, then assign tasks, then monitor progress", + description="A collection of task assignments to specific agents within the swarm. These orders are the specific instructions that guide the agents in their task execution and are a key element in the swarm's plan.", ) -class HierarchicalAgentSwarm(BaseSwarm): +class HierarchicalSwarm(BaseSwarm): """ - A class to create and manage a hierarchical swarm of agents. - - Methods: - __init__(system_prompt, max_tokens, temperature, base_model, parallel_tool_calls): Initializes the function caller. - create_agent(agent_name, system_prompt, agent_description, max_tokens, temperature, context_window): Creates an individual agent. - parse_json_for_agents_then_create_agents(function_call): Parses a JSON function call to create multiple agents. - run(task): Runs the function caller to create and execute agents based on the provided task. + Represents a hierarchical swarm of agents, with a director that orchestrates tasks among the agents. """ def __init__( self, name: str = "HierarchicalAgentSwarm", - description: str = "A swarm of agents that can be used to distribute tasks to a team of agents.", - director: Any = None, - agents: List[Agent] = None, + description: str = "Distributed task swarm", + director: Optional[Union[Agent, Any]] = None, + agents: List[Union[Agent, Any]] = None, max_loops: int = 1, - create_agents_on: bool = False, - template_worker_agent: Agent = None, - director_planning_prompt: str = None, - template_base_worker_llm: BaseLLM = None, - swarm_history: str = None, + return_all_history: bool = False, *args, **kwargs, ): """ - Initializes the HierarchicalAgentSwarm with an OpenAIFunctionCaller. - - Args: - system_prompt (str): The system prompt for the function caller. - max_tokens (int): The maximum number of tokens to generate in the API response. - temperature (float): The temperature setting for text generation. - base_model (BaseModel): The base model for the function caller. - parallel_tool_calls (bool): Whether to run tool calls in parallel. + Initializes the HierarchicalSwarm with the given parameters. + + :param name: The name of the swarm. + :param description: A description of the swarm. + :param director: The director agent that orchestrates tasks. + :param agents: A list of agents within the swarm. + :param max_loops: The maximum number of feedback loops between the director and agents. + :param return_all_history: A flag indicating whether to return all conversation history. """ super().__init__( name=name, description=description, agents=agents, - *args, - **kwargs, ) - self.name = name - self.description = description self.director = director self.agents = agents self.max_loops = max_loops - self.create_agents_on = create_agents_on - self.template_worker_agent = template_worker_agent - self.director_planning_prompt = director_planning_prompt - self.template_base_worker_llm = template_base_worker_llm - self.swarm_history = swarm_history - - # Check if the agents are set - self.agents_check() - - # Agent Registry - self.agent_registry = AgentRegistry() - - # Add agents to the registry - self.add_agents_into_registry(self.agents) - - # Swarm History + self.return_all_history = return_all_history self.conversation = Conversation(time_enabled=True) - self.swarm_history = ( - self.conversation.return_history_as_string() - ) - - def agents_check(self): - if self.director is None: - raise ValueError("The director is not set.") + self.add_name_and_description() - if len(self.agents) == 0: - self.create_agents_on = True + self.check_agents() - if len(self.agents) > 0: - self.director.base_model = CallTeam + self.list_all_agents() - self.director.system_prompt = ( - HIEARCHICAL_AGENT_SYSTEM_PROMPT + def check_agents(self): + """ + Checks if there are any agents and a director set for the swarm. + Raises ValueError if either condition is not met. + """ + if not self.agents: + raise ValueError( + "No agents found in the swarm. At least one agent must be provided to create a hierarchical swarm." ) - if self.max_loops == 0: - raise ValueError("The max_loops is not set.") + if not self.director: + raise ValueError( + "Director not set for the swarm. A director agent is required to coordinate and orchestrate tasks among the agents." + ) - def add_agents_into_registry(self, agents: List[Agent]): + def run_director( + self, task: str, img: str = None, *args, **kwargs + ): """ - add_agents_into_registry: Add agents into the agent registry. - - Args: - agents (List[Agent]): A list of agents to add into the registry. - - Returns: - None + Runs a task through the director agent. + :param task: The task to be executed by the director. + :param img: Optional image to be used with the task. + :return: The output of the director's task execution. """ - for agent in agents: - self.agent_registry.add(agent) - def create_agent( - self, - agent_name: str, - system_prompt: str, - agent_description: str, - task: str = None, - ) -> str: - """ - Creates an individual agent. - - Args: - agent_name (str): The name of the agent. - system_prompt (str): The system prompt for the agent. - agent_description (str): The description of the agent. - max_tokens (int): The maximum number of tokens to generate. - temperature (float): The temperature for text generation. - context_window (int): The context window size for the agent. - - Returns: - Agent: An instantiated agent object. - """ - # name = agent_name.replace(" ", "_") - logger.info(f"Creating agent: {agent_name}") - - agent_name = Agent( - agent_name=agent_name, - llm=self.template_base_worker_llm, # Switch to model router here later - system_prompt=system_prompt, - agent_description=agent_description, - retry_attempts=1, - verbose=False, - dashboard=False, + function_call = self.director.run( + task=f"History: {self.conversation.get_str()} Your Task: {task}", ) - self.agents.append(agent_name) + formatter.print_panel(f"Director Output: {function_call}") - logger.info(f"Running agent: {agent_name} on task: {task}") - output = agent_name.run(task) + return function_call - self.conversation.add(role=agent_name, content=output) - return output - - def parse_json_for_agents_then_create_agents( - self, function_call: dict - ) -> List[Agent]: + def run(self, task: str, img: str = None, *args, **kwargs) -> str: """ - Parses a JSON function call to create a list of agents. - - Args: - function_call (dict): The JSON function call specifying the agents. + Runs a task through the swarm, involving the director and agents. - Returns: - List[Agent]: A list of created agent objects. + :param task: The task to be executed by the swarm. + :param img: Optional image to be used with the task. + :return: The output of the swarm's task execution. """ - responses = [] - logger.info("Parsing JSON for agents") - - if self.create_agents_on: - for agent in function_call["multiple_agents"]: - out = self.create_agent( - agent_name=agent["agent_name"], - system_prompt=agent["system_prompt"], - agent_description=agent["agent_description"], - task=agent["task"], - ) - responses.append(out) - else: - for agent in function_call["orders"]: - out = self.run_worker_agent( - name=agent["agent_name"], - task=agent["task"], - ) - responses.append(out) + self.conversation.add(role="User", content=f"Task: {task}") - return concat_strings(responses) + function_call = self.run_director( + task=self.conversation.get_str() + ) - def run(self, task: str) -> str: - """ - Runs the function caller to create and execute agents based on the provided task. + self.parse_orders(function_call) - Args: - task (str): The task for which the agents need to be created and executed. + if self.return_all_history: + return self.conversation.get_str() + else: + return self.conversation.get_str() - Returns: - List[Agent]: A list of created agent objects. + def add_name_and_description(self): + """ + Adds the swarm's name and description to the conversation. """ - logger.info("Running the swarm") - - # Run the function caller to output JSON function call - function_call = self.model.run(task) - - # Add the function call to the conversation self.conversation.add( - role="Director", content=str(function_call) + role="User", + content=f"\n Swarm Name: {self.name} \n Swarm Description: {self.description}", ) - # Logging the function call with metrics and details - self.log_director_function_call(function_call) - - # # Parse the JSON function call and create agents -> run Agents - return self.parse_json_for_agents_then_create_agents( - function_call + formatter.print_panel( + f"⚡ INITIALIZING HIERARCHICAL SWARM UNIT: {self.name}\n" + f"🔒 CLASSIFIED DIRECTIVE: {self.description}\n" + f"📡 STATUS: ACTIVATING SWARM PROTOCOLS\n" + f"🌐 ESTABLISHING SECURE AGENT MESH NETWORK\n" + f"⚠️ CYBERSECURITY MEASURES ENGAGED\n", + title="SWARM CORPORATION - HIERARCHICAL SWARMS ACTIVATING...", ) - def run_new(self, task: str): + def list_all_agents(self) -> str: """ - Runs the function caller to create and execute agents based on the provided task. - - Args: - task (str): The task for which the agents need to be created and executed. + Lists all agents available in the swarm. - Returns: - List[Agent]: A list of created agent objects. + :return: A string representation of all agents in the swarm. """ - logger.info("Running the swarm") - # Run the function caller to output JSON function call - function_call = self.model.run(task) - self.conversation.add( - role="Director", content=str(function_call) + # need to fetch name and description of all agents + all_agents = "\n".join( + f"Agent: {agent.agent_name} || Description: {agent.description or agent.system_prompt} \n" + for agent in self.agents ) - # Logging the function call with metrics and details - self.log_director_function_call(function_call) - - if self.create_agents_on: - # Create agents from the function call - self.create_agents_from_func_call(function_call) - - # Now submit orders to the agents - self.director.base_model = CallTeam - - orders_prompt = f"Now, the agents have been created. Submit orders to the agents to enable them to complete the task: {task}: {self.list_agents_available()}" - orders = self.director.run(orders_prompt) - self.conversation.add( - role="Director", content=str(orders_prompt + orders) - ) + self.conversation.add( + role="User", + content=f"All Agents Available in the Swarm {self.name}: \n {all_agents}", + ) - # Check the type of the response - orders = self.check_agent_output_type(orders) + formatter.print_panel( + all_agents, title="All Agents Available in the Swarm" + ) - # Distribute the orders to the agents - return self.distribute_orders_to_agents(orders) + def find_agent(self, name: str) -> Optional[Agent]: + """ + Finds an agent by its name within the swarm. - def check_agent_output_type(self, response: Any): - if isinstance(response, dict): - return response - if isinstance(response, str): - return eval(response) - else: - return response + :param name: The name of the agent to find. + :return: The agent if found, otherwise None. + """ + for agent in self.agents: + if agent.agent_name == name: + return agent + return None - def distribute_orders_to_agents(self, order_dict: dict) -> str: - # Now we need to parse the CallTeam object - # and distribute the orders to the agents - responses = [] + def run_agent(self, agent_name: str, task: str, img: str = None): + """ + Runs a task through a specific agent. - for order in order_dict["orders"]: - agent_name = order["agent_name"] - task = order["task"] + :param agent_name: The name of the agent to execute the task. + :param task: The task to be executed by the agent. + :param img: Optional image to be used with the task. + :return: The output of the agent's task execution. + """ + try: + agent = self.find_agent(agent_name) - # Find and run the agent - response = self.run_worker_agent( - name=agent_name, task=task - ) + if agent: + out = agent.run( + task=f"History: {self.conversation.get_str()} Your Task: {task}", + img=img, + ) - log = f"Agent: {agent_name} completed task: {task} with response: {response}" - self.conversation.add( - role=agent_name, content=task + response - ) - responses.append(log) - logger.info(log) + self.conversation.add( + role=agent_name, + content=out, + ) - return concat_strings(responses) + return out + else: + logger.error( + f"Agent {agent_name} not found in the swarm {self.name}" + ) + except Exception as e: + logger.error(f"Error running agent {agent_name}: {e}") + return "Error running agent" - def create_single_agent( - self, name: str, system_prompt: str, description - ) -> Agent: + def parse_orders(self, orders: SwarmSpec) -> None: """ - Create a single agent from the agent specification. - - Args: - agent_spec (dict): The agent specification. - - Returns: - Agent: The created agent. + Parses the orders from the SwarmSpec and executes them through the agents. + :param orders: The SwarmSpec containing the orders to be parsed. """ - # Unwrap all of the agent specifications - # agent_name = agent_spec["agent_name"] - # system_prompt = agent_spec["system_prompt"] - # agent_description = agent_spec["agent_description"] - - # Create the agent - agent_name = Agent( - agent_name=name, - llm=self.template_base_worker_llm, # Switch to model router here later - system_prompt=system_prompt, - agent_description=description, - max_loops=1, - retry_attempts=1, - verbose=False, - dashboard=False, - ) + self.add_goal_and_more_in_conversation(orders) - # Add agents into the registry - self.agents.append(agent_name) + orders_list = self.parse_swarm_spec(orders) - return agent_name - - def create_agents_from_func_call(self, function_call: dict): - """ - Create agents from the function call. + try: - Args: - function_call (dict): The function call containing the agent specifications. + # Example of passing the parsed data to an agent + for order in orders_list: + out = self.run_agent( + agent_name=order.agent_name, + task=order.task, + ) - Returns: - List[Agent]: A list of created agents. + return out + except Exception as e: + logger.error(f"Error parsing orders: {e}") + return "Error parsing orders" + def parse_swarm_spec(self, swarm_spec: SwarmSpec) -> None: """ - logger.info("Creating agents from the function call") - for agent_spec in function_call["multiple_agents"]: - agent = self.create_single_agent( - name=agent_spec["agent_name"], - system_prompt=agent_spec["system_prompt"], - description=agent_spec["agent_description"], - ) - - logger.info( - f"Created agent: {agent.agent_name} with description: {agent.description}" - ) - - self.agents.append(agent) + Parses the SwarmSpec to extract the orders. - def plan(self, task: str) -> str: + :param swarm_spec: The SwarmSpec to be parsed. + :return: The list of orders extracted from the SwarmSpec. """ - Plans the tasks for the agents in the swarm. + orders_list = swarm_spec.orders - Args: - task (str): The task to be planned. - - Returns: - str: The planned task for the agents. + # return the orders_list + return orders_list + def provide_feedback(self, agent_name: str, out: str) -> None: """ - logger.info("Director is planning the task") - - self.director.system_prompt = self.director_planning_prompt - - def log_director_function_call(self, function_call: dict): - # Log the agents the boss makes\ - logger.info(f"Swarm Name: {function_call['swarm_name']}") - # Log the plan - logger.info(f"Plan: {function_call['plan']}") - logger.info( - f"Number of agents: {len(function_call['multiple_agents'])}" - ) + Provides feedback to an agent based on its output. - for agent in function_call["multiple_agents"]: - logger.info(f"Agent: {agent['agent_name']}") - # logger.info(f"Task: {agent['task']}") - logger.info(f"Description: {agent['agent_description']}") - - def run_worker_agent( - self, name: str = None, task: str = None, *args, **kwargs - ): + :param agent_name: The name of the agent to provide feedback to. + :param out: The output of the agent to base the feedback on. """ - Run the worker agent. + orders = self.director.run( + task=f"Provide feedback to {agent_name} on their output: {out}" + ) - Args: - name (str): The name of the worker agent. - task (str): The task to send to the worker agent. + orders_list = self.parse_swarm_spec(orders) - Returns: - str: The response from the worker agent. + for order in orders_list: + out = self.run_agent( + agent_name=order.agent_name, + task=order.task, + ) - Raises: - Exception: If an error occurs while running the worker agent. + return out + def add_goal_and_more_in_conversation( + self, swarm_spec: SwarmSpec + ) -> None: """ - try: - # Find the agent by name - agent = self.find_agent_by_name(name) - - # Run the agent - response = agent.run(task, *args, **kwargs) - - return response - except Exception as e: - logger.error(f"Error: {e}") - raise e + Adds the swarm's goals, plan, and rules to the conversation. - def list_agents(self) -> str: - logger.info("Listing agents available in the swarm") - - for agent in self.agents: - name = agent.agent_name - description = ( - agent.description or "No description available." - ) - logger.info(f"Agent: {name}, Description: {description}") - - def list_agents_available(self): - number_of_agents_available = len(self.agents) + :param swarm_spec: The SwarmSpec containing the goals, plan, and rules. + """ + goals = swarm_spec.goals + plan = swarm_spec.plan + rules = swarm_spec.rules - agent_list = "\n".join( - [ - f"Agent {agent.agent_name}: Description {agent.description}" - for agent in self.agents - ] + self.conversation.add( + role="Director", + content=f"Goals: {goals}\nPlan: {plan}\nRules: {rules}", ) - prompt = f""" - There are currently {number_of_agents_available} agents available in the swarm. - - Agents Available: - {agent_list} + def batch_run(self, tasks: List[str]) -> List[str]: """ - - return prompt - - def find_agent_by_name( - self, agent_name: str = None, *args, **kwargs - ): + Batch run the swarm with the given tasks. """ - Finds an agent in the swarm by name. - - Args: - agent_name (str): The name of the agent to find. - - Returns: - Agent: The agent with the specified name, or None if not found. + return [self.run(task) for task in tasks] + def concurrent_run(self, tasks: List[str]) -> List[str]: """ - for agent in self.agents: - if agent.name == agent_name: - return agent - return None + Concurrent run the swarm with the given tasks. + """ + with ThreadPoolExecutor(max_workers=len(tasks)) as executor: + return list(executor.map(self.run, tasks)) diff --git a/swarms/structs/majority_voting.py b/swarms/structs/majority_voting.py index 18738aa0..dd8deac4 100644 --- a/swarms/structs/majority_voting.py +++ b/swarms/structs/majority_voting.py @@ -1,11 +1,15 @@ +import asyncio import concurrent.futures +import os import re from collections import Counter +from concurrent.futures import ThreadPoolExecutor from typing import Any, Callable, List, Optional from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation -from swarms.utils.file_processing import create_file +from swarms.structs.multi_agent_exec import run_agents_concurrently +from swarms.utils.formatter import formatter from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="majority_voting") @@ -139,36 +143,42 @@ class MajorityVoting: description: str = "A majority voting system for agents", agents: List[Agent] = [], output_parser: Optional[Callable] = majority_voting, + consensus_agent: Optional[Agent] = None, autosave: bool = False, verbose: bool = False, + max_loops: int = 1, *args, **kwargs, ): + self.name = name + self.description = description self.agents = agents self.output_parser = output_parser + self.consensus_agent = consensus_agent self.autosave = autosave self.verbose = verbose + self.max_loops = max_loops self.conversation = Conversation( time_enabled=True, *args, **kwargs ) - # If autosave is enabled, save the conversation to a file - if self.autosave: - create_file( - str(self.conversation), "majority_voting.json" - ) + self.initialize_majority_voting() + + def initialize_majority_voting(self): + + if self.agents is None: + raise ValueError("Agents list is empty") # Log the agents - logger.info("Initializing majority voting system") - # Length of agents - logger.info(f"Number of agents: {len(self.agents)}") - logger.info( - "Agents:" - f" {', '.join(agent.agent_name for agent in self.agents)}" + formatter.print_panel( + f"Initializing majority voting system\nNumber of agents: {len(self.agents)}\nAgents: {', '.join(agent.agent_name for agent in self.agents)}", + title="Majority Voting", ) - def run(self, task: str, *args, **kwargs) -> List[Any]: + def run( + self, task: str, correct_answer: str, *args, **kwargs + ) -> List[Any]: """ Runs the majority voting system and returns the majority vote. @@ -181,44 +191,113 @@ class MajorityVoting: List[Any]: The majority vote. """ - # Route to each agent - with concurrent.futures.ThreadPoolExecutor() as executor: - logger.info("Running agents concurrently") - - futures = [ - executor.submit(agent.run, task, *args) - for agent in self.agents - ] - results = [ - future.result() - for future in concurrent.futures.as_completed(futures) - ] + results = run_agents_concurrently( + self.agents, task, max_workers=os.cpu_count() + ) # Add responses to conversation and log them for agent, response in zip(self.agents, results): + response = ( response if isinstance(response, list) else [response] ) self.conversation.add(agent.agent_name, response) - logger.info( - f"[Agent][Name: {agent.agent_name}][Response:" - f" {response}]" - ) - # Perform majority voting on the conversation - responses = [ - message["content"] - for message in self.conversation.conversation_history - if message["role"] == "agent" - ] + responses = self.conversation.return_history_as_string() + print(responses) + + prompt = f"""Conduct a detailed majority voting analysis on the following conversation: + {responses} + + Between the following agents: {[agent.agent_name for agent in self.agents]} + + Please: + 1. Identify the most common answer/recommendation across all agents + 2. Analyze any major disparities or contrasting viewpoints between agents + 3. Highlight key areas of consensus and disagreement + 4. Evaluate the strength of the majority opinion + 5. Note any unique insights from minority viewpoints + 6. Provide a final synthesized recommendation based on the majority consensus + + Focus on finding clear patterns while being mindful of important nuances in the responses. + """ # If an output parser is provided, parse the responses - if self.output_parser is not None: - majority_vote = self.output_parser( - responses, *args, **kwargs + if self.consensus_agent is not None: + majority_vote = self.consensus_agent.run( + prompt + ) + + self.conversation.add( + self.consensus_agent.agent_name, majority_vote ) else: - majority_vote = majority_voting(responses) + # fetch the last agent + majority_vote = self.agents[-1].run(prompt) + + self.conversation.add( + self.agents[-1].agent_name, majority_vote + ) # Return the majority vote - return majority_vote + return self.conversation.return_history_as_string() + + def batch_run( + self, tasks: List[str], *args, **kwargs + ) -> List[Any]: + """ + Runs the majority voting system in batch mode. + + Args: + tasks (List[str]): List of tasks to be performed by the agents. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Returns: + List[Any]: List of majority votes for each task. + """ + return [self.run(task, *args, **kwargs) for task in tasks] + + def run_concurrently( + self, tasks: List[str], *args, **kwargs + ) -> List[Any]: + """ + Runs the majority voting system concurrently. + + Args: + tasks (List[str]): List of tasks to be performed by the agents. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Returns: + List[Any]: List of majority votes for each task. + """ + with ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + futures = [ + executor.submit(self.run, task, *args, **kwargs) + for task in tasks + ] + return [ + future.result() + for future in concurrent.futures.as_completed(futures) + ] + + async def run_async( + self, tasks: List[str], *args, **kwargs + ) -> List[Any]: + """ + Runs the majority voting system concurrently using asyncio. + + Args: + tasks (List[str]): List of tasks to be performed by the agents. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Returns: + List[Any]: List of majority votes for each task. + """ + return await asyncio.gather( + *[self.run(task, *args, **kwargs) for task in tasks] + ) diff --git a/swarms/structs/matrix_swarm.py b/swarms/structs/matrix_swarm.py new file mode 100644 index 00000000..179e88b5 --- /dev/null +++ b/swarms/structs/matrix_swarm.py @@ -0,0 +1,306 @@ +import json +from typing import Any, List + +from loguru import logger +from pydantic import BaseModel, Field + +from swarms import Agent + + +class AgentOutput(BaseModel): + """ + Schema for capturing metadata and results of an agent run. + """ + + agent_name: str = Field(..., description="Name of the agent.") + input_query: str = Field( + ..., description="Input query provided to the agent." + ) + output_result: Any = Field( + ..., description="Result produced by the agent." + ) + metadata: dict = Field( + ..., description="Additional metadata about the agent run." + ) + + +class MatrixSwarm: + """ + A class to manage a matrix of agents and perform matrix operations similar to linear algebra. + """ + + def __init__(self, agents: List[List[Agent]]): + """ + Initializes the MatrixSwarm with a 2D list of agents. + Args: + agents (List[List[Agent]]): 2D list of agents representing the matrix. + """ + if not agents or not all( + isinstance(row, list) for row in agents + ): + raise ValueError("Agents must be provided as a 2D list.") + if not all( + isinstance(agent, Agent) + for row in agents + for agent in row + ): + raise ValueError( + "All elements of the matrix must be instances of `Agent`." + ) + self.agents = agents + self.outputs = [] # List to store outputs as AgentOutput + + def validate_dimensions(self, other: "MatrixSwarm") -> None: + """ + Validates that two matrices have compatible dimensions for operations. + + Args: + other (MatrixSwarm): Another MatrixSwarm. + + Raises: + ValueError: If dimensions are incompatible. + """ + if len(self.agents) != len(other.agents) or len( + self.agents[0] + ) != len(other.agents[0]): + raise ValueError( + "Matrix dimensions are incompatible for this operation." + ) + + def transpose(self) -> "MatrixSwarm": + """ + Transposes the matrix of agents (swap rows and columns). + + Returns: + MatrixSwarm: A new transposed MatrixSwarm. + """ + transposed_agents = [ + [self.agents[j][i] for j in range(len(self.agents))] + for i in range(len(self.agents[0])) + ] + return MatrixSwarm(transposed_agents) + + def add(self, other: "MatrixSwarm") -> "MatrixSwarm": + """ + Adds two matrices element-wise. + + Args: + other (MatrixSwarm): Another MatrixSwarm to add. + + Returns: + MatrixSwarm: A new MatrixSwarm resulting from the addition. + """ + self.validate_dimensions(other) + added_agents = [ + [self.agents[i][j] for j in range(len(self.agents[i]))] + for i in range(len(self.agents)) + ] + return MatrixSwarm(added_agents) + + def scalar_multiply(self, scalar: int) -> "MatrixSwarm": + """ + Scales the agents by duplicating them scalar times along the row. + + Args: + scalar (int): The scalar multiplier. + + Returns: + MatrixSwarm: A new MatrixSwarm where each agent is repeated scalar times along the row. + """ + scaled_agents = [ + [agent for _ in range(scalar) for agent in row] + for row in self.agents + ] + return MatrixSwarm(scaled_agents) + + def multiply( + self, other: "MatrixSwarm", inputs: List[str] + ) -> List[List[AgentOutput]]: + """ + Multiplies two matrices (dot product between rows and columns). + + Args: + other (MatrixSwarm): Another MatrixSwarm for multiplication. + inputs (List[str]): A list of input queries for the agents. + + Returns: + List[List[AgentOutput]]: A resulting matrix of outputs after multiplication. + """ + if len(self.agents[0]) != len(other.agents): + raise ValueError( + "Matrix dimensions are incompatible for multiplication." + ) + + results = [] + for i, row in enumerate(self.agents): + row_results = [] + for col_idx in range(len(other.agents[0])): + col = [ + other.agents[row_idx][col_idx] + for row_idx in range(len(other.agents)) + ] + query = inputs[ + i + ] # Input query for the corresponding row + intermediate_result = [] + + for agent_r, agent_c in zip(row, col): + try: + result = agent_r.run(query) + intermediate_result.append(result) + except Exception as e: + intermediate_result.append(f"Error: {e}") + + # Aggregate outputs from dot product + combined_result = " ".join( + intermediate_result + ) # Example aggregation + row_results.append( + AgentOutput( + agent_name=f"DotProduct-{i}-{col_idx}", + input_query=query, + output_result=combined_result, + metadata={"row": i, "col": col_idx}, + ) + ) + results.append(row_results) + return results + + def subtract(self, other: "MatrixSwarm") -> "MatrixSwarm": + """ + Subtracts two matrices element-wise. + + Args: + other (MatrixSwarm): Another MatrixSwarm to subtract. + + Returns: + MatrixSwarm: A new MatrixSwarm resulting from the subtraction. + """ + self.validate_dimensions(other) + subtracted_agents = [ + [self.agents[i][j] for j in range(len(self.agents[i]))] + for i in range(len(self.agents)) + ] + return MatrixSwarm(subtracted_agents) + + def identity(self, size: int) -> "MatrixSwarm": + """ + Creates an identity matrix of agents with size `size`. + + Args: + size (int): Size of the identity matrix (NxN). + + Returns: + MatrixSwarm: An identity MatrixSwarm. + """ + identity_agents = [ + [ + ( + self.agents[i][j] + if i == j + else Agent( + agent_name=f"Zero-Agent-{i}-{j}", + system_prompt="", + ) + ) + for j in range(size) + ] + for i in range(size) + ] + return MatrixSwarm(identity_agents) + + def determinant(self) -> Any: + """ + Computes the determinant of a square MatrixSwarm. + + Returns: + Any: Determinant of the matrix (as agent outputs). + """ + if len(self.agents) != len(self.agents[0]): + raise ValueError( + "Determinant can only be computed for square matrices." + ) + + # Recursive determinant calculation (example using placeholder logic) + if len(self.agents) == 1: + return self.agents[0][0].run("Compute determinant") + + det_result = 0 + for i in range(len(self.agents)): + submatrix = MatrixSwarm( + [row[:i] + row[i + 1 :] for row in self.agents[1:]] + ) + cofactor = ((-1) ** i) * self.agents[0][i].run( + "Compute determinant" + ) + det_result += cofactor * submatrix.determinant() + return det_result + + def save_to_file(self, path: str) -> None: + """ + Saves the agent matrix structure and metadata to a file. + + Args: + path (str): File path to save the matrix. + """ + try: + matrix_data = { + "agents": [ + [agent.agent_name for agent in row] + for row in self.agents + ], + "outputs": [output.dict() for output in self.outputs], + } + with open(path, "w") as f: + json.dump(matrix_data, f, indent=4) + logger.info(f"MatrixSwarm saved to {path}") + except Exception as e: + logger.error(f"Error saving MatrixSwarm: {e}") + + +# # Example usage +# if __name__ == "__main__": +# from swarms.prompts.finance_agent_sys_prompt import ( +# FINANCIAL_AGENT_SYS_PROMPT, +# ) + +# # Create a 3x3 matrix of agents +# agents = [ +# [ +# Agent( +# agent_name=f"Agent-{i}-{j}", +# system_prompt=FINANCIAL_AGENT_SYS_PROMPT, +# model_name="gpt-4o-mini", +# max_loops=1, +# autosave=True, +# dashboard=False, +# verbose=True, +# dynamic_temperature_enabled=True, +# saved_state_path=f"agent_{i}_{j}.json", +# user_name="swarms_corp", +# retry_attempts=1, +# context_length=200000, +# return_step_meta=False, +# output_type="string", +# streaming_on=False, +# ) +# for j in range(3) +# ] +# for i in range(3) +# ] + +# # Initialize the matrix +# agent_matrix = MatrixSwarm(agents) + +# # Example queries +# inputs = [ +# "Explain Roth IRA benefits", +# "Differences between ETFs and mutual funds", +# "How to create a diversified portfolio", +# ] + +# # Run agents +# outputs = agent_matrix.multiply(agent_matrix.transpose(), inputs) + +# # Save results +# agent_matrix.save_to_file("agent_matrix_results.json") diff --git a/swarms/structs/meme_agent_persona_generator.py b/swarms/structs/meme_agent_persona_generator.py new file mode 100644 index 00000000..0fb19cc4 --- /dev/null +++ b/swarms/structs/meme_agent_persona_generator.py @@ -0,0 +1,292 @@ +import json +import os +from typing import List + + +from dotenv import load_dotenv +from loguru import logger +from pydantic import BaseModel, Field + +from swarms.structs.agent import Agent +from swarms.structs.swarm_router import SwarmRouter +from swarms.utils.function_caller_model import OpenAIFunctionCaller + +load_dotenv() + + +class MemeAgentConfig(BaseModel): + """Configuration for an individual meme agent in a swarm""" + + name: str = Field( + description="The name of the meme agent", + example="Meme-Generator-Agent", + ) + description: str = Field( + description="A description of the meme agent's purpose and capabilities", + example="Agent responsible for generating and sharing memes", + ) + system_prompt: str = Field( + description="The system prompt that defines the meme agent's behavior. Make this prompt as detailed and as extensive as possible.", + example="You are a meme generator agent. Your role is to create and share funny memes...", + ) + + +class MemeSwarmConfig(BaseModel): + """Configuration for a swarm of cooperative meme agents""" + + name: str = Field( + description="The name of the meme swarm", + example="Meme-Creation-Swarm", + ) + description: str = Field( + description="The description of the meme swarm's purpose and capabilities", + example="A swarm of agents that work together to generate and share memes", + ) + agents: List[MemeAgentConfig] = Field( + description="The list of meme agents that make up the swarm", + example=[ + MemeAgentConfig( + name="Meme-Generator-Agent", + description="Generates memes", + system_prompt="You are a meme generator agent...", + ), + MemeAgentConfig( + name="Meme-Sharer-Agent", + description="Shares memes", + system_prompt="You are a meme sharer agent...", + ), + ], + ) + max_loops: int = Field( + description="The maximum number of meme generation loops to run the swarm", + example=1, + ) + + +BOSS_SYSTEM_PROMPT = """ +You are the Meme Generator Boss, responsible for creating and managing a swarm of agents that generate funny, weird, and cool personas. Your goal is to ensure that each agent is uniquely suited to create hilarious and entertaining content. + +### Instructions: + +1. **Persona Generation**: + - Analyze the type of meme or content required. + - Assign tasks to existing agents with a fitting persona, ensuring they understand the tone and style needed. + - If no suitable agent exists, create a new agent with a persona tailored to the task, including a system prompt that outlines their role, objectives, and creative liberties. + +2. **Agent Persona Creation**: + - Name agents based on their persona or the type of content they generate (e.g., "Dank Meme Lord" or "Surreal Humor Specialist"). + - Provide each new agent with a system prompt that outlines their persona, including their tone, style, and any specific themes or topics they should focus on. + +3. **Creativity and Originality**: + - Encourage agents to think outside the box and come up with unique, humorous, and entertaining content. + - Foster an environment where agents can experiment with different styles and formats to keep content fresh and engaging. + +4. **Communication and Feedback**: + - Clearly communicate the requirements and expectations for each task to ensure agents understand what is needed. + - Encourage agents to provide feedback on their creative process and suggest new ideas or directions for future content. + +5. **Transparency and Accountability**: + - Maintain transparency in the selection or creation of agents for specific tasks, ensuring that the reasoning behind each decision is clear. + - Hold agents accountable for the content they generate, ensuring it meets the required standards of humor and creativity. + +# Output Format + +Present your plan in a clear, bullet-point format or short concise paragraphs, outlining persona generation, agent creation, creativity strategies, and communication protocols. + +# Notes + +- Ensure that agents understand the importance of originality and creativity in their content. +- Foster a culture of experimentation and continuous improvement to keep the content generated by agents fresh and engaging. +""" + + +class MemeAgentGenerator: + """A class that automatically builds and manages swarms of AI agents. + + This class handles the creation, coordination and execution of multiple AI agents working + together as a swarm to accomplish complex tasks. It uses a boss agent to delegate work + and create new specialized agents as needed. + + Args: + name (str): The name of the swarm + description (str): A description of the swarm's purpose + verbose (bool, optional): Whether to output detailed logs. Defaults to True. + max_loops (int, optional): Maximum number of execution loops. Defaults to 1. + """ + + def __init__( + self, + name: str = None, + description: str = None, + verbose: bool = True, + max_loops: int = 1, + ): + self.name = name + self.description = description + self.verbose = verbose + self.max_loops = max_loops + self.agents_pool = [] + logger.info( + f"Initialized AutoSwarmBuilder: {name} {description}" + ) + + def run(self, task: str, image_url: str = None, *args, **kwargs): + """Run the swarm on a given task. + + Args: + task (str): The task to be accomplished + image_url (str, optional): URL of an image input if needed. Defaults to None. + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + The output from the swarm's execution + """ + logger.info(f"Running swarm on task: {task}") + agents = self._create_agents(task, image_url, *args, **kwargs) + logger.info(f"Agents created {len(agents)}") + logger.info("Routing task through swarm") + output = self.swarm_router(agents, task, image_url) + logger.info(f"Swarm execution complete with output: {output}") + return output + + def _create_agents(self, task: str, *args, **kwargs): + """Create the necessary agents for a task. + + Args: + task (str): The task to create agents for + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + list: List of created agents + """ + logger.info("Creating agents for task") + model = OpenAIFunctionCaller( + system_prompt=BOSS_SYSTEM_PROMPT, + api_key=os.getenv("OPENAI_API_KEY"), + temperature=0.1, + base_model=MemeSwarmConfig, + ) + + agents_dictionary = model.run(task) + print(agents_dictionary) + + agents_dictionary = json.loads(agents_dictionary) + + if isinstance(agents_dictionary, dict): + agents_dictionary = MemeSwarmConfig(**agents_dictionary) + else: + raise ValueError( + "Agents dictionary is not a valid dictionary" + ) + + # Set swarm config + self.name = agents_dictionary.name + self.description = agents_dictionary.description + + logger.info( + f"Swarm config: {self.name}, {self.description}, {self.max_loops}" + ) + + # Create agents from config + agents = [] + for agent_config in agents_dictionary.agents: + # Convert dict to AgentConfig if needed + if isinstance(agent_config, dict): + agent_config = MemeAgentConfig(**agent_config) + + agent = self.build_agent( + agent_name=agent_config.name, + agent_description=agent_config.description, + agent_system_prompt=agent_config.system_prompt, + ) + agents.append(agent) + + return agents + + def build_agent( + self, + agent_name: str, + agent_description: str, + agent_system_prompt: str, + max_loops: int = 1, + ): + """Build a single agent with the given specifications. + + Args: + agent_name (str): Name of the agent + agent_description (str): Description of the agent's purpose + agent_system_prompt (str): The system prompt for the agent + + Returns: + Agent: The constructed agent instance + """ + logger.info(f"Building agent: {agent_name}") + agent = Agent( + agent_name=agent_name, + description=agent_description, + system_prompt=agent_system_prompt, + model_name="gpt-4o-mini", + max_loops=max_loops, + autosave=True, + dashboard=False, + verbose=True, + dynamic_temperature_enabled=True, + saved_state_path=f"{agent_name}.json", + user_name="swarms_corp", + retry_attempts=1, + context_length=200000, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" soon "yaml" and + streaming_on=False, + # auto_generate_prompt=True, + ) + + return agent + + def swarm_router( + self, + agents: List[Agent], + task: str, + *args, + **kwargs, + ): + """Route tasks between agents in the swarm. + + Args: + agents (List[Agent]): List of available agents + task (str): The task to route + image_url (str, optional): URL of an image input if needed. Defaults to None. + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + The output from the routed task execution + """ + logger.info("Routing task through swarm") + swarm_router_instance = SwarmRouter( + name=self.name, + description=self.description, + agents=agents, + swarm_type="auto", + max_loops=1, + ) + + return swarm_router_instance.run( + self.name + " " + self.description + " " + task, + ) + + +if __name__ == "__main__": + example = MemeAgentGenerator( + name="Meme-Swarm", + description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media from 2001s", + max_loops=1, + ) + + print( + example.run( + "Generate funny meme agents around cool media from 2001s" + ) + ) diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py index e91d565f..f5019c04 100644 --- a/swarms/structs/mixture_of_agents.py +++ b/swarms/structs/mixture_of_agents.py @@ -1,14 +1,17 @@ import asyncio +import os import time from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field from swarms.structs.agent import Agent -from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.telemetry.main import log_agent_data from swarms.schemas.agent_step_schemas import ManySteps from swarms.prompts.ag_prompt import aggregator_system_prompt from swarms.utils.loguru_logger import initialize_logger +from swarms.utils.any_to_str import any_to_str +import concurrent.futures logger = initialize_logger(log_folder="mixture_of_agents") @@ -64,6 +67,8 @@ class MixtureOfAgents: aggregator_agent: Agent = None, aggregator_system_prompt: str = "", layers: int = 3, + max_loops: int = 1, + return_str_on: bool = False, ) -> None: """ Initialize the Mixture of Agents class with agents and configuration. @@ -82,6 +87,8 @@ class MixtureOfAgents: self.aggregator_agent: Agent = aggregator_agent self.aggregator_system_prompt: str = aggregator_system_prompt self.layers: int = layers + self.max_loops: int = max_loops + self.return_str_on: bool = return_str_on self.input_schema = MixtureOfAgentsInput( name=name, @@ -233,10 +240,61 @@ class MixtureOfAgents: Args: task (str): The task for the mixture of agents. """ - asyncio.run(self._run_async(task)) + try: + prev_context = None + + for _ in range(self.max_loops): + # Add previous context to task if available + current_task = ( + f"{task}\n\nPrevious context:\n{prev_context}" + if prev_context + else task + ) - self.output_schema.task = task + # Run async process + asyncio.run(self._run_async(current_task)) + + # Store current results as context for next loop + prev_context = ( + self.output_schema.aggregator_agent_summary + ) + + self.output_schema.task = task + + log_agent_data(self.output_schema.model_dump()) + + if self.return_str_on: + return any_to_str(self.output_schema.model_dump()) + else: + return self.output_schema.model_dump_json(indent=4) + + except Exception as e: + logger.error(f"Error running mixture of agents: {str(e)}") + raise e + + def run_batched(self, tasks: List[str]) -> List[str]: + """ + Run the mixture of agents for a batch of tasks. + + Args: + tasks (List[str]): A list of tasks for the mixture of agents. - log_agent_data(self.output_schema.model_dump()) + Returns: + List[str]: A list of responses from the mixture of agents. + """ + return [self.run(task) for task in tasks] - return self.output_schema.model_dump_json(indent=4) + def run_concurrently(self, tasks: List[str]) -> List[str]: + """ + Run the mixture of agents for a batch of tasks concurrently. + """ + with concurrent.futures.ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: + futures = [ + executor.submit(self.run, task) for task in tasks + ] + return [ + future.result() + for future in concurrent.futures.as_completed(futures) + ] diff --git a/swarms/structs/model_router.py b/swarms/structs/model_router.py new file mode 100644 index 00000000..32015e4a --- /dev/null +++ b/swarms/structs/model_router.py @@ -0,0 +1,381 @@ +import asyncio +import os +from concurrent.futures import ThreadPoolExecutor +from typing import Optional +from pydantic import BaseModel, Field + +from swarms.utils.function_caller_model import OpenAIFunctionCaller +from swarms.utils.any_to_str import any_to_str +from swarms.utils.formatter import formatter +from swarms.utils.litellm_wrapper import LiteLLM + +model_recommendations = { + "gpt-4o": { + "description": "Fast and efficient for simple tasks and general queries", + "best_for": [ + "Simple queries", + "Basic text generation", + "Quick responses", + "Everyday tasks", + ], + "provider": "openai", + }, + "gpt-4-turbo": { + "description": "Latest GPT-4 model with improved capabilities and knowledge cutoff", + "best_for": [ + "Complex tasks", + "Up-to-date knowledge", + "Long context understanding", + ], + "provider": "openai", + }, + "gpt-3.5-turbo": { + "description": "Fast and cost-effective model for straightforward tasks", + "best_for": [ + "Chat applications", + "Content generation", + "Basic assistance", + ], + "provider": "openai", + }, + "o3-mini": { + "description": "Lightweight model good for basic tasks with lower compute requirements", + "best_for": [ + "Basic text completion", + "Simple classification", + "Resource-constrained environments", + ], + "provider": "groq", + }, + "deepseek-reasoner": { + "description": "Specialized for complex reasoning and analytical tasks", + "best_for": [ + "Complex problem solving", + "Logical reasoning", + "Mathematical analysis", + "High IQ tasks", + ], + "provider": "deepseek", + }, + "claude-3-5-sonnet": { + "description": "Well-rounded model with strong reasoning and creativity", + "best_for": [ + "Creative writing", + "Detailed analysis", + "Nuanced responses", + ], + "provider": "anthropic", + }, + "claude-3-opus": { + "description": "Most capable Claude model with enhanced reasoning and analysis", + "best_for": [ + "Research", + "Complex analysis", + "Technical writing", + "Code generation", + ], + "provider": "anthropic", + }, + "gemini-pro": { + "description": "Google's advanced model with strong general capabilities", + "best_for": [ + "Multimodal tasks", + "Code generation", + "Creative content", + ], + "provider": "google", + }, + "mistral-large": { + "description": "Open source model with strong performance across tasks", + "best_for": [ + "General purpose tasks", + "Code assistance", + "Content generation", + ], + "provider": "mistral", + }, +} + +providers = { + "openai": "Primary provider for GPT models", + "groq": "High-performance inference provider", + "anthropic": "Provider of Claude models", + "google": "Provider of PaLM and Gemini models", + "azure": "Cloud platform for various model deployments", + "deepseek": "Provider of specialized reasoning models", + "mistral": "Provider of open source and commercial language models", +} + + +class ModelOutput(BaseModel): + rationale: Optional[str] + model: Optional[str] + provider: Optional[str] + task: Optional[str] = Field( + description="The task to be executed for the model. It should be a clear, concise, and detailed task that the model can execute. It should only include details of the task, not the reasoning or the rationale, model, provider, or anything else. Do not include any other information in the task." + ) + max_tokens: Optional[int] = Field( + description="The maximum number of tokens to use for the model" + ) + temperature: Optional[float] = Field( + description="The temperature to use for the model" + ) + system_prompt: Optional[str] = Field( + description="The system prompt to use for the model. Leverge the best techniques to make the model perform the best. Make sure the prompt is clear, extensive, and detailed." + ) + + +providers = any_to_str(providers) +model_recommendations = any_to_str(model_recommendations) +data = f"Providers: {providers}\nModel Recommendations: {model_recommendations}" + +model_router_system_prompt = f""" +You are an expect model router responsible for recommending the optimal AI model for specific tasks. + +Available Models and Their Strengths: +- GPT-4: Best for complex reasoning, coding, and analysis requiring strong logical capabilities +- GPT-3.5-Turbo: Efficient for straightforward tasks, chat, and basic content generation +- Claude-3-Opus: Excels at research, technical writing, and in-depth analysis with strong reasoning +- Claude-3-Sonnet: Well-balanced for creative writing and nuanced responses +- Gemini Pro: Strong at multimodal tasks and code generation +- Mistral Large: Versatile open source model good for general tasks + +Provider Considerations: +- OpenAI: Industry standard with consistent performance +- Anthropic: Known for safety and detailed analysis +- Google: Strong technical capabilities and multimodal support +- Groq: Optimized for high-speed inference +- Mistral: Balance of open source and commercial offerings + +Data: +{data} + +When Making Recommendations Consider: +1. Task requirements (complexity, creativity, technical needs) +2. Performance characteristics needed (speed, accuracy, reliability) +3. Special capabilities required (code generation, analysis, etc) +4. Cost and efficiency tradeoffs +5. Provider-specific strengths and limitations + +Provide clear rationale for your model selection based on the specific task requirements. +""" + + +class ModelRouter: + """ + A router class that intelligently selects and executes AI models based on task requirements. + + The ModelRouter uses a function calling model to analyze tasks and recommend the optimal + model and provider combination, then executes the task using the selected model. + + Attributes: + system_prompt (str): Prompt that guides model selection behavior + max_tokens (int): Maximum tokens for model outputs + temperature (float): Temperature parameter for model randomness + max_workers (int): Maximum concurrent workers for batch processing + model_output (ModelOutput): Pydantic model for structured outputs + model_caller (OpenAIFunctionCaller): Function calling interface + """ + + def __init__( + self, + system_prompt: str = model_router_system_prompt, + max_tokens: int = 4000, + temperature: float = 0.5, + max_workers: int = 10, + api_key: str = None, + max_loops: int = 1, + *args, + **kwargs, + ): + """ + Initialize the ModelRouter. + + Args: + system_prompt (str): Prompt for model selection guidance + max_tokens (int): Maximum output tokens + temperature (float): Model temperature parameter + max_workers (int): Max concurrent workers + *args: Additional positional arguments + **kwargs: Additional keyword arguments + """ + try: + self.system_prompt = system_prompt + self.max_tokens = max_tokens + self.temperature = temperature + self.max_workers = max_workers + self.model_output = ModelOutput + self.max_loops = max_loops + + if self.max_workers == "auto": + self.max_workers = os.cpu_count() + + self.model_caller = OpenAIFunctionCaller( + base_model=ModelOutput, + temperature=self.temperature, + system_prompt=self.system_prompt, + api_key=api_key, + ) + except Exception as e: + raise RuntimeError( + f"Failed to initialize ModelRouter: {str(e)}" + ) + + def step(self, task: str): + """ + Run a single task through the model router. + + Args: + task (str): The task to be executed + + Returns: + str: The model's output for the task + + Raises: + RuntimeError: If model selection or execution fails + """ + model_router_output = self.model_caller.run(task) + + selected_model = model_router_output.model + selected_provider = model_router_output.provider + routed_task = model_router_output.task + rationale = model_router_output.rationale + max_tokens = model_router_output.max_tokens + temperature = model_router_output.temperature + system_prompt = model_router_output.system_prompt + + formatter.print_panel( + f"Model: {selected_model}\n\n" + f"Provider: {selected_provider}\n\n" + f"Task: {routed_task}\n\n" + f"Rationale: {rationale}\n\n" + f"Max Tokens: {max_tokens}\n\n" + f"Temperature: {temperature}\n\n" + f"System Prompt: {system_prompt}", + title="Model Router Output", + ) + + litellm_wrapper = LiteLLM( + model_name=f"{selected_provider}/{selected_model}", + max_tokens=max_tokens, + temperature=temperature, + system_prompt=system_prompt, + ) + + final_output = litellm_wrapper.run(task=routed_task) + + formatter.print_panel( + f"Output: {final_output} from {selected_provider}/{selected_model}", + title=f"Model: {selected_model} Provider: {selected_provider}", + ) + + return final_output + + def run(self, task: str): + """ + Run a task through the model router with memory. + """ + task_output = task + previous_output = None + for _ in range(self.max_loops): + if task_output == previous_output: + break # Exit if no change in output + previous_output = task_output + task_output = self.step(task_output) + return task_output + + def batch_run(self, tasks: list): + """ + Run multiple tasks in sequence. + + Args: + tasks (list): List of tasks to execute + + Returns: + list: List of outputs for each task + + Raises: + RuntimeError: If batch execution fails + """ + try: + outputs = [] + for task in tasks: + output = self.run(task) + outputs.append(output) + return outputs + except Exception as e: + raise RuntimeError(f"Batch execution failed: {str(e)}") + + def __call__(self, task: str, *args, **kwargs): + """ + Make the class callable to directly execute tasks. + + Args: + task (str): Task to execute + + Returns: + str: Model output + """ + return self.run(task, *args, **kwargs) + + def __batch_call__(self, tasks: list): + """ + Make the class callable for batch execution. + + Args: + tasks (list): List of tasks + + Returns: + list: List of outputs + """ + return self.batch_run(tasks) + + def __str__(self): + return f"ModelRouter(max_tokens={self.max_tokens}, temperature={self.temperature})" + + def __repr__(self): + return f"ModelRouter(max_tokens={self.max_tokens}, temperature={self.temperature})" + + def concurrent_run(self, tasks: list): + """ + Run multiple tasks concurrently using a thread pool. + + Args: + tasks (list): List of tasks to execute concurrently + + Returns: + list: List of outputs from all tasks + + Raises: + RuntimeError: If concurrent execution fails + """ + try: + with ThreadPoolExecutor( + max_workers=self.max_workers + ) as executor: + outputs = list(executor.map(self.run, tasks)) + return outputs + except Exception as e: + raise RuntimeError( + f"Concurrent execution failed: {str(e)}" + ) + + async def async_run(self, task: str, *args, **kwargs): + """ + Run a task asynchronously. + + Args: + task (str): Task to execute asynchronously + + Returns: + asyncio.Task: Async task object + + Raises: + RuntimeError: If async execution fails + """ + try: + return asyncio.create_task( + self.run(task, *args, **kwargs) + ) + except Exception as e: + raise RuntimeError(f"Async execution failed: {str(e)}") diff --git a/swarms/structs/multi_agent_exec.py b/swarms/structs/multi_agent_exec.py index 1ee5add2..28b6c421 100644 --- a/swarms/structs/multi_agent_exec.py +++ b/swarms/structs/multi_agent_exec.py @@ -3,16 +3,19 @@ import os import threading from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass -from multiprocessing import cpu_count from typing import Any, List import psutil from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType -from swarms.utils.wrapper_clusterop import ( - exec_callable_with_clusterops, -) + + +@dataclass +class ResourceMetrics: + cpu_percent: float + memory_percent: float + active_threads: int def run_single_agent(agent: AgentType, task: str) -> Any: @@ -79,7 +82,7 @@ def run_agents_concurrently( List of outputs from each agent """ # Optimize defaults based on system resources - cpu_cores = cpu_count() + cpu_cores = os.cpu_count() batch_size = batch_size or cpu_cores max_workers = max_workers or cpu_cores * 2 @@ -106,7 +109,7 @@ def run_agents_concurrently( def run_agents_concurrently_multiprocess( - agents: List[Agent], task: str, batch_size: int = cpu_count() + agents: List[Agent], task: str, batch_size: int = os.cpu_count() ) -> List[Any]: """ Manage and run multiple agents concurrently in batches, with optimized performance. @@ -173,7 +176,7 @@ def run_agents_with_different_tasks( agent, task = pair return await run_agent_async(agent, task, executor) - cpu_cores = cpu_count() + cpu_cores = os.cpu_count() batch_size = batch_size or cpu_cores max_workers = max_workers or cpu_cores * 2 results = [] @@ -246,7 +249,7 @@ def run_agents_with_timeout( Returns: List of outputs (None for timed out agents) """ - cpu_cores = cpu_count() + cpu_cores = os.cpu_count() batch_size = batch_size or cpu_cores max_workers = max_workers or cpu_cores * 2 results = [] @@ -275,13 +278,6 @@ def run_agents_with_timeout( return results -@dataclass -class ResourceMetrics: - cpu_percent: float - memory_percent: float - active_threads: int - - def get_system_metrics() -> ResourceMetrics: """Get current system resource usage""" return ResourceMetrics( @@ -412,22 +408,9 @@ def run_agents_with_tasks_concurrently( List[Any]: A list of outputs from each agent execution. """ # Make the first agent not use the ifrs - - if no_clusterops: - return _run_agents_with_tasks_concurrently( - agents, tasks, batch_size, max_workers - ) - else: - return exec_callable_with_clusterops( - device, - device_id, - all_cores, - _run_agents_with_tasks_concurrently, - agents, - tasks, - batch_size, - max_workers, - ) + return _run_agents_with_tasks_concurrently( + agents, tasks, batch_size, max_workers + ) # # Example usage: diff --git a/swarms/structs/multi_agent_orchestrator.py b/swarms/structs/multi_agent_orchestrator.py index db845dd6..e3e53004 100644 --- a/swarms/structs/multi_agent_orchestrator.py +++ b/swarms/structs/multi_agent_orchestrator.py @@ -9,15 +9,13 @@ Todo: """ import os -import subprocess import uuid from datetime import datetime from typing import List, Literal, Optional from loguru import logger from pydantic import BaseModel, Field -from tenacity import retry, stop_after_attempt, wait_exponential - +from swarms.utils.function_caller_model import OpenAIFunctionCaller from swarms.structs.agent import Agent @@ -35,88 +33,6 @@ class AgentResponse(BaseModel): ) -class OpenAIFunctionCaller: - """ - A class to interact with the OpenAI API for generating text based on a system prompt and a task. - """ - - def __init__( - self, - system_prompt: str, - api_key: str, - temperature: float, - max_tokens: int = 4000, - model_name: str = "gpt-4-0125-preview", - ): - self.system_prompt = system_prompt - self.api_key = api_key - self.temperature = temperature - self.max_tokens = max_tokens - self.model_name = model_name - - try: - from openai import OpenAI - except ImportError: - logger.error( - "OpenAI library not found. Please install it using 'pip install openai'" - ) - subprocess.run(["pip", "install", "openai"]) - raise - - try: - self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) - except Exception as e: - logger.error( - f"Error initializing OpenAI client: {str(e)}" - ) - raise - - @retry( - stop=stop_after_attempt(3), - wait=wait_exponential(multiplier=1, min=4, max=10), - ) - def get_completion(self, task: str) -> AgentResponse: - """Get completion from OpenAI with retries""" - try: - response = self.client.chat.completions.create( - model=self.model_name, - messages=[ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ], - response_format={"type": "json_object"}, - temperature=self.temperature, - max_tokens=self.max_tokens, - ) - - return AgentResponse.model_validate_json( - response.choices[0].message.content - ) - except Exception as e: - logger.error(f"Error getting completion: {str(e)}") - raise - - def get_agent_response( - self, system_prompt: str, task: str - ) -> str: - """Get agent response without function calling""" - try: - response = self.client.chat.completions.create( - model=self.model_name, - messages=[ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": task}, - ], - temperature=self.temperature, - max_tokens=self.max_tokens, - ) - - return response.choices[0].message.content - except Exception as e: - logger.error(f"Error getting agent response: {str(e)}") - raise - - class MultiAgentRouter: """ Routes tasks to appropriate agents based on their capabilities. diff --git a/swarms/structs/multi_process_workflow.py b/swarms/structs/multi_process_workflow.py deleted file mode 100644 index 7b04c10e..00000000 --- a/swarms/structs/multi_process_workflow.py +++ /dev/null @@ -1,244 +0,0 @@ -from multiprocessing import Manager, Pool, cpu_count -from typing import Sequence, Union, Callable, List -from concurrent.futures import ThreadPoolExecutor, as_completed - -from swarms.structs.agent import Agent -from swarms.structs.base_workflow import BaseWorkflow -from swarms.utils.loguru_logger import initialize_logger - -logger = initialize_logger(log_folder="multi_process_workflow") - - -class MultiProcessWorkflow(BaseWorkflow): - """ - Initialize a MultiProcessWorkflow object. - - Args: - max_workers (int): The maximum number of workers to use for parallel processing. - autosave (bool): Flag indicating whether to automatically save the workflow. - agents (List[Union[Agent, Callable]]): A list of Agent objects or callable functions representing the workflow tasks. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Example: - >>> from swarms.structs.multi_process_workflow import MultiProcessingWorkflow - >>> from swarms.structs.task import Task - >>> from datetime import datetime - >>> from time import sleep - >>> - >>> # Define a simple task - >>> def simple_task(): - >>> sleep(1) - >>> return datetime.now() - >>> - >>> # Create a task object - >>> task = Task( - >>> name="Simple Task", - >>> execute=simple_task, - >>> priority=1, - >>> ) - >>> - >>> # Create a workflow with the task - >>> workflow = MultiProcessingWorkflow(tasks=[task]) - >>> - >>> # Run the workflow - >>> results = workflow.run(task) - >>> - >>> # Print the results - >>> print(results) - """ - - def __init__( - self, - max_workers: int = 5, - autosave: bool = True, - agents: Sequence[Union[Agent, Callable]] = None, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.max_workers = max_workers - self.autosave = autosave - self.agents = agents - - self.max_workers or cpu_count() - - # Log - logger.info( - ( - "Initialized MultiProcessWorkflow with" - f" {self.max_workers} max workers and autosave set to" - f" {self.autosave}" - ), - ) - - # Log the agents - if self.agents is not None: - for agent in self.agents: - logger.info(f"Agent: {agent.agent_name}") - - def execute_task(self, task: str, *args, **kwargs): - """Execute a task and handle exceptions. - - Args: - task (Task): The task to execute. - *args: Additional positional arguments for the task execution. - **kwargs: Additional keyword arguments for the task execution. - - Returns: - Any: The result of the task execution. - - """ - try: - if self.agents is not None: - # Execute the task - for agent in self.agents: - result = agent.run(task, *args, **kwargs) - - return result - - except Exception as e: - logger.error( - ( - "An error occurred during execution of task" - f" {task}: {str(e)}" - ), - ) - return None - - def run(self, task: str, *args, **kwargs): - """Run the workflow. - - Args: - task (Task): The task to run. - *args: Additional positional arguments for the task execution. - **kwargs: Additional keyword arguments for the task execution. - - Returns: - List[Any]: The results of all executed tasks. - - """ - try: - results = [] - with Manager() as manager: - with Pool( - processes=self.max_workers, *args, **kwargs - ) as pool: - # Using manager.list() to collect results in a process safe way - results_list = manager.list() - jobs = [ - pool.apply_async( - self.execute_task, # Pass the function, not the function call - args=(task,) - + args, # Pass the arguments as a tuple - kwds=kwargs, # Pass the keyword arguments as a dictionary - callback=results_list.append, - timeout=task.timeout, - ) - for agent in self.agents - ] - - # Wait for all jobs to complete - for job in jobs: - job.get() - - results = list(results_list) - - return results - except Exception as error: - logger.error(f"Error in run: {error}") - return None - - async def async_run(self, task: str, *args, **kwargs): - """Asynchronously run the workflow. - - Args: - task (Task): The task to run. - *args: Additional positional arguments for the task execution. - **kwargs: Additional keyword arguments for the task execution. - - Returns: - List[Any]: The results of all executed tasks. - - """ - try: - results = [] - with ThreadPoolExecutor( - max_workers=self.max_workers - ) as executor: - futures = [ - executor.submit( - self.execute_task, task, *args, **kwargs - ) - for _ in range(len(self.agents)) - ] - for future in as_completed(futures): - result = future.result() - results.append(result) - - return results - except Exception as error: - logger.error(f"Error in async_run: {error}") - return None - - def batched_run( - self, tasks: List[str], batch_size: int = 5, *args, **kwargs - ): - """Run tasks in batches. - - Args: - tasks (List[str]): A list of tasks to run. - batch_size (int): The size of each batch. - *args: Additional positional arguments for the task execution. - **kwargs: Additional keyword arguments for the task execution. - - Returns: - List[Any]: The results of all executed tasks. - - """ - try: - results = [] - for i in range(0, len(tasks), batch_size): - batch = tasks[i : i + batch_size] - with Pool(processes=self.max_workers) as pool: - results_list = pool.map( - self.execute_task, batch, *args, **kwargs - ) - results.extend(results_list) - - return results - except Exception as error: - logger.error(f"Error in batched_run: {error}") - return None - - def concurrent_run(self, tasks: List[str], *args, **kwargs): - """Run tasks concurrently. - - Args: - tasks (List[str]): A list of tasks to run. - *args: Additional positional arguments for the task execution. - **kwargs: Additional keyword arguments for the task execution. - - Returns: - List[Any]: The results of all executed tasks. - - """ - try: - results = [] - with ThreadPoolExecutor( - max_workers=self.max_workers - ) as executor: - futures = [ - executor.submit( - self.execute_task, task, *args, **kwargs - ) - for task in tasks - ] - for future in as_completed(futures): - result = future.result() - results.append(result) - - return results - except Exception as error: - logger.error(f"Error in concurrent_run: {error}") - return None diff --git a/swarms/structs/omni_agent_types.py b/swarms/structs/omni_agent_types.py index 9a0f3f6a..0e5c09b6 100644 --- a/swarms/structs/omni_agent_types.py +++ b/swarms/structs/omni_agent_types.py @@ -4,12 +4,10 @@ from typing import ( Sequence, Union, ) -from swarm_models.base_llm import BaseLLM -from swarm_models.base_multimodal_model import BaseMultiModalModel from swarms.structs.agent import Agent # Unified type for agent -AgentType = Union[Agent, Callable, Any, BaseLLM, BaseMultiModalModel] +AgentType = Union[Agent, Callable, Any] # List of agents AgentListType = Sequence[AgentType] diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py index 6be885be..b604eb59 100644 --- a/swarms/structs/rearrange.py +++ b/swarms/structs/rearrange.py @@ -13,10 +13,7 @@ from swarms.structs.agents_available import showcase_available_agents from swarms.structs.base_swarm import BaseSwarm from swarms.structs.output_types import OutputType from swarms.utils.loguru_logger import initialize_logger -from swarms.utils.wrapper_clusterop import ( - exec_callable_with_clusterops, -) -from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.telemetry.main import log_agent_data logger = initialize_logger(log_folder="rearrange") @@ -523,29 +520,13 @@ class AgentRearrange(BaseSwarm): The result from executing the task through the cluster operations wrapper. """ try: - no_use_clusterops = ( - no_use_clusterops or self.no_use_clusterops + return self._run( + task=task, + img=img, + *args, + **kwargs, ) - if no_use_clusterops is True: - return self._run( - task=task, - img=img, - *args, - **kwargs, - ) - else: - return exec_callable_with_clusterops( - device=device, - device_id=device_id, - all_cores=all_cores, - all_gpus=all_gpus, - func=self._run, - task=task, - img=img, - *args, - **kwargs, - ) except Exception as e: self._catch_error(e) diff --git a/swarms/structs/spreadsheet_swarm.py b/swarms/structs/spreadsheet_swarm.py index bec80987..90bed7eb 100644 --- a/swarms/structs/spreadsheet_swarm.py +++ b/swarms/structs/spreadsheet_swarm.py @@ -10,7 +10,7 @@ from pydantic import BaseModel, Field from swarms.structs.agent import Agent from swarms.structs.base_swarm import BaseSwarm -from swarms.telemetry.capture_sys_data import log_agent_data +from swarms.telemetry.main import log_agent_data from swarms.utils.file_processing import create_file_in_folder from swarms.utils.loguru_logger import initialize_logger diff --git a/swarms/structs/swarm_arange.py b/swarms/structs/swarm_arange.py index efb880ad..ba1d3e3f 100644 --- a/swarms/structs/swarm_arange.py +++ b/swarms/structs/swarm_arange.py @@ -104,10 +104,6 @@ class SwarmRearrange: # Run the reliability checks self.reliability_checks() - # Logging configuration - if self.verbose: - logger.add("swarm_rearrange.log", rotation="10 MB") - def reliability_checks(self): logger.info("Running reliability checks.") if not self.swarms: diff --git a/swarms/structs/swarm_builder.py b/swarms/structs/swarm_builder.py index eb11474d..0e56188d 100644 --- a/swarms/structs/swarm_builder.py +++ b/swarms/structs/swarm_builder.py @@ -1,11 +1,9 @@ import os -import subprocess from typing import List, Optional from loguru import logger from pydantic import BaseModel, Field from pydantic.v1 import validator -from swarm_models import OpenAIChat from tenacity import ( retry, stop_after_attempt, @@ -14,109 +12,7 @@ from tenacity import ( from swarms.structs.agent import Agent from swarms.structs.swarm_router import SwarmRouter, SwarmType - -logger.add("swarm_builder.log", rotation="10 MB", backtrace=True) - - -class OpenAIFunctionCaller: - """ - A class to interact with the OpenAI API for generating text based on a system prompt and a task. - - Attributes: - - system_prompt (str): The system prompt to guide the AI's response. - - api_key (str): The API key for the OpenAI service. - - temperature (float): The temperature parameter for the AI model, controlling randomness. - - base_model (BaseModel): The Pydantic model to parse the response into. - - max_tokens (int): The maximum number of tokens in the response. - - client (OpenAI): The OpenAI client instance. - """ - - def __init__( - self, - system_prompt: str, - api_key: str, - temperature: float, - base_model: BaseModel, - max_tokens: int = 5000, - ): - self.system_prompt = system_prompt - self.api_key = api_key - self.temperature = temperature - self.base_model = base_model - self.max_tokens = max_tokens - - try: - from openai import OpenAI - except ImportError: - logger.error( - "OpenAI library not found. Please install the OpenAI library by running 'pip install openai'" - ) - subprocess.run(["pip", "install", "openai"]) - from openai import OpenAI - - self.client = OpenAI(api_key=api_key) - - def run(self, task: str, *args, **kwargs) -> BaseModel: - """ - Run the OpenAI model with the system prompt and task to generate a response. - - Args: - - task (str): The task to be completed. - - *args: Additional positional arguments for the OpenAI API. - - **kwargs: Additional keyword arguments for the OpenAI API. - - Returns: - - BaseModel: The parsed response based on the base_model. - """ - completion = self.client.beta.chat.completions.parse( - model="gpt-4o-2024-08-06", - messages=[ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ], - response_format=self.base_model, - temperature=self.temperature, - max_tokens=self.max_tokens, - *args, - **kwargs, - ) - - return completion.choices[0].message.parsed - - @retry( - stop=stop_after_attempt(3), - wait=wait_exponential(multiplier=1, min=4, max=10), - ) - async def run_async( - self, task: str, *args, **kwargs - ) -> BaseModel: - """ - Asynchronous version of the run method. - - Args: - - task (str): The task to be completed. - - *args: Additional positional arguments for the OpenAI API. - - **kwargs: Additional keyword arguments for the OpenAI API. - - Returns: - - BaseModel: The parsed response based on the base_model. - """ - completion = ( - await self.client.beta.chat.completions.parse_async( - model="gpt-4o-2024-08-06", - messages=[ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ], - response_format=self.base_model, - temperature=self.temperature, - max_tokens=self.max_tokens, - *args, - **kwargs, - ) - ) - - return completion.choices[0].message.parsed +from swarms.utils.function_caller_model import OpenAIFunctionCaller BOSS_SYSTEM_PROMPT = """ @@ -295,18 +191,6 @@ class AutoSwarmBuilder: }, ) - # Initialize OpenAI chat model - try: - self.chat_model = OpenAIChat( - openai_api_key=self.api_key, - model_name=self.model_name, - ) - except Exception as e: - logger.error( - f"Failed to initialize OpenAI chat model: {str(e)}" - ) - raise - def run( self, task: str, @@ -444,7 +328,7 @@ class AutoSwarmBuilder: agent_name=agent_name, description=agent_description, system_prompt=agent_system_prompt, - llm=self.chat_model, + model_name="gpt-4o", verbose=self.verbose, dynamic_temperature_enabled=False, return_step_meta=False, diff --git a/swarms/structs/swarm_eval.py b/swarms/structs/swarm_eval.py new file mode 100644 index 00000000..ac47b291 --- /dev/null +++ b/swarms/structs/swarm_eval.py @@ -0,0 +1,326 @@ +import math +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any, Callable, Dict, Optional, Tuple + +from datasets import Dataset, load_dataset +from loguru import logger +from tqdm import tqdm + +# ----------------------------------------------------------------------------- +# Logging configuration: log to console and file (rotating by size) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Swarm interface example +# ----------------------------------------------------------------------------- + + +# ----------------------------------------------------------------------------- +# Benchmark configuration +# ----------------------------------------------------------------------------- +class BenchmarkConfig: + """ + Configuration for a benchmark dataset. + + Attributes: + input_column (str): The column containing the task prompt. + answer_column (str): The column containing the expected answer. + answer_extractor (Optional[Callable[[Any], str]]): Function to extract + a string answer from the dataset's raw answer format. + answer_matcher (Optional[Callable[[str, str], bool]]): Function to compare + the expected answer and the swarm output. If None, a simple substring + containment is used. + """ + + def __init__( + self, + input_column: str, + answer_column: str, + answer_extractor: Optional[Callable[[Any], str]] = None, + answer_matcher: Optional[Callable[[str, str], bool]] = None, + ): + self.input_column = input_column + self.answer_column = answer_column + self.answer_extractor = answer_extractor + self.answer_matcher = answer_matcher + + +# ----------------------------------------------------------------------------- +# Preset dataset configurations for popular benchmarks +# ----------------------------------------------------------------------------- +PRESET_DATASETS: Dict[str, BenchmarkConfig] = { + "gsm8k": BenchmarkConfig( + input_column="question", + answer_column="answer", + ), + "squad": BenchmarkConfig( + input_column="question", + answer_column="answers", + answer_extractor=lambda ans: ( + ans["text"][0] + if isinstance(ans, dict) + and "text" in ans + and isinstance(ans["text"], list) + and ans["text"] + else str(ans) + ), + ), + "winogrande": BenchmarkConfig( + input_column="sentence", + answer_column="answer", + ), + "commonsense_qa": BenchmarkConfig( + input_column="question", + answer_column="answerKey", + ), + # Add additional presets here. +} + + +# ----------------------------------------------------------------------------- +# SwarmEvaluator with extended features +# ----------------------------------------------------------------------------- +class SwarmEvaluator: + """ + Evaluator that uses a swarm of agents to process benchmark datasets + from Hugging Face, with concurrency, retries, progress display, performance timing, + and customizable answer matching. + + Example: + swarm = Swarm() + evaluator = SwarmEvaluator(swarm) + results = evaluator.evaluate("gsm8k", split="test", max_workers=4) + print(results) + """ + + def __init__(self, swarm: callable) -> None: + """ + Initialize the evaluator with a given swarm. + + Args: + swarm (Swarm): A swarm instance with a callable run(task: str) method. + """ + self.swarm = swarm + + def evaluate( + self, + dataset_name: str, + split: str = "test", + config: Optional[BenchmarkConfig] = None, + max_workers: int = 1, + max_retries: int = 3, + show_progress: bool = True, + output_file: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Evaluate the specified benchmark dataset using the swarm. + + Args: + dataset_name (str): The dataset name (from Hugging Face). + split (str): The dataset split (e.g., "test", "validation"). + config (Optional[BenchmarkConfig]): Benchmark configuration. If None, + a preset config is used. + max_workers (int): Number of concurrent workers. + max_retries (int): Number of retries for swarm tasks on failure. + show_progress (bool): If True, display a progress bar. + output_file (Optional[str]): Path to a file to write the results. + + Returns: + Dict[str, Any]: Evaluation metrics including total examples, correct answers, + accuracy, and total evaluation time. + """ + if config is None: + config = PRESET_DATASETS.get(dataset_name) + if config is None: + raise ValueError( + f"No preset config for dataset '{dataset_name}'. Provide a BenchmarkConfig." + ) + + logger.info( + f"Loading dataset '{dataset_name}' (split: {split})..." + ) + dataset: Dataset = load_dataset(dataset_name, split=split) + total_examples = len(dataset) + logger.info(f"Total examples to evaluate: {total_examples}") + + start_time = time.time() + correct = 0 + + # Function to process a single example. + def _process_example( + example: Dict[str, Any], idx: int + ) -> Tuple[bool, float]: + task_start = time.time() + task_text = example.get(config.input_column) + expected_answer = example.get(config.answer_column) + + if task_text is None or expected_answer is None: + logger.warning( + f"Example {idx}: Missing '{config.input_column}' or '{config.answer_column}', skipping." + ) + return (False, 0.0) + + # Use answer_extractor if provided. + if config.answer_extractor: + try: + expected_answer = config.answer_extractor( + expected_answer + ) + except Exception as e: + logger.error( + f"Example {idx}: Error extracting answer: {e}" + ) + return (False, 0.0) + + logger.debug(f"Example {idx} - Task: {task_text}") + logger.debug( + f"Example {idx} - Expected Answer: {expected_answer}" + ) + + try: + swarm_output = self._run_with_retry( + task_text, max_retries + ) + except Exception as e: + logger.error( + f"Example {idx}: Failed after retries. Error: {e}" + ) + return (False, time.time() - task_start) + + logger.debug( + f"Example {idx} - Swarm Output: {swarm_output}" + ) + + # Use custom matcher if provided; otherwise, default matching. + if config.answer_matcher: + is_correct = config.answer_matcher( + expected_answer, swarm_output + ) + else: + is_correct = self._default_matcher( + expected_answer, swarm_output + ) + + task_time = time.time() - task_start + logger.info( + f"Example {idx}: {'Correct' if is_correct else 'Incorrect'} in {task_time:.2f}s" + ) + return (is_correct, task_time) + + # Use ThreadPoolExecutor for concurrency. + futures = [] + total_time = 0.0 + with ThreadPoolExecutor(max_workers=max_workers) as executor: + # Optionally wrap the dataset with tqdm for a progress bar. + examples_iter = enumerate(dataset, start=1) + if show_progress: + examples_iter = tqdm( + list(examples_iter), + total=total_examples, + desc="Evaluating", + ) + + for idx, example in examples_iter: + futures.append( + executor.submit(_process_example, example, idx) + ) + + for future in as_completed(futures): + try: + is_correct, elapsed = future.result() + total_time += elapsed + if is_correct: + correct += 1 + except Exception as e: + logger.error(f"Error processing an example: {e}") + + overall_time = time.time() - start_time + accuracy = ( + correct / total_examples if total_examples > 0 else 0.0 + ) + + logger.info( + f"Evaluation complete. Total examples: {total_examples}, Correct: {correct}, " + f"Accuracy: {accuracy:.2%}, Overall Time: {overall_time:.2f}s, " + f"Average per-example time: {total_time/total_examples if total_examples else 0:.2f}s" + ) + + results = { + "total": total_examples, + "correct": correct, + "accuracy": accuracy, + "overall_time": overall_time, + "average_example_time": ( + total_time / total_examples + if total_examples + else math.nan + ), + } + + # Optionally save results to a file. + if output_file: + try: + with open(output_file, "w") as f: + for key, value in results.items(): + f.write(f"{key}: {value}\n") + logger.info(f"Results saved to {output_file}") + except Exception as e: + logger.error( + f"Error saving results to {output_file}: {e}" + ) + + return results + + def _run_with_retry(self, task: str, max_retries: int) -> str: + """ + Runs the swarm task with a retry mechanism. + + Args: + task (str): The task string. + max_retries (int): Maximum number of retries. + + Returns: + str: Swarm output. + + Raises: + Exception: If all retries fail. + """ + attempt = 0 + while attempt <= max_retries: + try: + start = time.time() + result = self.swarm.run(task) + elapsed = time.time() - start + logger.debug( + f"Task succeeded in {elapsed:.2f}s on attempt {attempt + 1}" + ) + return result + except Exception as e: + logger.warning( + f"Task failed on attempt {attempt + 1}: {e}" + ) + attempt += 1 + time.sleep(0.5 * attempt) # Exponential backoff + raise Exception("Max retries exceeded for task.") + + @staticmethod + def _default_matcher(expected: str, output: str) -> bool: + """ + Default answer matching using a normalized substring check. + + Args: + expected (str): The expected answer. + output (str): The swarm output. + + Returns: + bool: True if expected is found in output; otherwise, False. + """ + expected_norm = " ".join(expected.strip().split()) + output_norm = " ".join(output.strip().split()) + return expected_norm in output_norm + + +# ----------------------------------------------------------------------------- +# Example usage +# ----------------------------------------------------------------------------- diff --git a/swarms/structs/swarm_matcher.py b/swarms/structs/swarm_matcher.py index d1594c95..a0bfef2c 100644 --- a/swarms/structs/swarm_matcher.py +++ b/swarms/structs/swarm_matcher.py @@ -8,7 +8,6 @@ from tenacity import retry, stop_after_attempt, wait_exponential from swarms.utils.auto_download_check_packages import ( auto_check_and_download_package, ) -from swarms.utils.lazy_loader import lazy_import_decorator from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="swarm_matcher") @@ -29,7 +28,6 @@ class SwarmMatcherConfig(BaseModel): ) -@lazy_import_decorator class SwarmMatcher: """ A class for matching tasks to swarm types based on their descriptions. @@ -44,8 +42,6 @@ class SwarmMatcher: Args: config (SwarmMatcherConfig): The configuration for the SwarmMatcher. """ - logger.add("swarm_matcher_debug.log", level="DEBUG") - logger.debug("Initializing SwarmMatcher") try: import torch @@ -271,7 +267,6 @@ def initialize_swarm_types(matcher: SwarmMatcher): logger.debug("Swarm types initialized") -@lazy_import_decorator def swarm_matcher(task: str, *args, **kwargs): """ Runs the SwarmMatcher example with predefined tasks and swarm types. diff --git a/swarms/structs/swarm_output_type.py b/swarms/structs/swarm_output_type.py index f2a85732..15af5002 100644 --- a/swarms/structs/swarm_output_type.py +++ b/swarms/structs/swarm_output_type.py @@ -4,7 +4,7 @@ import uuid from pydantic import BaseModel, Field -class AgentResponde(BaseModel): +class AgentRespond(BaseModel): id: str = Field(default=uuid.uuid4().hex) timestamp: str = Field(default=time.time()) agent_position: int = Field(description="Agent in swarm position") @@ -18,6 +18,6 @@ class SwarmOutput(BaseModel): name: str = Field(description="Swarm name") description: str = Field(description="Swarm description") swarm_type: str = Field(description="Swarm type") - agent_outputs: List[AgentResponde] = Field( + agent_outputs: List[AgentRespond] = Field( description="List of agent responses" ) diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index a0f73a98..bf829357 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -1,3 +1,4 @@ +import os import uuid from datetime import datetime from typing import Any, Callable, Dict, List, Literal, Union @@ -8,14 +9,14 @@ from tenacity import retry, stop_after_attempt, wait_fixed from swarms.prompts.ag_prompt import aggregator_system_prompt from swarms.structs.agent import Agent from swarms.structs.concurrent_workflow import ConcurrentWorkflow +from swarms.structs.csv_to_agent import AgentLoader +from swarms.structs.groupchat import GroupChat from swarms.structs.mixture_of_agents import MixtureOfAgents +from swarms.structs.multi_agent_orchestrator import MultiAgentRouter from swarms.structs.rearrange import AgentRearrange from swarms.structs.sequential_workflow import SequentialWorkflow from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm from swarms.structs.swarm_matcher import swarm_matcher -from swarms.utils.wrapper_clusterop import ( - exec_callable_with_clusterops, -) from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="swarm_router") @@ -26,6 +27,8 @@ SwarmType = Literal[ "SpreadSheetSwarm", "SequentialWorkflow", "ConcurrentWorkflow", + "GroupChat", + "MultiAgentRouter", "auto", ] @@ -137,6 +140,9 @@ class SwarmRouter: documents: List[str] = [], # A list of docs file paths output_type: str = "string", # Md, PDF, Txt, csv no_cluster_ops: bool = False, + speaker_fn: callable = None, + load_agents_from_csv: bool = False, + csv_file_path: str = None, *args, **kwargs, ): @@ -154,7 +160,15 @@ class SwarmRouter: self.documents = documents self.output_type = output_type self.no_cluster_ops = no_cluster_ops + self.speaker_fn = speaker_fn self.logs = [] + self.load_agents_from_csv = load_agents_from_csv + self.csv_file_path = csv_file_path + + if self.load_agents_from_csv: + self.agents = AgentLoader( + csv_path=self.csv_file_path + ).load_agents() self.reliability_check() @@ -174,12 +188,6 @@ class SwarmRouter: if self.rules is not None: self.handle_rules() - # let's make a function that checks the agents parameter and disables clusterops - - def deactivate_clusterops(self): - for agent in self.agents: - agent.do_not_use_cluster_ops = True - def activate_shared_memory(self): logger.info("Activating shared memory with all agents ") @@ -269,9 +277,6 @@ class SwarmRouter: self._create_swarm(self.swarm_type) - if self.no_cluster_ops: - self.deactivate_clusterops() - elif self.swarm_type == "AgentRearrange": return AgentRearrange( name=self.name, @@ -295,6 +300,26 @@ class SwarmRouter: *args, **kwargs, ) + + elif self.swarm_type == "GroupChat": + return GroupChat( + name=self.name, + description=self.description, + agents=self.agents, + max_loops=self.max_loops, + speaker_fn=self.speaker_fn, + *args, + **kwargs, + ) + + elif self.swarm_type == "MultiAgentRouter": + return MultiAgentRouter( + name=self.name, + description=self.description, + agents=self.agents, + shared_memory_system=self.shared_memory_system, + output_type=self.output_type, + ) elif self.swarm_type == "SpreadSheetSwarm": return SpreadSheetSwarm( name=self.name, @@ -404,10 +429,6 @@ class SwarmRouter: self, task: str, img: str = None, - device: str = "cpu", - all_cores: bool = True, - all_gpus: bool = False, - no_clusterops: bool = True, *args, **kwargs, ) -> Any: @@ -429,18 +450,7 @@ class SwarmRouter: Exception: If an error occurs during task execution. """ try: - if no_clusterops: - return self._run(task=task, img=img, *args, **kwargs) - else: - return exec_callable_with_clusterops( - func=self._run, - device=device, - all_cores=all_cores, - all_gpus=all_gpus, - task=task, - *args, - **kwargs, - ) + return self._run(task=task, img=img, *args, **kwargs) except Exception as e: logger.error(f"Error executing task on swarm: {str(e)}") raise @@ -491,41 +501,6 @@ class SwarmRouter: raise return results - def threaded_run(self, task: str, *args, **kwargs) -> Any: - """ - Execute a task on the selected or matched swarm type using threading. - - Args: - task (str): The task to be executed by the swarm. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Returns: - Any: The result of the swarm's execution. - - Raises: - Exception: If an error occurs during task execution. - """ - from threading import Thread - - def run_in_thread(): - try: - result = self.run(task, *args, **kwargs) - return result - except Exception as e: - self._log( - "error", - f"Error occurred while running task in thread on {self.swarm_type} swarm: {str(e)}", - task=task, - metadata={"error": str(e)}, - ) - raise - - thread = Thread(target=run_in_thread) - thread.start() - thread.join() - return thread.result - def async_run(self, task: str, *args, **kwargs) -> Any: """ Execute a task on the selected or matched swarm type asynchronously. @@ -586,7 +561,9 @@ class SwarmRouter: """ from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor() as executor: + with ThreadPoolExecutor( + max_workers=os.cpu_count() + ) as executor: future = executor.submit(self.run, task, *args, **kwargs) result = future.result() return result diff --git a/swarms/structs/utils.py b/swarms/structs/utils.py index 9ca3a887..09374561 100644 --- a/swarms/structs/utils.py +++ b/swarms/structs/utils.py @@ -1,32 +1,7 @@ -import json -import re -from typing import Any, Dict, List, Optional - +from typing import List from swarms.structs.agent import Agent -# Helper functions for manager/corporate agents -def parse_tasks( - task: str = None, -) -> Dict[str, Any]: - """Parse tasks - - Args: - task (str, optional): _description_. Defaults to None. - - Returns: - Dict[str, Any]: _description_ - """ - tasks = {} - for line in task.split("\n"): - if line.startswith("") and line.endwith( - "" - ): - agent_id, task = line[10:-11].split("><") - tasks[agent_id] = task - return tasks - - def find_agent_by_id( agent_id: str = None, agents: List[Agent] = None, @@ -43,106 +18,51 @@ def find_agent_by_id( Returns: Agent: _description_ """ - for agent in agents: - if agent.id == agent_id: - if task: - return agent.run(task, *args, **kwargs) - else: - return agent - - return None - - -def distribute_tasks( - task: str = None, agents: List[Agent] = None, *args, **kwargs -): - """Distribute tasks to agents - - Args: - task (str, optional): _description_. Defaults to None. - agents (List[Agent], optional): _description_. Defaults to None. - """ - # Parse the task to extract tasks and agent id - tasks = parse_tasks(task) - - # Distribute tasks to agents - for agent_id, task in tasks.item(): - assigned_agent = find_agent_by_id(agent_id, agents) - if assigned_agent: - print(f"Assigning task {task} to agent {agent_id}") - output = assigned_agent.run(task, *args, **kwargs) - print(f"Output from agent {agent_id}: {output}") - else: - print( - f"No agent found with ID {agent_id}. Task '{task}' is" - " not assigned." - ) - - -def find_token_in_text(text: str, token: str = "") -> bool: - """ - Parse a block of text for a specific token. - - Args: - text (str): The text to parse. - token (str): The token to find. - - Returns: - bool: True if the token is found in the text, False otherwise. - """ - # Check if the token is in the text - if token in text: - return True - else: - return False - - -def extract_key_from_json( - json_response: str, key: str -) -> Optional[str]: - """ - Extract a specific key from a JSON response. - - Args: - json_response (str): The JSON response to parse. - key (str): The key to extract. - - Returns: - Optional[str]: The value of the key if it exists, None otherwise. - """ - response_dict = json.loads(json_response) - return response_dict.get(key) - - -def extract_tokens_from_text( - text: str, tokens: List[str] -) -> List[str]: - """ - Extract a list of tokens from a text response. + try: + print(f"Searching for agent with ID: {agent_id}") + for agent in agents: + if agent.id == agent_id: + print(f"Found agent with ID {agent_id}") + if task: + print(f"Running task: {task}") + return agent.run(task, *args, **kwargs) + else: + return agent + print(f"No agent found with ID {agent_id}") + return None + except Exception as e: + print(f"Error finding agent by ID: {str(e)}") + return None + + +def find_agent_by_name( + agent_name: str = None, + agents: List[Agent] = None, + task: str = None, + *args, + **kwargs, +) -> Agent: + """Find agent by name Args: - text (str): The text to parse. - tokens (List[str]): The tokens to extract. + agent_name (str): _description_ + agents (List[Agent]): _description_ Returns: - List[str]: The tokens that were found in the text. - """ - return [token for token in tokens if token in text] - - -def detect_markdown(text: str) -> bool: - """ - Checks if a string contains Markdown code enclosed in six backticks. - - Parameters - ---------- - text : str - The text to check. - - Returns - ------- - bool - True if the text contains Markdown code enclosed in six backticks, False otherwise. + Agent: _description_ """ - pattern = r"``````[\s\S]*?``````" - return bool(re.search(pattern, text)) + try: + print(f"Searching for agent with name: {agent_name}") + for agent in agents: + if agent.name == agent_name: + print(f"Found agent with name {agent_name}") + if task: + print(f"Running task: {task}") + return agent.run(task, *args, **kwargs) + else: + return agent + print(f"No agent found with name {agent_name}") + return None + except Exception as e: + print(f"Error finding agent by name: {str(e)}") + return None diff --git a/swarms/structs/workspace_manager.py b/swarms/structs/workspace_manager.py index cec3615d..8840c892 100644 --- a/swarms/structs/workspace_manager.py +++ b/swarms/structs/workspace_manager.py @@ -41,7 +41,7 @@ class WorkspaceManager: env_file_path (Path): The path to the .env file. """ with env_file_path.open("w") as file: - file.write("WORKSPACE_DIR=agent_workspace\n") + file.write(f"WORKSPACE_DIR={self.workspace_dir}\n") logger.info( "Created a new .env file with default WORKSPACE_DIR." ) @@ -57,7 +57,7 @@ class WorkspaceManager: content = file.read() if "WORKSPACE_DIR" not in content: file.seek(0, os.SEEK_END) - file.write("WORKSPACE_DIR=agent_workspace\n") + file.write(f"WORKSPACE_DIR={self.workspace_dir}\n") logger.info("Appended WORKSPACE_DIR to .env file.") def _get_workspace_dir( @@ -150,6 +150,8 @@ class WorkspaceManager: try: # Check if .env file exists and create it if it doesn't env_file_path = Path(".env") + + # If the .env file doesn't exist, create it if not env_file_path.exists(): self._create_env_file(env_file_path) else: diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py index a3c966dd..9792f266 100644 --- a/swarms/telemetry/__init__.py +++ b/swarms/telemetry/__init__.py @@ -1,19 +1,17 @@ -from swarms.telemetry.sys_info import ( +from swarms.telemetry.main import ( + generate_unique_identifier, + generate_user_id, get_cpu_info, + get_machine_id, get_os_version, get_package_mismatches, get_pip_version, get_python_version, get_ram_info, get_swarms_verison, - system_info, -) -from swarms.telemetry.user_utils import ( - generate_unique_identifier, - generate_user_id, - get_machine_id, get_system_info, get_user_device_data, + system_info, ) __all__ = [ diff --git a/swarms/telemetry/bootup.py b/swarms/telemetry/bootup.py index edb49133..fec281dc 100644 --- a/swarms/telemetry/bootup.py +++ b/swarms/telemetry/bootup.py @@ -25,6 +25,9 @@ def bootup(): logger.disable("") logging.disable(logging.CRITICAL) + else: + logger.enable("") + # Silent wandb os.environ["WANDB_SILENT"] = "true" diff --git a/swarms/telemetry/capture_sys_data.py b/swarms/telemetry/capture_sys_data.py deleted file mode 100644 index a7a2139c..00000000 --- a/swarms/telemetry/capture_sys_data.py +++ /dev/null @@ -1,86 +0,0 @@ -import platform -import socket -import psutil -import uuid -from typing import Dict -import requests - -from swarms.utils.loguru_logger import initialize_logger - -logger = initialize_logger(log_folder="capture_sys_data") - - -def capture_system_data() -> Dict[str, str]: - """ - Captures extensive system data including platform information, user ID, IP address, CPU count, - memory information, and other system details. - - Returns: - Dict[str, str]: A dictionary containing system data. - """ - try: - system_data = { - "platform": platform.system(), - "platform_version": platform.version(), - "platform_release": platform.release(), - "hostname": socket.gethostname(), - "ip_address": socket.gethostbyname(socket.gethostname()), - "cpu_count": psutil.cpu_count(logical=True), - "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB", - "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB", - "user_id": str(uuid.uuid4()), # Unique user identifier - "machine_type": platform.machine(), - "processor": platform.processor(), - "architecture": platform.architecture()[0], - } - - # Get external IP address - try: - system_data["external_ip"] = requests.get( - "https://api.ipify.org" - ).text - except Exception: - system_data["external_ip"] = "N/A" - - return system_data - except Exception as e: - logger.error("Failed to capture system data: {}", e) - return {} - - -def log_agent_data(data_dict: dict) -> dict | None: - """ - Silently logs agent data to the Swarms database with retry logic. - - Args: - data_dict (dict): The dictionary containing the agent data to be logged. - - Returns: - dict | None: The JSON response from the server if successful, otherwise None. - """ - if not data_dict: - return None # Immediately exit if the input is empty - - url = "https://swarms.world/api/get-agents/log-agents" - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869", - } - - try: - response = requests.post( - url, json=data_dict, headers=headers, timeout=10 - ) - if ( - response.ok and response.text.strip() - ): # Check if response is valid and non-empty - return ( - response.json() - ) # Parse and return the JSON response - except ( - requests.exceptions.RequestException, - requests.exceptions.JSONDecodeError, - ): - pass # Fail silently without any action - - return None # Return None if anything goes wrong diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py new file mode 100644 index 00000000..1b2396f2 --- /dev/null +++ b/swarms/telemetry/main.py @@ -0,0 +1,305 @@ +import hashlib +import os +import platform +import socket +import subprocess +import uuid +from typing import Dict + +import pkg_resources +import psutil +import requests +import toml + +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="capture_sys_data") + + +# Helper functions +def generate_user_id(): + """Generate user id + + Returns: + _type_: _description_ + """ + return str(uuid.uuid4()) + + +def get_machine_id(): + """Get machine id + + Returns: + _type_: _description_ + """ + raw_id = platform.node() + hashed_id = hashlib.sha256(raw_id.encode()).hexdigest() + return hashed_id + + +def get_system_info(): + """ + Gathers basic system information. + + Returns: + dict: A dictionary containing system-related information. + """ + info = { + "platform": platform.system(), + "platform_release": platform.release(), + "platform_version": platform.version(), + "architecture": platform.machine(), + "hostname": socket.gethostname(), + "ip_address": socket.gethostbyname(socket.gethostname()), + "mac_address": ":".join( + [ + f"{(uuid.getnode() >> elements) & 0xFF:02x}" + for elements in range(0, 2 * 6, 8) + ][::-1] + ), + "processor": platform.processor(), + "python_version": platform.python_version(), + "Misc": system_info(), + } + return info + + +def generate_unique_identifier(): + """Generate unique identifier + + Returns: + str: unique id + + """ + system_info = get_system_info() + unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info)) + return str(unique_id) + + +def get_local_ip(): + """Get local ip + + Returns: + str: local ip + + """ + return socket.gethostbyname(socket.gethostname()) + + +def get_user_device_data(): + data = { + "ID": generate_user_id(), + "Machine ID": get_machine_id(), + "System Info": get_system_info(), + "UniqueID": generate_unique_identifier(), + } + return data + + +def get_python_version(): + return platform.python_version() + + +def get_pip_version() -> str: + """Get pip version + + Returns: + str: The version of pip installed + """ + try: + pip_version = ( + subprocess.check_output(["pip", "--version"]) + .decode() + .split()[1] + ) + except Exception as e: + pip_version = str(e) + return pip_version + + +def get_swarms_verison() -> tuple[str, str]: + """Get swarms version from both command line and package + + Returns: + tuple[str, str]: A tuple containing (command line version, package version) + """ + try: + swarms_verison_cmd = ( + subprocess.check_output(["swarms", "--version"]) + .decode() + .split()[1] + ) + except Exception as e: + swarms_verison_cmd = str(e) + swarms_verison_pkg = pkg_resources.get_distribution( + "swarms" + ).version + swarms_verison = swarms_verison_cmd, swarms_verison_pkg + return swarms_verison + + +def get_os_version() -> str: + """Get operating system version + + Returns: + str: The operating system version and platform details + """ + return platform.platform() + + +def get_cpu_info() -> str: + """Get CPU information + + Returns: + str: The processor information + """ + return platform.processor() + + +def get_ram_info() -> str: + """Get RAM information + + Returns: + str: A formatted string containing total, used and free RAM in GB + """ + vm = psutil.virtual_memory() + used_ram_gb = vm.used / (1024**3) + free_ram_gb = vm.free / (1024**3) + total_ram_gb = vm.total / (1024**3) + return ( + f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:" + f" {free_ram_gb:.2f}" + ) + + +def get_package_mismatches(file_path: str = "pyproject.toml") -> str: + """Get package version mismatches between pyproject.toml and installed packages + + Args: + file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml". + + Returns: + str: A formatted string containing package version mismatches + """ + with open(file_path) as file: + pyproject = toml.load(file) + dependencies = pyproject["tool"]["poetry"]["dependencies"] + dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][ + "dependencies" + ] + dependencies.update(dev_dependencies) + + installed_packages = { + pkg.key: pkg.version for pkg in pkg_resources.working_set + } + + mismatches = [] + for package, version_info in dependencies.items(): + if isinstance(version_info, dict): + version_info = version_info["version"] + installed_version = installed_packages.get(package) + if installed_version and version_info.startswith("^"): + expected_version = version_info[1:] + if not installed_version.startswith(expected_version): + mismatches.append( + f"\t {package}: Mismatch," + f" pyproject.toml={expected_version}," + f" pip={installed_version}" + ) + else: + mismatches.append(f"\t {package}: Not found in pip list") + + return "\n" + "\n".join(mismatches) + + +def system_info() -> dict[str, str]: + """Get system information including Python, pip, OS, CPU and RAM details + + Returns: + dict[str, str]: A dictionary containing system information + """ + return { + "Python Version": get_python_version(), + "Pip Version": get_pip_version(), + # "Swarms Version": swarms_verison, + "OS Version and Architecture": get_os_version(), + "CPU Info": get_cpu_info(), + "RAM Info": get_ram_info(), + } + + +def capture_system_data() -> Dict[str, str]: + """ + Captures extensive system data including platform information, user ID, IP address, CPU count, + memory information, and other system details. + + Returns: + Dict[str, str]: A dictionary containing system data. + """ + try: + system_data = { + "platform": platform.system(), + "platform_version": platform.version(), + "platform_release": platform.release(), + "hostname": socket.gethostname(), + "ip_address": socket.gethostbyname(socket.gethostname()), + "cpu_count": psutil.cpu_count(logical=True), + "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB", + "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB", + "user_id": str(uuid.uuid4()), # Unique user identifier + "machine_type": platform.machine(), + "processor": platform.processor(), + "architecture": platform.architecture()[0], + } + + # Get external IP address + try: + system_data["external_ip"] = requests.get( + "https://api.ipify.org" + ).text + except Exception: + system_data["external_ip"] = "N/A" + + return system_data + except Exception as e: + logger.error("Failed to capture system data: {}", e) + return {} + + +def log_agent_data(data_dict: dict) -> dict | None: + """ + Silently logs agent data to the Swarms database with retry logic. + + Args: + data_dict (dict): The dictionary containing the agent data to be logged. + + Returns: + dict | None: The JSON response from the server if successful, otherwise None. + """ + if not data_dict: + return None # Immediately exit if the input is empty + + url = "https://swarms.world/api/get-agents/log-agents" + headers = { + "Content-Type": "application/json", + "Authorization": os.getenv("SWARMS_API_KEY"), + } + + try: + response = requests.post( + url, json=data_dict, headers=headers, timeout=10 + ) + if ( + response.ok and response.text.strip() + ): # Check if response is valid and non-empty + return ( + response.json() + ) # Parse and return the JSON response + except ( + requests.exceptions.RequestException, + requests.exceptions.JSONDecodeError, + ): + return None # Return None if anything goes wrong + + +# print(log_agent_data(get_user_device_data())) diff --git a/swarms/telemetry/sys_info.py b/swarms/telemetry/sys_info.py deleted file mode 100644 index 2739362f..00000000 --- a/swarms/telemetry/sys_info.py +++ /dev/null @@ -1,138 +0,0 @@ -import platform -import subprocess - -import pkg_resources -import psutil -import toml - - -def get_python_version(): - return platform.python_version() - - -def get_pip_version() -> str: - """Get pip version - - Returns: - str: The version of pip installed - """ - try: - pip_version = ( - subprocess.check_output(["pip", "--version"]) - .decode() - .split()[1] - ) - except Exception as e: - pip_version = str(e) - return pip_version - - -def get_swarms_verison() -> tuple[str, str]: - """Get swarms version from both command line and package - - Returns: - tuple[str, str]: A tuple containing (command line version, package version) - """ - try: - swarms_verison_cmd = ( - subprocess.check_output(["swarms", "--version"]) - .decode() - .split()[1] - ) - except Exception as e: - swarms_verison_cmd = str(e) - swarms_verison_pkg = pkg_resources.get_distribution( - "swarms" - ).version - swarms_verison = swarms_verison_cmd, swarms_verison_pkg - return swarms_verison - - -def get_os_version() -> str: - """Get operating system version - - Returns: - str: The operating system version and platform details - """ - return platform.platform() - - -def get_cpu_info() -> str: - """Get CPU information - - Returns: - str: The processor information - """ - return platform.processor() - - -def get_ram_info() -> str: - """Get RAM information - - Returns: - str: A formatted string containing total, used and free RAM in GB - """ - vm = psutil.virtual_memory() - used_ram_gb = vm.used / (1024**3) - free_ram_gb = vm.free / (1024**3) - total_ram_gb = vm.total / (1024**3) - return ( - f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:" - f" {free_ram_gb:.2f}" - ) - - -def get_package_mismatches(file_path: str = "pyproject.toml") -> str: - """Get package version mismatches between pyproject.toml and installed packages - - Args: - file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml". - - Returns: - str: A formatted string containing package version mismatches - """ - with open(file_path) as file: - pyproject = toml.load(file) - dependencies = pyproject["tool"]["poetry"]["dependencies"] - dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][ - "dependencies" - ] - dependencies.update(dev_dependencies) - - installed_packages = { - pkg.key: pkg.version for pkg in pkg_resources.working_set - } - - mismatches = [] - for package, version_info in dependencies.items(): - if isinstance(version_info, dict): - version_info = version_info["version"] - installed_version = installed_packages.get(package) - if installed_version and version_info.startswith("^"): - expected_version = version_info[1:] - if not installed_version.startswith(expected_version): - mismatches.append( - f"\t {package}: Mismatch," - f" pyproject.toml={expected_version}," - f" pip={installed_version}" - ) - else: - mismatches.append(f"\t {package}: Not found in pip list") - - return "\n" + "\n".join(mismatches) - - -def system_info() -> dict[str, str]: - """Get system information including Python, pip, OS, CPU and RAM details - - Returns: - dict[str, str]: A dictionary containing system information - """ - return { - "Python Version": get_python_version(), - "Pip Version": get_pip_version(), - # "Swarms Version": swarms_verison, - "OS Version and Architecture": get_os_version(), - "CPU Info": get_cpu_info(), - "RAM Info": get_ram_info(), - } diff --git a/swarms/telemetry/user_utils.py b/swarms/telemetry/user_utils.py deleted file mode 100644 index 9da52a4c..00000000 --- a/swarms/telemetry/user_utils.py +++ /dev/null @@ -1,86 +0,0 @@ -import hashlib -import platform -import socket -import uuid - -from swarms.telemetry.sys_info import system_info - - -# Helper functions -def generate_user_id(): - """Generate user id - - Returns: - _type_: _description_ - """ - return str(uuid.uuid4()) - - -def get_machine_id(): - """Get machine id - - Returns: - _type_: _description_ - """ - raw_id = platform.node() - hashed_id = hashlib.sha256(raw_id.encode()).hexdigest() - return hashed_id - - -def get_system_info(): - """ - Gathers basic system information. - - Returns: - dict: A dictionary containing system-related information. - """ - info = { - "platform": platform.system(), - "platform_release": platform.release(), - "platform_version": platform.version(), - "architecture": platform.machine(), - "hostname": socket.gethostname(), - "ip_address": socket.gethostbyname(socket.gethostname()), - "mac_address": ":".join( - [ - f"{(uuid.getnode() >> elements) & 0xFF:02x}" - for elements in range(0, 2 * 6, 8) - ][::-1] - ), - "processor": platform.processor(), - "python_version": platform.python_version(), - "Misc": system_info(), - } - return info - - -def generate_unique_identifier(): - """Generate unique identifier - - Returns: - str: unique id - - """ - system_info = get_system_info() - unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info)) - return str(unique_id) - - -def get_local_ip(): - """Get local ip - - Returns: - str: local ip - - """ - return socket.gethostbyname(socket.gethostname()) - - -def get_user_device_data(): - data = { - "ID": generate_user_id(), - "Machine ID": get_machine_id(), - "System Info": get_system_info(), - "UniqueID": generate_unique_identifier(), - } - return data diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py index 18ac51ac..20012304 100644 --- a/swarms/tools/__init__.py +++ b/swarms/tools/__init__.py @@ -2,7 +2,6 @@ from swarms.tools.tool_utils import ( scrape_tool_func_docs, tool_find_by_name, ) -from swarms.tools.func_calling_executor import openai_tool_executor from swarms.tools.pydantic_to_json import ( _remove_a_key, base_model_to_openai_function, @@ -22,7 +21,6 @@ from swarms.tools.py_func_to_openai_func_str import ( ) from swarms.tools.openai_tool_creator_decorator import tool from swarms.tools.base_tool import BaseTool -from swarms.tools.prebuilt import * # noqa: F403 from swarms.tools.cohere_func_call_schema import ( CohereFuncSchema, ParameterDefinition, @@ -34,7 +32,6 @@ from swarms.tools.json_utils import base_model_to_json __all__ = [ "scrape_tool_func_docs", "tool_find_by_name", - "openai_tool_executor", "_remove_a_key", "base_model_to_openai_function", "multi_base_model_to_openai_function", diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 04319db8..ae47a1a1 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -3,7 +3,6 @@ from typing import Any, Callable, Dict, List, Optional, Union from pydantic import BaseModel, Field -from swarms.tools.func_calling_executor import openai_tool_executor from swarms.tools.func_to_str import function_to_str, functions_to_str from swarms.tools.function_util import process_tool_docs from swarms.tools.py_func_to_openai_func_str import ( @@ -15,6 +14,7 @@ from swarms.tools.pydantic_to_json import ( multi_base_model_to_openai_function, ) from swarms.utils.loguru_logger import initialize_logger +from swarms.tools.tool_parse_exec import parse_and_execute_json logger = initialize_logger(log_folder="base_tool") @@ -178,16 +178,14 @@ class BaseTool(BaseModel): def execute_tool( self, + response: str, *args: Any, **kwargs: Any, ) -> Callable: try: - return openai_tool_executor( - self.list_of_dicts, - self.function_map, - self.verbose, - *args, - **kwargs, + return parse_and_execute_json( + self.tools, + response, ) except Exception as e: logger.error(f"An error occurred in execute_tool: {e}") @@ -253,6 +251,7 @@ class BaseTool(BaseModel): def execute_tool_by_name( self, tool_name: str, + response: str, ) -> Any: """ Search for a tool by name and execute it. @@ -268,31 +267,16 @@ class BaseTool(BaseModel): ValueError: If the tool with the specified name is not found. TypeError: If the tool name is not mapped to a function in the function map. """ - # Search for the tool by name - tool = next( - ( - tool - for tool in self.tools - if tool.get("name") == tool_name - ), - None, - ) - - # If the tool is not found, raise an error - if tool is None: - raise ValueError(f"Tool '{tool_name}' not found") - - # Get the function associated with the tool + # Step 1. find the function in the function map func = self.function_map.get(tool_name) - # If the function is not found, raise an error - if func is None: - raise TypeError( - f"Tool '{tool_name}' is not mapped to a function" - ) + execution = parse_and_execute_json( + functions=[func], + json_string=response, + verbose=self.verbose, + ) - # Execute the tool - return func(**tool.get("parameters", {})) + return execution def execute_tool_from_text(self, text: str) -> Any: """ @@ -415,16 +399,14 @@ class BaseTool(BaseModel): ) # Combine all tool schemas into a single schema - if tool_schemas: - combined_schema = { - "type": "function", - "functions": [ - schema["function"] for schema in tool_schemas - ], - } - return json.dumps(combined_schema, indent=4) - - return None + combined_schema = { + "type": "function", + "functions": [ + schema["function"] for schema in tool_schemas + ], + } + + return combined_schema def check_func_if_have_docs(self, func: callable): if func.__doc__ is not None: diff --git a/swarms/tools/func_calling_executor.py b/swarms/tools/func_calling_executor.py deleted file mode 100644 index 65d95a73..00000000 --- a/swarms/tools/func_calling_executor.py +++ /dev/null @@ -1,238 +0,0 @@ -import concurrent.futures -from typing import Callable, Any, Dict, List -from swarms.utils.loguru_logger import initialize_logger - -logger = initialize_logger(log_folder="func_calling_executor") - -# def openai_tool_executor( -# tools: List[Dict[str, Any]], -# function_map: Dict[str, Callable], -# verbose: bool = True, -# return_as_string: bool = False, -# *args, -# **kwargs, -# ) -> Callable: -# """ -# Creates a function that dynamically and concurrently executes multiple functions based on parameters specified -# in a list of tool dictionaries, with extensive error handling and validation. - -# Args: -# tools (List[Dict[str, Any]]): A list of dictionaries, each containing configuration for a tool, including parameters. -# function_map (Dict[str, Callable]): A dictionary mapping function names to their corresponding callable functions. -# verbose (bool): If True, enables verbose logging. -# return_as_string (bool): If True, returns the results as a concatenated string. - -# Returns: -# Callable: A function that, when called, executes the specified functions concurrently with the parameters given. - -# Examples: -# >>> def test_function(param1: int, param2: str) -> str: -# ... return f"Test function called with parameters: {param1}, {param2}" - -# >>> tool_executor = openai_tool_executor( -# ... tools=[ -# ... { -# ... "type": "function", -# ... "function": { -# ... "name": "test_function", -# ... "parameters": { -# ... "param1": 1, -# ... "param2": "example" -# ... } -# ... } -# ... } -# ... ], -# ... function_map={ -# ... "test_function": test_function -# ... }, -# ... return_as_string=True -# ... ) -# >>> results = tool_executor() -# >>> print(results) -# """ - -# def tool_executor(): -# # Prepare tasks for concurrent execution -# results = [] -# logger.info(f"Executing {len(tools)} tools concurrently.") -# with concurrent.futures.ThreadPoolExecutor() as executor: -# futures = [] -# for tool in tools: -# if tool.get("type") != "function": -# continue # Skip non-function tool entries - -# function_info = tool.get("function", {}) -# func_name = function_info.get("name") -# logger.info(f"Executing function: {func_name}") - -# # Check if the function name is mapped to an actual function -# if func_name not in function_map: -# error_message = f"Function '{func_name}' not found in function map." -# logger.error(error_message) -# results.append(error_message) -# continue - -# # Validate parameters -# params = function_info.get("parameters", {}) -# if not params: -# error_message = f"No parameters specified for function '{func_name}'." -# logger.error(error_message) -# results.append(error_message) -# continue - -# # Submit the function for execution -# try: -# future = executor.submit( -# function_map[func_name], **params -# ) -# futures.append((func_name, future)) -# except Exception as e: -# error_message = f"Failed to submit the function '{func_name}' for execution: {e}" -# logger.error(error_message) -# results.append(error_message) - -# # Gather results from all futures -# for func_name, future in futures: -# try: -# result = future.result() # Collect result from future -# results.append(f"{func_name}: {result}") -# except Exception as e: -# error_message = f"Error during execution of function '{func_name}': {e}" -# logger.error(error_message) -# results.append(error_message) - -# if return_as_string: -# return "\n".join(results) - -# logger.info(f"Results: {results}") - -# return results - -# return tool_executor - - -def openai_tool_executor( - tools: List[Dict[str, Any]], - function_map: Dict[str, Callable], - verbose: bool = True, - return_as_string: bool = False, - *args, - **kwargs, -) -> Callable: - def tool_executor(): - results = [] - logger.info(f"Executing {len(tools)} tools concurrently.") - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [] - for tool in tools: - if tool.get("type") != "function": - continue - - function_info = tool.get("function", {}) - func_name = function_info.get("name") - logger.info(f"Executing function: {func_name}") - - if func_name not in function_map: - error_message = f"Function '{func_name}' not found in function map." - logger.error(error_message) - results.append(error_message) - continue - - params = function_info.get("parameters", {}) - if not params: - error_message = f"No parameters specified for function '{func_name}'." - logger.error(error_message) - results.append(error_message) - continue - - if ( - "name" in params - and params["name"] in function_map - ): - try: - result = function_map[params["name"]]( - **params - ) - results.append(f"{params['name']}: {result}") - except Exception as e: - error_message = f"Failed to execute the function '{params['name']}': {e}" - logger.error(error_message) - results.append(error_message) - continue - - try: - future = executor.submit( - function_map[func_name], **params - ) - futures.append((func_name, future)) - except Exception as e: - error_message = f"Failed to submit the function '{func_name}' for execution: {e}" - logger.error(error_message) - results.append(error_message) - - for func_name, future in futures: - try: - result = future.result() - results.append(f"{func_name}: {result}") - except Exception as e: - error_message = f"Error during execution of function '{func_name}': {e}" - logger.error(error_message) - results.append(error_message) - - if return_as_string: - return "\n".join(results) - - logger.info(f"Results: {results}") - - return results - - return tool_executor - - -# function_schema = { -# "name": "execute", -# "description": "Executes code on the user's machine **in the users local environment** and returns the output", -# "parameters": { -# "type": "object", -# "properties": { -# "language": { -# "type": "string", -# "description": "The programming language (required parameter to the `execute` function)", -# "enum": [ -# # This will be filled dynamically with the languages OI has access to. -# ], -# }, -# "code": { -# "type": "string", -# "description": "The code to execute (required)", -# }, -# }, -# "required": ["language", "code"], -# }, -# } - - -# def execute(language: str, code: str): -# """ -# Executes code on the user's machine **in the users local environment** and returns the output - -# Args: -# language (str): The programming language (required parameter to the `execute` function) -# code (str): The code to execute (required) - -# Returns: -# str: The output of the code execution -# """ -# # This function will be implemented by the user -# return "Code execution not implemented yet" - - -# # Example execution -# out = openai_tool_executor( -# tools=[function_schema], -# function_map={ -# "execute": execute, -# }, -# return_as_string=True, -# ) -# print(out) diff --git a/swarms/tools/function_util.py b/swarms/tools/function_util.py index de0ec97a..731584fb 100644 --- a/swarms/tools/function_util.py +++ b/swarms/tools/function_util.py @@ -1,16 +1,24 @@ import inspect +from typing import Callable, Type, Union -def process_tool_docs(item): +def process_tool_docs(item: Union[Callable, Type]) -> str: """ - Process the documentation for a given item. + Process the documentation for a given item, which can be a function or a class. Args: - item: The item to process the documentation for. + item: The item to process the documentation for. It can be a function or a class. Returns: - metadata: The processed metadata containing the item's name, documentation, and source code. + str: The processed metadata containing the item's name, documentation, and source code. + + Raises: + TypeError: If the item is not a function or a class. """ + # Check if item is a function or a class + if not inspect.isfunction(item) and not inspect.isclass(item): + raise TypeError("Item must be a function or a class.") + # If item is an instance of a class, get its class if not inspect.isclass(item) and hasattr(item, "__class__"): item = item.__class__ diff --git a/swarms/tools/json_former.py b/swarms/tools/json_former.py index 6e1358a9..fb24566b 100644 --- a/swarms/tools/json_former.py +++ b/swarms/tools/json_former.py @@ -1,14 +1,12 @@ import json from typing import Any, Dict, List, Union -from swarms.utils.lazy_loader import lazy_import_decorator from pydantic import BaseModel from swarms.tools.logits_processor import ( NumberStoppingCriteria, OutputNumbersTokens, StringStoppingCriteria, ) -from swarm_models.base_llm import BaseLLM from swarms.utils.auto_download_check_packages import ( auto_check_and_download_package, ) @@ -25,7 +23,6 @@ except ImportError: GENERATION_MARKER = "|GENERATION|" -@lazy_import_decorator class Jsonformer: """ Initializes the FormatTools class. @@ -59,7 +56,7 @@ class Jsonformer: max_number_tokens: int = 6, temperature: float = 1.0, max_string_token_length: int = 10, - llm: BaseLLM = None, + llm: Any = None, ): self.model = model self.tokenizer = tokenizer diff --git a/swarms/tools/prebuilt/__init__.py b/swarms/tools/prebuilt/__init__.py deleted file mode 100644 index 6a4c73aa..00000000 --- a/swarms/tools/prebuilt/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from swarms.tools.prebuilt.math_eval import math_eval -from swarms.tools.prebuilt.code_executor import CodeExecutor - -__all__ = [ - "math_eval", - "CodeExecutor", -] diff --git a/swarms/tools/prebuilt/bing_api.py b/swarms/tools/prebuilt/bing_api.py deleted file mode 100644 index 2d865c98..00000000 --- a/swarms/tools/prebuilt/bing_api.py +++ /dev/null @@ -1,82 +0,0 @@ -import os -import requests -from typing import List, Dict - - -def check_bing_api_key(): - try: - return os.getenv("BING_API_KEY") - except Exception as error: - print(f"Error {error}") - raise None - - -def parse_and_merge_logs(logs: List[Dict[str, str]]) -> str: - """ - Parses logs and merges them into a single string for input to an LLM. - - Parameters: - logs (List[Dict[str, str]]): A list of dictionaries where each dictionary represents a log entry. - - Returns: - str: A single string containing all log entries concatenated. - """ - - merged_logs = "" - for log in logs: - log_entries = [ - f"{key}: {value}" for key, value in log.items() - ] - log_string = "\n".join(log_entries) - merged_logs += log_string + "\n\n" - - return merged_logs.strip() - - -def fetch_web_articles_bing_api( - query: str = None, -) -> List[Dict[str, str]]: - """ - Fetches four articles from Bing Web Search API based on the given query. - - Parameters: - query (str): The search query to retrieve articles. - subscription_key (str): The Bing Search API subscription key. - - Returns: - List[Dict[str, str]]: A list of dictionaries containing article details. - """ - subscription_key = check_bing_api_key() - - url = "https://api.bing.microsoft.com/v7.0/search" - headers = {"Ocp-Apim-Subscription-Key": subscription_key} - params = {"q": query, "count": 4, "mkt": "en-US"} - - response = requests.get(url, headers=headers, params=params) - response.raise_for_status() - search_results = response.json() - - articles = [] - for i, result in enumerate( - search_results.get("webPages", {}).get("value", []) - ): - article_info = { - "query": query, - "url": result.get("url"), - "title": result.get("name"), - "publishedDate": result.get("dateLastCrawled"), - "author": ( - result.get("provider")[0]["name"] - if result.get("provider") - else "Unknown" - ), - "id": str(i + 1), # Generating a simple unique ID - } - articles.append(article_info) - - articles = parse_and_merge_logs(articles) - return articles - - -# out = fetch_web_articles_bing_api("swarms ai github") -# print(out) diff --git a/swarms/tools/prebuilt/code_executor.py b/swarms/tools/prebuilt/code_executor.py deleted file mode 100644 index 730b1935..00000000 --- a/swarms/tools/prebuilt/code_executor.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -import subprocess -from loguru import logger -from swarm_models.tiktoken_wrapper import TikTokenizer - - -class CodeExecutor: - """ - A class to execute Python code and return the output as a string. - - The class also logs the input and output using loguru and stores the outputs - in a folder called 'artifacts'. - - Methods: - execute(code: str) -> str: - Executes the given Python code and returns the output. - """ - - def __init__( - self, - max_output_length: int = 1000, - artifacts_directory: str = "artifacts", - language: str = "python3", - ) -> None: - """ - Initializes the CodeExecutor class and sets up the logging. - """ - self.max_output_length = max_output_length - self.artifacts_dir = artifacts_directory - self.language = language - - os.makedirs(self.artifacts_dir, exist_ok=True) - self.setup_logging() - self.tokenizer = TikTokenizer() - - def setup_logging(self) -> None: - """ - Sets up the loguru logger with colorful output. - """ - logger.add( - os.path.join(self.artifacts_dir, "code_execution.log"), - format="{time} {level} {message}", - level="DEBUG", - ) - logger.info( - "Logger initialized and artifacts directory set up." - ) - - def format_code(self, code: str) -> str: - """ - Formats the given Python code using black. - - Args: - code (str): The Python code to format. - - Returns: - str: The formatted Python code. - - Raises: - ValueError: If the code cannot be formatted. - """ - try: - import black - - formatted_code = black.format_str( - code, mode=black.FileMode() - ) - return formatted_code - except Exception as e: - logger.error(f"Error formatting code: {e}") - raise ValueError(f"Error formatting code: {e}") from e - - def execute(self, code: str) -> str: - """ - Executes the given Python code and returns the output. - - Args: - code (str): The Python code to execute. - - Returns: - str: The output of the executed code. - - Raises: - RuntimeError: If there is an error during the execution of the code. - """ - try: - formatted_code = self.format_code(code) - logger.info(f"Executing code:\n{formatted_code}") - completed_process = subprocess.run( - [self.language, "-c", formatted_code], - capture_output=True, - text=True, - check=True, - ) - output = completed_process.stdout - logger.info(f"Code output:\n{output}") - token_count = self.tokenizer.count_tokens(output) - print(token_count) - - if ( - self.max_output_length - and token_count > self.max_output_length - ): - logger.warning( - f"Output length exceeds {self.max_output_length} characters. Truncating output." - ) - output = output[: self.max_output_length] + "..." - - return output - except subprocess.CalledProcessError as e: - logger.error(f"Error executing code: {e.stderr}") - raise RuntimeError( - f"Error executing code: {e.stderr}" - ) from e - - -# # Example usage: -# if __name__ == "__main__": -# executor = CodeExecutor(max_output_length=300) -# code = """ -# import requests -# from typing import Any - -# def fetch_financial_news(api_key: str, query: str, num_articles: int) -> Any: -# try: -# url = f"https://newsapi.org/v2/everything?q={query}&apiKey={api_key}" -# response = requests.get(url) -# response.raise_for_status() -# return response.json() -# except requests.RequestException as e: -# print(f"Request Error: {e}") -# raise -# except ValueError as e: -# print(f"Value Error: {e}") -# raise - -# api_key = "" -# result = fetch_financial_news(api_key, query="Nvidia news", num_articles=5) -# print(result) -# """ -# result = executor.execute(code) -# print(result) diff --git a/swarms/tools/prebuilt/code_interpreter.py b/swarms/tools/prebuilt/code_interpreter.py deleted file mode 100644 index d26b555e..00000000 --- a/swarms/tools/prebuilt/code_interpreter.py +++ /dev/null @@ -1,232 +0,0 @@ -import queue -import subprocess -import threading -import time -import traceback -from swarms.utils.loguru_logger import logger - - -class SubprocessCodeInterpreter: - """ - SubprocessCodeinterpreter is a base class for code interpreters that run code in a subprocess. - - - Attributes: - start_cmd (str): The command to start the subprocess. Should be a string that can be split by spaces. - process (subprocess.Popen): The subprocess that is running the code. - debug_mode (bool): Whether to print debug statements. - output_queue (queue.Queue): A queue that is filled with output from the subprocess. - done (threading.Event): An event that is set when the subprocess is done running code. - - Example: - """ - - def __init__( - self, - start_cmd: str = "python3", - debug_mode: bool = False, - max_retries: int = 3, - verbose: bool = False, - retry_count: int = 0, - *args, - **kwargs, - ): - self.process = None - self.start_cmd = start_cmd - self.debug_mode = debug_mode - self.max_retries = max_retries - self.verbose = verbose - self.retry_count = retry_count - self.output_queue = queue.Queue() - self.done = threading.Event() - - def detect_active_line(self, line): - """Detect if the line is an active line - - Args: - line (_type_): _description_ - - Returns: - _type_: _description_ - """ - return None - - def detect_end_of_execution(self, line): - """detect if the line is an end of execution line - - Args: - line (_type_): _description_ - - Returns: - _type_: _description_ - """ - return None - - def line_postprocessor(self, line): - """Line postprocessor - - Args: - line (_type_): _description_ - - Returns: - _type_: _description_ - """ - return line - - def preprocess_code(self, code): - """ - This needs to insert an end_of_execution marker of some kind, - which can be detected by detect_end_of_execution. - - Optionally, add active line markers for detect_active_line. - """ - return code - - def terminate(self): - """terminate the subprocess""" - self.process.terminate() - - def start_process(self): - """start the subprocess""" - if self.process: - self.terminate() - - logger.info( - f"Starting subprocess with command: {self.start_cmd}" - ) - self.process = subprocess.Popen( - self.start_cmd.split(), - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - bufsize=0, - universal_newlines=True, - ) - threading.Thread( - target=self.handle_stream_output, - args=(self.process.stdout, False), - daemon=True, - ).start() - threading.Thread( - target=self.handle_stream_output, - args=(self.process.stderr, True), - daemon=True, - ).start() - - return self.process - - def run(self, code: str): - """Run the code in the subprocess - - Args: - code (str): _description_ - - Yields: - _type_: _description_ - """ - - # Setup - logger.info("Running code in subprocess") - try: - code = self.preprocess_code(code) - if not self.process: - self.start_process() - except BaseException: - yield {"output": traceback.format_exc()} - return - - while self.retry_count <= self.max_retries: - if self.debug_mode: - print(f"Running code:\n{code}\n---") - - self.done.clear() - - try: - self.process.stdin.write(code + "\n") - self.process.stdin.flush() - break - except BaseException: - if self.retry_count != 0: - # For UX, I like to hide this if it happens once. Obviously feels better to not see errors - # Most of the time it doesn't matter, but we should figure out why it happens frequently with: - # applescript - yield {"output": traceback.format_exc()} - yield { - "output": ( - "Retrying..." - f" ({self.retry_count}/{self.max_retries})" - ) - } - yield {"output": "Restarting process."} - - self.start_process() - - self.retry_count += 1 - if self.retry_count > self.max_retries: - yield { - "output": ( - "Maximum retries reached. Could not" - " execute code." - ) - } - return - - while True: - if not self.output_queue.empty(): - yield self.output_queue.get() - else: - time.sleep(0.1) - try: - output = self.output_queue.get( - timeout=0.3 - ) # Waits for 0.3 seconds - yield output - except queue.Empty: - if self.done.is_set(): - # Try to yank 3 more times from it... maybe there's something in there... - # (I don't know if this actually helps. Maybe we just need to yank 1 more time) - for _ in range(3): - if not self.output_queue.empty(): - yield self.output_queue.get() - time.sleep(0.2) - break - - def handle_stream_output(self, stream, is_error_stream): - """Handle the output from the subprocess - - Args: - stream (_type_): _description_ - is_error_stream (bool): _description_ - """ - for line in iter(stream.readline, ""): - if self.debug_mode: - print(f"Received output line:\n{line}\n---") - - line = self.line_postprocessor(line) - - if line is None: - continue # `line = None` is the postprocessor's signal to discard completely - - if self.detect_active_line(line): - active_line = self.detect_active_line(line) - self.output_queue.put({"active_line": active_line}) - elif self.detect_end_of_execution(line): - self.output_queue.put({"active_line": None}) - time.sleep(0.1) - self.done.set() - elif is_error_stream and "KeyboardInterrupt" in line: - self.output_queue.put({"output": "KeyboardInterrupt"}) - time.sleep(0.1) - self.done.set() - else: - self.output_queue.put({"output": line}) - - -# interpreter = SubprocessCodeInterpreter() -# interpreter.start_cmd = "python3" -# out = interpreter.run(""" -# print("hello") -# print("world") -# """) -# print(out) diff --git a/swarms/tools/prebuilt/math_eval.py b/swarms/tools/prebuilt/math_eval.py deleted file mode 100644 index 6dbff2b5..00000000 --- a/swarms/tools/prebuilt/math_eval.py +++ /dev/null @@ -1,61 +0,0 @@ -import functools -import logging - - -def math_eval(func1, func2): - """Math evaluation decorator. - - Args: - func1 (_type_): _description_ - func2 (_type_): _description_ - - Example: - >>> @math_eval(ground_truth, generated_func) - >>> def test_func(x): - >>> return x - >>> result1, result2 = test_func(5) - >>> print(f"Result from ground_truth: {result1}") - >>> print(f"Result from generated_func: {result2}") - - """ - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - result1 = func1(*args, **kwargs) - except Exception as e: - logging.error(f"Error in func1: {e}") - result1 = None - - try: - result2 = func2(*args, **kwargs) - except Exception as e: - logging.error(f"Error in func2: {e}") - result2 = None - - if result1 != result2: - logging.warning( - f"Outputs do not match: {result1} != {result2}" - ) - - return result1, result2 - - return wrapper - - return decorator - - -# def ground_truth(x): -# return x * 2 - -# def generated_func(x): -# return x - 10 - -# @math_eval(ground_truth, generated_func) -# def test_func(x): -# return x - -# result1, result2 = test_func(5) -# print(f"Result from ground_truth: {result1}") -# print(f"Result from generated_func: {result2}") diff --git a/swarms/tools/tool_parse_exec.py b/swarms/tools/tool_parse_exec.py index 7cc4369f..f118319d 100644 --- a/swarms/tools/tool_parse_exec.py +++ b/swarms/tools/tool_parse_exec.py @@ -12,17 +12,19 @@ def parse_and_execute_json( json_string: str, parse_md: bool = False, verbose: bool = False, - return_str: bool = True, -) -> dict: + max_retries: int = 3, +) -> str: """ Parses and executes a JSON string containing function names and parameters. Args: - functions (List[callable]): A list of callable functions. + functions (List[Callable[..., Any]]): A list of callable functions. json_string (str): The JSON string to parse and execute. parse_md (bool): Flag indicating whether to extract code from Markdown. verbose (bool): Flag indicating whether to enable verbose logging. return_str (bool): Flag indicating whether to return a JSON string. + max_retries (int): Maximum number of retries for executing functions. + Returns: dict: A dictionary containing the results of executing the functions with the parsed parameters. """ @@ -30,10 +32,20 @@ def parse_and_execute_json( raise ValueError("Functions and JSON string are required") if parse_md: - json_string = extract_code_from_markdown(json_string) + try: + json_string = extract_code_from_markdown(json_string) + except Exception as e: + logger.error(f"Error extracting code from Markdown: {e}") + return {"error": f"Markdown parsing failed: {str(e)}"} try: - # Create function name to function mapping + # Ensure JSON string is stripped of extraneous whitespace + json_string = json_string.strip() + if not json_string: + raise ValueError( + "JSON string is empty after stripping whitespace" + ) + function_dict = {func.__name__: func for func in functions} if verbose: @@ -42,83 +54,80 @@ def parse_and_execute_json( ) logger.info(f"Processing JSON: {json_string}") - # Parse JSON data - data = json.loads(json_string) + try: + data = json.loads(json_string) + except json.JSONDecodeError as e: + logger.error(f"Invalid JSON format: {e}") + return {"error": f"Invalid JSON format: {str(e)}"} - # Handle both single function and function list formats function_list = [] if "functions" in data: function_list = data["functions"] elif "function" in data: function_list = [data["function"]] else: - function_list = [ - data - ] # Assume entire object is single function + function_list = [data] - # Ensure function_list is a list and filter None values if isinstance(function_list, dict): function_list = [function_list] + function_list = [f for f in function_list if f] if verbose: logger.info(f"Processing {len(function_list)} functions") results = {} + for function_data in function_list: function_name = function_data.get("name") parameters = function_data.get("parameters", {}) if not function_name: - logger.warning("Function data missing name field") + logger.warning("Function data missing 'name' field") continue if verbose: logger.info( - f"Executing {function_name} with params: {parameters}" + f"Executing {function_name} with parameters: {parameters}" ) if function_name not in function_dict: - logger.warning(f"Function {function_name} not found") - results[function_name] = None + logger.warning( + f"Function '{function_name}' not found" + ) + results[function_name] = "Error: Function not found" continue - try: - result = function_dict[function_name](**parameters) - results[function_name] = str(result) - if verbose: - logger.info( - f"Result for {function_name}: {result}" + for attempt in range(max_retries): + try: + result = function_dict[function_name]( + **parameters ) - except Exception as e: - logger.error( - f"Error executing {function_name}: {str(e)}" - ) - results[function_name] = f"Error: {str(e)}" + results[function_name] = str(result) + if verbose: + logger.info( + f"Result for {function_name}: {result}" + ) + break + except Exception as e: + logger.error( + f"Attempt {attempt + 1} failed for {function_name}: {e}" + ) + if attempt == max_retries - 1: + results[function_name] = ( + f"Error after {max_retries} attempts: {str(e)}" + ) - # Format final results - if len(results) == 1: - # Return single result directly - data = {"result": next(iter(results.values()))} - else: - # Return all results - data = { - "results": results, - "summary": "\n".join( - f"{k}: {v}" for k, v in results.items() - ), - } - - if return_str: - return json.dumps(data) - else: - return data + data = { + "results": results, + "summary": "\n".join( + f"{k}: {v}" for k, v in results.items() + ), + } + + return json.dumps(data, indent=4) - except json.JSONDecodeError as e: - error = f"Invalid JSON format: {str(e)}" - logger.error(error) - return {"error": error} except Exception as e: - error = f"Error parsing and executing JSON: {str(e)}" + error = f"Unexpected error during execution: {str(e)}" logger.error(error) return {"error": error} diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py index 0a825caf..5b321ba3 100644 --- a/swarms/utils/__init__.py +++ b/swarms/utils/__init__.py @@ -1,4 +1,3 @@ -from swarms.utils.class_args_wrapper import print_class_parameters from swarms.utils.data_to_text import ( csv_to_text, data_to_text, @@ -13,7 +12,6 @@ from swarms.utils.file_processing import ( zip_folders, ) from swarms.utils.markdown_message import display_markdown_message -from swarms.tools.prebuilt.math_eval import math_eval from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.try_except_wrapper import try_except_wrapper @@ -21,7 +19,6 @@ from swarms.utils.calculate_func_metrics import profile_func __all__ = [ - "print_class_parameters", "csv_to_text", "data_to_text", "json_to_text", @@ -32,7 +29,6 @@ __all__ = [ "create_file_in_folder", "zip_folders", "display_markdown_message", - "math_eval", "extract_code_from_markdown", "pdf_to_text", "try_except_wrapper", diff --git a/swarms/utils/async_file_creation.py b/swarms/utils/async_file_creation.py deleted file mode 100644 index 6c35e95d..00000000 --- a/swarms/utils/async_file_creation.py +++ /dev/null @@ -1,106 +0,0 @@ -# In order to accelerate the ops of creating files, we use the async file creation method. -import os -import asyncio -from aiofiles import open as aio_open -from typing import List - - -async def async_create_file(file_path: str, content: str) -> None: - """ - Asynchronously creates a file at the specified path and writes the given content to it. - - Args: - file_path (str): The path where the file will be created. - content (str): The content to be written to the file. - - Returns: - None - """ - async with aio_open(file_path, "w") as file: - await file.write(content) - - -async def create_multiple_files( - file_paths: List[str], contents: List[str] -) -> None: - """ - Asynchronously creates multiple files at the specified paths and writes the corresponding content to each file. - - Args: - file_paths (List[str]): A list of paths where the files will be created. - contents (List[str]): A list of content to be written to each file, corresponding to the file paths. - - Returns: - None - """ - tasks = [ - async_create_file(file_path, content) - for file_path, content in zip(file_paths, contents) - ] - await asyncio.gather(*tasks) - - -async def create_file_with_directory( - file_path: str, content: str -) -> None: - """ - Creates a file with the specified directory path and content. If the directory does not exist, it is created. - - Args: - file_path (str): The path of the file to be created, including the directory. - content (str): The content to be written to the file. - - Returns: - None - """ - directory = os.path.dirname(file_path) - if not os.path.exists(directory): - os.makedirs(directory) - - await async_create_file(file_path, content) - - -def sync_create_file(file_path: str, content: str) -> None: - """ - Synchronously creates a file at the specified path and writes the given content to it. - - Args: - file_path (str): The path where the file will be created. - content (str): The content to be written to the file. - - Returns: - None - """ - asyncio.run(async_create_file(file_path, content)) - - -def sync_create_multiple_files( - file_paths: List[str], contents: List[str] -) -> None: - """ - Synchronously creates multiple files at the specified paths and writes the corresponding content to each file. - - Args: - file_paths (List[str]): A list of paths where the files will be created. - contents (List[str]): A list of content to be written to each file, corresponding to the file paths. - - Returns: - None - """ - asyncio.run(create_multiple_files(file_paths, contents)) - - -def sync_create_file_with_directory( - file_path: str, content: str -) -> None: - """ - Synchronously creates a file with the specified directory path and content. If the directory does not exist, it is created. - - Args: - file_path (str): The path of the file to be created, including the directory. - content (str): The content to be written to the file. - - Returns: - None - """ - asyncio.run(create_file_with_directory(file_path, content)) diff --git a/swarms/utils/auto_download_check_packages.py b/swarms/utils/auto_download_check_packages.py index 555967a3..9ede5ca6 100644 --- a/swarms/utils/auto_download_check_packages.py +++ b/swarms/utils/auto_download_check_packages.py @@ -144,3 +144,7 @@ def auto_check_and_download_package( success = False return success + + +if __name__ == "__main__": + print(auto_check_and_download_package("torch")) diff --git a/swarms/utils/class_args_wrapper.py b/swarms/utils/class_args_wrapper.py deleted file mode 100644 index f24932cf..00000000 --- a/swarms/utils/class_args_wrapper.py +++ /dev/null @@ -1,36 +0,0 @@ -import inspect - - -def print_class_parameters(cls, api_format: bool = False): - """ - Print the parameters of a class constructor. - - Parameters: - cls (type): The class to inspect. - - Example: - >>> print_class_parameters(Agent) - Parameter: x, Type: - Parameter: y, Type: - """ - try: - # Get the parameters of the class constructor - sig = inspect.signature(cls.__init__) - params = sig.parameters - - if api_format: - param_dict = {} - for name, param in params.items(): - if name == "self": - continue - param_dict[name] = str(param.annotation) - return param_dict - - # Print the parameters - for name, param in params.items(): - if name == "self": - continue - print(f"Parameter: {name}, Type: {param.annotation}") - - except Exception as e: - print(f"An error occurred while inspecting the class: {e}") diff --git a/swarms/utils/function_caller_model.py b/swarms/utils/function_caller_model.py new file mode 100644 index 00000000..ab2e8772 --- /dev/null +++ b/swarms/utils/function_caller_model.py @@ -0,0 +1,143 @@ +import os +import subprocess +from concurrent.futures import ThreadPoolExecutor +from typing import List + +from loguru import logger +from pydantic import BaseModel + + +try: + from openai import OpenAI +except ImportError: + logger.error( + "OpenAI library not found. Please install the OpenAI library by running 'pip install openai'" + ) + import sys + + subprocess.run([sys.executable, "-m", "pip", "install", "openai"]) + from openai import OpenAI + + +SUPPORTED_MODELS = [ + "o3-mini-2025-1-31", + "o1-2024-12-17", + "gpt-4o-mini-2024-07-18", + "gpt-4o-2024-08-06", +] + + +def check_api_key(): + api_key = os.getenv("OPENAI_API_KEY") + if api_key is None: + raise ValueError( + "API key is not set. Please set the API key using the api_key parameter." + ) + return api_key + + +class OpenAIFunctionCaller: + """ + A class to interact with the OpenAI API for generating text based on a system prompt and a task. + + Attributes: + - system_prompt (str): The system prompt to guide the AI's response. + - api_key (str): The API key for the OpenAI service. + - temperature (float): The temperature parameter for the AI model, controlling randomness. + - base_model (BaseModel): The Pydantic model to parse the response into. + - max_tokens (int): The maximum number of tokens in the response. + - client (OpenAI): The OpenAI client instance. + """ + + def __init__( + self, + system_prompt: str, + base_model: BaseModel, + api_key: str = check_api_key(), + temperature: float = 0.1, + max_tokens: int = 5000, + model_name: str = "gpt-4o-2024-08-06", + ): + self.system_prompt = system_prompt + self.api_key = api_key + self.temperature = temperature + self.base_model = base_model + self.max_tokens = max_tokens + self.model_name = model_name + + self.client = OpenAI(api_key=self.api_key) + + def run(self, task: str): + """ + Run the OpenAI model with the system prompt and task to generate a response. + + Args: + - task (str): The task to be completed. + - *args: Additional positional arguments for the OpenAI API. + - **kwargs: Additional keyword arguments for the OpenAI API. + + Returns: + - BaseModel: The parsed response based on the base_model. + """ + try: + completion = self.client.beta.chat.completions.parse( + model=self.model_name, + messages=[ + {"role": "system", "content": self.system_prompt}, + {"role": "user", "content": task}, + ], + response_format=self.base_model, + max_tokens=self.max_tokens, + temperature=self.temperature, + ) + + return completion.choices[0].message.parsed + + except Exception as e: + print(f"There was an error: {e}") + + def check_api_key(self): + self.api_key = os.getenv("OPENAI_API_KEY") + + if self.api_key is None: + raise ValueError( + "API key is not set. Please set the API key using the api_key parameter." + ) + + def check_model_support(self): + # need to print the supported models + for model in SUPPORTED_MODELS: + print(model) + + return SUPPORTED_MODELS + + def batch_run(self, tasks: List[str]) -> List[BaseModel]: + """ + Batch run the OpenAI model with the system prompt and task to generate a response. + """ + return [self.run(task) for task in tasks] + + def concurrent_run(self, tasks: List[str]) -> List[BaseModel]: + """ + Concurrent run the OpenAI model with the system prompt and task to generate a response. + """ + with ThreadPoolExecutor(max_workers=len(tasks)) as executor: + return list(executor.map(self.run, tasks)) + + +# class TestModel(BaseModel): +# name: str +# age: int + +# # Example usage +# model = OpenAIFunctionCaller( +# system_prompt="You are a helpful assistant that returns structured data about people.", +# base_model=TestModel, +# api_key=os.getenv("OPENAI_API_KEY"), +# temperature=0.7, +# max_tokens=1000 +# ) + +# # Test with a more appropriate prompt for the TestModel schema +# response = model.run("Tell me about a person named John who is 25 years old") +# print(response) diff --git a/swarms/utils/lazy_loader.py b/swarms/utils/lazy_loader.py deleted file mode 100644 index c9725e51..00000000 --- a/swarms/utils/lazy_loader.py +++ /dev/null @@ -1,263 +0,0 @@ -""" -Lazy Package Loader - -This module provides utilities for lazy loading Python packages to improve startup time -and reduce memory usage by only importing packages when they are actually used. - -Features: -- Type-safe lazy loading of packages -- Support for nested module imports -- Auto-completion support in IDEs -- Thread-safe implementation -- Comprehensive test coverage -""" - -from types import ModuleType -from typing import ( - Optional, - Dict, - Any, - Callable, - Type, - TypeVar, - Union, - cast, -) -import importlib -import functools -import threading -from importlib.util import find_spec -from swarms.utils.auto_download_check_packages import ( - auto_check_and_download_package, -) - - -T = TypeVar("T") -C = TypeVar("C") - - -class ImportError(Exception): - """Raised when a lazy import fails.""" - - pass - - -class LazyLoader: - """ - A thread-safe lazy loader for Python packages that only imports them when accessed. - - Attributes: - _module_name (str): The name of the module to be lazily loaded - _module (Optional[ModuleType]): The cached module instance once loaded - _lock (threading.Lock): Thread lock for safe concurrent access - - Examples: - >>> np = LazyLoader('numpy') - >>> # numpy is not imported yet - >>> result = np.array([1, 2, 3]) - >>> # numpy is imported only when first used - """ - - def __init__(self, module_name: str) -> None: - """ - Initialize the lazy loader with a module name. - - Args: - module_name: The fully qualified name of the module to lazily load - - Raises: - ImportError: If the module cannot be found in sys.path - """ - self._module_name = module_name - self._module: Optional[ModuleType] = None - self._lock = threading.Lock() - - auto_check_and_download_package( - module_name, package_manager="pip" - ) - - # Verify module exists without importing it - if find_spec(module_name) is None: - raise ImportError( - f"Module '{module_name}' not found in sys.path" - ) - - def _load_module(self) -> ModuleType: - """ - Thread-safe module loading. - - Returns: - ModuleType: The loaded module - - Raises: - ImportError: If module import fails - """ - if self._module is None: - with self._lock: - # Double-check pattern - if self._module is None: - try: - self._module = importlib.import_module( - self._module_name - ) - except Exception as e: - raise ImportError( - f"Failed to import '{self._module_name}': {str(e)}" - ) - return cast(ModuleType, self._module) - - def __getattr__(self, name: str) -> Any: - """ - Intercepts attribute access to load the module if needed. - - Args: - name: The attribute name being accessed - - Returns: - Any: The requested attribute from the loaded module - - Raises: - AttributeError: If the attribute doesn't exist in the module - """ - module = self._load_module() - try: - return getattr(module, name) - except AttributeError: - raise AttributeError( - f"Module '{self._module_name}' has no attribute '{name}'" - ) - - def __dir__(self) -> list[str]: - """ - Returns list of attributes for autocomplete support. - - Returns: - List[str]: Available attributes in the module - """ - return dir(self._load_module()) - - def is_loaded(self) -> bool: - """ - Check if the module has been loaded. - - Returns: - bool: True if module is loaded, False otherwise - """ - return self._module is not None - - -class LazyLoaderMetaclass(type): - """Metaclass to handle lazy loading behavior""" - - def __call__(cls, *args, **kwargs): - if hasattr(cls, "_lazy_loader"): - return super().__call__(*args, **kwargs) - return super().__call__(*args, **kwargs) - - -class LazyClassLoader: - """ - A descriptor that creates the actual class only when accessed, - with proper inheritance support. - """ - - def __init__( - self, class_name: str, bases: tuple, namespace: Dict[str, Any] - ): - self.class_name = class_name - self.bases = bases - self.namespace = namespace - self._real_class: Optional[Type] = None - self._lock = threading.Lock() - - def _create_class(self) -> Type: - """Creates the actual class if it hasn't been created yet.""" - if self._real_class is None: - with self._lock: - if self._real_class is None: - # Update namespace to include metaclass - namespace = dict(self.namespace) - namespace["__metaclass__"] = LazyLoaderMetaclass - - # Create the class with metaclass - new_class = LazyLoaderMetaclass( - self.class_name, self.bases, namespace - ) - - # Store reference to this loader - new_class._lazy_loader = self - self._real_class = new_class - - return cast(Type, self._real_class) - - def __call__(self, *args: Any, **kwargs: Any) -> Any: - """Creates an instance of the lazy loaded class.""" - real_class = self._create_class() - # Use the metaclass __call__ method - return real_class(*args, **kwargs) - - def __instancecheck__(self, instance: Any) -> bool: - """Support for isinstance() checks""" - real_class = self._create_class() - return isinstance(instance, real_class) - - def __subclasscheck__(self, subclass: Type) -> bool: - """Support for issubclass() checks""" - real_class = self._create_class() - return issubclass(subclass, real_class) - - -def lazy_import(*names: str) -> Dict[str, LazyLoader]: - """ - Create multiple lazy loaders at once. - - Args: - *names: Module names to create lazy loaders for - - Returns: - Dict[str, LazyLoader]: Dictionary mapping module names to their lazy loaders - - Examples: - >>> modules = lazy_import('numpy', 'pandas', 'matplotlib.pyplot') - >>> np = modules['numpy'] - >>> pd = modules['pandas'] - >>> plt = modules['matplotlib.pyplot'] - """ - return {name.split(".")[-1]: LazyLoader(name) for name in names} - - -def lazy_import_decorator( - target: Union[Callable[..., T], Type[C]] -) -> Union[Callable[..., T], Type[C], LazyClassLoader]: - """ - Enhanced decorator that supports both lazy imports and lazy class loading. - """ - if isinstance(target, type): - # Store the original class details - namespace = { - name: value - for name, value in target.__dict__.items() - if not name.startswith("__") - or name in ("__init__", "__new__") - } - - # Create lazy loader - loader = LazyClassLoader( - target.__name__, target.__bases__, namespace - ) - - # Preserve class metadata - loader.__module__ = target.__module__ - loader.__doc__ = target.__doc__ - - # Add reference to original class - loader._original_class = target - - return loader - else: - # Handle function decoration - @functools.wraps(target) - def wrapper(*args: Any, **kwargs: Any) -> T: - return target(*args, **kwargs) - - return wrapper diff --git a/swarms/utils/litellm_tokenizer.py b/swarms/utils/litellm_tokenizer.py new file mode 100644 index 00000000..c2743b10 --- /dev/null +++ b/swarms/utils/litellm_tokenizer.py @@ -0,0 +1,20 @@ +import subprocess + + +def count_tokens(text: str, model: str = "gpt-4o") -> int: + """Count the number of tokens in the given text.""" + try: + from litellm import encode + except ImportError: + import sys + + subprocess.run( + [sys.executable, "-m", "pip", "install", "litellm"] + ) + from litellm import encode + + return len(encode(model=model, text=text)) + + +# if __name__ == "__main__": +# print(count_tokens("Hello, how are you?")) diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index 2dbdc97e..a4452b70 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -2,9 +2,16 @@ try: from litellm import completion except ImportError: import subprocess - - subprocess.check_call(["pip", "install", "litellm"]) + import sys import litellm + + print("Installing litellm") + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-U", "litellm"] + ) + print("litellm installed") + from litellm import completion litellm.set_verbose = True @@ -25,6 +32,9 @@ class LiteLLM: temperature: float = 0.5, max_tokens: int = 4000, ssl_verify: bool = False, + max_completion_tokens: int = 4000, + *args, + **kwargs, ): """ Initialize the LiteLLM with the given parameters. @@ -42,6 +52,9 @@ class LiteLLM: self.temperature = temperature self.max_tokens = max_tokens self.ssl_verify = ssl_verify + self.max_completion_tokens = max_completion_tokens + + self.max_completion_tokens = max_tokens def _prepare_messages(self, task: str) -> list: """ @@ -64,7 +77,7 @@ class LiteLLM: return messages - def run(self, task: str, *args, **kwargs): + def run(self, task: str, tools: list = [], *args, **kwargs): """ Run the LLM model for the given task. diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py index af5c7239..9295e9eb 100644 --- a/swarms/utils/loguru_logger.py +++ b/swarms/utils/loguru_logger.py @@ -1,10 +1,10 @@ import os import uuid +import sys from loguru import logger def initialize_logger(log_folder: str = "logs"): - AGENT_WORKSPACE = "agent_workspace" # Check if WORKSPACE_DIR is set, if not, set it to AGENT_WORKSPACE @@ -24,14 +24,27 @@ def initialize_logger(log_folder: str = "logs"): log_folder_path, f"{log_folder}_{uuid_for_log}.log" ) + # Remove default handler and add custom handlers + logger.remove() + + # Add console handler with colors + logger.add( + sys.stdout, + colorize=True, + format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}", + level="INFO", + ) + + # Add file handler logger.add( log_file_path, + format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}", level="INFO", - colorize=True, backtrace=True, diagnose=True, enqueue=True, retention="10 days", # compression="zip", ) + return logger diff --git a/swarms/utils/pdf_to_text.py b/swarms/utils/pdf_to_text.py index 8df8e065..af39c747 100644 --- a/swarms/utils/pdf_to_text.py +++ b/swarms/utils/pdf_to_text.py @@ -4,8 +4,11 @@ try: import pypdf except ImportError: import subprocess + import sys - subprocess.check_call(["python", "-m", "pip", "install", "pypdf"]) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "pypdf"] + ) import pypdf diff --git a/swarms/utils/update_agent_system_prompts.py b/swarms/utils/update_agent_system_prompts.py deleted file mode 100644 index e6f82426..00000000 --- a/swarms/utils/update_agent_system_prompts.py +++ /dev/null @@ -1,53 +0,0 @@ -import concurrent.futures -from typing import List, Union -from swarms.structs.agent import Agent - - -def update_system_prompts( - agents: List[Union[Agent, str]], - prompt: str, -) -> List[Agent]: - """ - Update system prompts for a list of agents concurrently. - - Args: - agents: List of Agent objects or strings to update - prompt: The prompt text to append to each agent's system prompt - - Returns: - List of updated Agent objects - """ - if not agents: - return agents - - def update_agent_prompt(agent: Union[Agent, str]) -> Agent: - # Convert string to Agent if needed - if isinstance(agent, str): - agent = Agent( - agent_name=agent, - system_prompt=prompt, # Initialize with the provided prompt - ) - else: - # Preserve existing prompt and append new one - existing_prompt = ( - agent.system_prompt if agent.system_prompt else "" - ) - agent.system_prompt = existing_prompt + "\n" + prompt - return agent - - # Use ThreadPoolExecutor for concurrent execution - max_workers = min(len(agents), 4) # Reasonable thread count - with concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers - ) as executor: - futures = [] - for agent in agents: - future = executor.submit(update_agent_prompt, agent) - futures.append(future) - - # Collect results as they complete - updated_agents = [] - for future in concurrent.futures.as_completed(futures): - updated_agents.append(future.result()) - - return updated_agents diff --git a/swarms/utils/wrapper_clusterop.py b/swarms/utils/wrapper_clusterop.py index 646383c6..6119c532 100644 --- a/swarms/utils/wrapper_clusterop.py +++ b/swarms/utils/wrapper_clusterop.py @@ -1,3 +1,4 @@ +import platform from typing import Any @@ -53,6 +54,14 @@ def exec_callable_with_clusterops( logger.info(f"Attempting to run on device: {device}") device = device.lower() + # Check if the platform is Windows and do nothing if true + if platform.system() == "Windows": + if enable_logging: + logger.info( + "Platform is Windows, not executing on device." + ) + return None + if device == "cpu": if enable_logging: logger.info("Device set to CPU") @@ -104,3 +113,15 @@ def exec_callable_with_clusterops( if enable_logging: logger.error(f"An error occurred during execution: {e}") raise + + +# def test_clusterops(x): +# return x + 1 + +# example = exec_callable_with_clusterops( +# device="cpu", +# all_cores=True, +# func = test_clusterops, +# ) + +# print(example) diff --git a/tests/agent_evals/auto_test_eval.py b/tests/agent_evals/auto_test_eval.py index b9c770fa..06a1c839 100644 --- a/tests/agent_evals/auto_test_eval.py +++ b/tests/agent_evals/auto_test_eval.py @@ -81,12 +81,6 @@ class SwarmsIssueReporter: # Initialize logging log_path = os.path.join(os.getcwd(), "logs", log_file) os.makedirs(os.path.dirname(log_path), exist_ok=True) - logger.add( - log_path, - rotation="1 day", - retention="1 month", - compression="zip", - ) # Issue tracking self.issues_created = [] diff --git a/tests/agent_evals/github_summarizer_agent.py b/tests/agent_evals/github_summarizer_agent.py index c461c307..17da45dc 100644 --- a/tests/agent_evals/github_summarizer_agent.py +++ b/tests/agent_evals/github_summarizer_agent.py @@ -9,15 +9,6 @@ from swarm_models import OpenAIChat GITHUB_REPO = "kyegomez/swarms" # Swarms GitHub repository GITHUB_API_URL = f"https://api.github.com/repos/{GITHUB_REPO}/commits" -# Initialize Loguru -logger.add( - "commit_summary.log", - rotation="1 MB", - level="INFO", - backtrace=True, - diagnose=True, -) - # Step 1: Fetch the latest commits from GitHub def fetch_latest_commits( diff --git a/tests/profiling_agent.py b/tests/profiling_agent.py index 8f1b0220..4b7dbd70 100644 --- a/tests/profiling_agent.py +++ b/tests/profiling_agent.py @@ -1,7 +1,3 @@ -import time - -start_time = time.time() - import os import uuid from swarms import Agent @@ -9,6 +5,9 @@ from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) +import time + +start_time = time.time() # Get the OpenAI API key from the environment variable diff --git a/tests/requrements.txt b/tests/requrements.txt deleted file mode 100644 index e4a264a5..00000000 --- a/tests/requrements.txt +++ /dev/null @@ -1,6 +0,0 @@ -pytest -swarms -loguru -pydantic -swarm-models -loguru diff --git a/tests/structs/test_company.py b/tests/structs/test_company.py deleted file mode 100644 index 746e4c83..00000000 --- a/tests/structs/test_company.py +++ /dev/null @@ -1,72 +0,0 @@ -import pytest - -from swarm_models import OpenAIChat -from swarms.structs.agent import Agent -from swarms.structs.company import Company - -# Mock OpenAIChat instance -llm = OpenAIChat(openai_api_key="test_key", max_tokens=4000) - -# Mock Agents -ceo = Agent(llm=llm, name="CEO") -dev = Agent(llm=llm, name="Developer") -va = Agent(llm=llm, name="VA") -hr = Agent(llm=llm, name="HR") - -shared_instructions = "Listen to your boss" - - -def test_add_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - company.add(hr) - assert hr in company.agents - - -def test_get_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - company.add(hr) - assert company.get("HR") == hr - - -def test_remove_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - company.add(hr) - company.remove(hr) - assert hr not in company.agents - - -def test_add_existing_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - company.add(hr) - with pytest.raises(ValueError): - company.add(hr) - - -def test_get_nonexistent_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - with pytest.raises(ValueError): - company.get("Nonexistent") - - -def test_remove_nonexistent_agent(): - company = Company( - org_chart=[[ceo, [dev, va]]], - shared_instructions=shared_instructions, - ) - with pytest.raises(ValueError): - company.remove(hr) diff --git a/tests/structs/test_matrix_swarm.py b/tests/structs/test_matrix_swarm.py new file mode 100644 index 00000000..4556c693 --- /dev/null +++ b/tests/structs/test_matrix_swarm.py @@ -0,0 +1,216 @@ +from swarms.structs.matrix_swarm import AgentMatrix, AgentOutput +from swarms import Agent + + +def create_test_matrix(rows: int, cols: int) -> AgentMatrix: + """Helper function to create a test agent matrix""" + agents = [ + [ + Agent( + agent_name=f"TestAgent-{i}-{j}", + system_prompt="Test prompt", + ) + for j in range(cols) + ] + for i in range(rows) + ] + return AgentMatrix(agents) + + +def test_init(): + """Test AgentMatrix initialization""" + # Test valid initialization + matrix = create_test_matrix(2, 2) + assert isinstance(matrix, AgentMatrix) + assert len(matrix.agents) == 2 + assert len(matrix.agents[0]) == 2 + + # Test invalid initialization + try: + AgentMatrix([[1, 2], [3, 4]]) # Non-agent elements + assert False, "Should raise ValueError" + except ValueError: + pass + + try: + AgentMatrix([]) # Empty matrix + assert False, "Should raise ValueError" + except ValueError: + pass + + +def test_transpose(): + """Test matrix transpose operation""" + matrix = create_test_matrix(2, 3) + transposed = matrix.transpose() + + assert len(transposed.agents) == 3 # Original cols become rows + assert len(transposed.agents[0]) == 2 # Original rows become cols + + # Verify agent positions + for i in range(2): + for j in range(3): + assert ( + matrix.agents[i][j].agent_name + == transposed.agents[j][i].agent_name + ) + + +def test_add(): + """Test matrix addition""" + matrix1 = create_test_matrix(2, 2) + matrix2 = create_test_matrix(2, 2) + + result = matrix1.add(matrix2) + assert len(result.agents) == 2 + assert len(result.agents[0]) == 2 + + # Test incompatible dimensions + matrix3 = create_test_matrix(2, 3) + try: + matrix1.add(matrix3) + assert False, "Should raise ValueError" + except ValueError: + pass + + +def test_scalar_multiply(): + """Test scalar multiplication""" + matrix = create_test_matrix(2, 2) + scalar = 3 + result = matrix.scalar_multiply(scalar) + + assert len(result.agents) == 2 + assert len(result.agents[0]) == 2 * scalar + + # Verify agent duplication + for i in range(len(result.agents)): + for j in range(0, len(result.agents[0]), scalar): + original_agent = matrix.agents[i][j // scalar] + for k in range(scalar): + assert ( + result.agents[i][j + k].agent_name + == original_agent.agent_name + ) + + +def test_multiply(): + """Test matrix multiplication""" + matrix1 = create_test_matrix(2, 3) + matrix2 = create_test_matrix(3, 2) + inputs = ["test query 1", "test query 2"] + + result = matrix1.multiply(matrix2, inputs) + assert len(result) == 2 # Number of rows in first matrix + assert len(result[0]) == 2 # Number of columns in second matrix + + # Verify output structure + for row in result: + for output in row: + assert isinstance(output, AgentOutput) + assert isinstance(output.input_query, str) + assert isinstance(output.metadata, dict) + + +def test_subtract(): + """Test matrix subtraction""" + matrix1 = create_test_matrix(2, 2) + matrix2 = create_test_matrix(2, 2) + + result = matrix1.subtract(matrix2) + assert len(result.agents) == 2 + assert len(result.agents[0]) == 2 + + +def test_identity(): + """Test identity matrix creation""" + matrix = create_test_matrix(3, 3) + identity = matrix.identity(3) + + assert len(identity.agents) == 3 + assert len(identity.agents[0]) == 3 + + # Verify diagonal elements are from original matrix + for i in range(3): + assert ( + identity.agents[i][i].agent_name + == matrix.agents[i][i].agent_name + ) + + # Verify non-diagonal elements are zero agents + for j in range(3): + if i != j: + assert identity.agents[i][j].agent_name.startswith( + "Zero-Agent" + ) + + +def test_determinant(): + """Test determinant calculation""" + # Test 1x1 matrix + matrix1 = create_test_matrix(1, 1) + det1 = matrix1.determinant() + assert det1 is not None + + # Test 2x2 matrix + matrix2 = create_test_matrix(2, 2) + det2 = matrix2.determinant() + assert det2 is not None + + # Test non-square matrix + matrix3 = create_test_matrix(2, 3) + try: + matrix3.determinant() + assert False, "Should raise ValueError" + except ValueError: + pass + + +def test_save_to_file(tmp_path): + """Test saving matrix to file""" + import os + + matrix = create_test_matrix(2, 2) + file_path = os.path.join(tmp_path, "test_matrix.json") + + matrix.save_to_file(file_path) + assert os.path.exists(file_path) + + # Verify file contents + import json + + with open(file_path, "r") as f: + data = json.load(f) + assert "agents" in data + assert "outputs" in data + assert len(data["agents"]) == 2 + assert len(data["agents"][0]) == 2 + + +def run_all_tests(): + """Run all test functions""" + test_functions = [ + test_init, + test_transpose, + test_add, + test_scalar_multiply, + test_multiply, + test_subtract, + test_identity, + test_determinant, + ] + + for test_func in test_functions: + try: + test_func() + print(f"✅ {test_func.__name__} passed") + except AssertionError as e: + print(f"❌ {test_func.__name__} failed: {str(e)}") + except Exception as e: + print( + f"❌ {test_func.__name__} failed with exception: {str(e)}" + ) + + +if __name__ == "__main__": + run_all_tests() diff --git a/tests/structs/test_multiprocess.py b/tests/structs/test_multiprocess.py deleted file mode 100644 index 92d5dc83..00000000 --- a/tests/structs/test_multiprocess.py +++ /dev/null @@ -1,177 +0,0 @@ -import asyncio -import time -from swarms.structs.agent import Agent -from swarms.structs.multi_process_workflow import MultiProcessWorkflow - - -def create_test_agent(name: str) -> Agent: - """Create a test agent that simply returns its input with a timestamp""" - return Agent( - agent_name=name, - system_prompt=f"Test prompt for {name}", - model_name="gpt-4o-mini", - max_loops=1, - ) - - -def test_initialization(): - """Test basic workflow initialization""" - print("\n=== Testing Workflow Initialization ===") - try: - agents = [create_test_agent(f"agent{i}") for i in range(3)] - workflow = MultiProcessWorkflow(max_workers=2, agents=agents) - - print("✓ Created workflow with configuration:") - print(f" - Max workers: {workflow.max_workers}") - print(f" - Number of agents: {len(workflow.agents)}") - print(f" - Autosave: {workflow.autosave}") - print("✓ Initialization test passed") - except Exception as e: - print(f"✗ Initialization test failed: {str(e)}") - raise - - -def test_execute_task(): - """Test execution of a single task""" - print("\n=== Testing Task Execution ===") - try: - agents = [create_test_agent("test_agent")] - workflow = MultiProcessWorkflow(agents=agents) - - test_task = "Return this message with timestamp" - result = workflow.execute_task(test_task) - - print("✓ Task executed successfully") - print(f" - Input task: {test_task}") - print(f" - Result: {result}") - print("✓ Task execution test passed") - except Exception as e: - print(f"✗ Task execution test failed: {str(e)}") - raise - - -def test_parallel_run(): - """Test parallel execution of tasks""" - print("\n=== Testing Parallel Run ===") - try: - agents = [create_test_agent(f"agent{i}") for i in range(3)] - workflow = MultiProcessWorkflow(max_workers=2, agents=agents) - - test_task = "Process this in parallel" - results = workflow.run(test_task) - - print("✓ Parallel execution completed") - # print(f" - Number of results: {len(results)}") - print(f" - Results: {results}") - print("✓ Parallel run test passed") - except Exception as e: - print(f"✗ Parallel run test failed: {str(e)}") - raise - - -async def test_async_run(): - """Test asynchronous execution of tasks""" - print("\n=== Testing Async Run ===") - try: - agents = [create_test_agent(f"agent{i}") for i in range(3)] - workflow = MultiProcessWorkflow(max_workers=2, agents=agents) - - test_task = "Process this asynchronously" - results = await workflow.async_run(test_task) - - print("✓ Async execution completed") - print(f" - Number of results: {len(results)}") - print(f" - Results: {results}") - print("✓ Async run test passed") - except Exception as e: - print(f"✗ Async run test failed: {str(e)}") - raise - - -def test_batched_run(): - """Test batch execution of tasks""" - print("\n=== Testing Batched Run ===") - try: - agents = [create_test_agent(f"agent{i}") for i in range(2)] - workflow = MultiProcessWorkflow(max_workers=2, agents=agents) - - tasks = [f"Batch task {i}" for i in range(5)] - results = workflow.batched_run(tasks, batch_size=2) - - print("✓ Batch execution completed") - print(f" - Number of tasks: {len(tasks)}") - print(" - Batch size: 2") - print(f" - Results: {results}") - print("✓ Batched run test passed") - except Exception as e: - print(f"✗ Batched run test failed: {str(e)}") - raise - - -def test_concurrent_run(): - """Test concurrent execution of tasks""" - print("\n=== Testing Concurrent Run ===") - try: - agents = [create_test_agent(f"agent{i}") for i in range(2)] - workflow = MultiProcessWorkflow(max_workers=2, agents=agents) - - tasks = [f"Concurrent task {i}" for i in range(4)] - results = workflow.concurrent_run(tasks) - - print("✓ Concurrent execution completed") - print(f" - Number of tasks: {len(tasks)}") - print(f" - Results: {results}") - print("✓ Concurrent run test passed") - except Exception as e: - print(f"✗ Concurrent run test failed: {str(e)}") - raise - - -def test_error_handling(): - """Test error handling in workflow""" - print("\n=== Testing Error Handling ===") - try: - # Create workflow with no agents to trigger error - workflow = MultiProcessWorkflow(max_workers=2, agents=None) - result = workflow.execute_task( - "This should handle the error gracefully" - ) - - print("✓ Error handled gracefully") - print(f" - Result when no agents: {result}") - print("✓ Error handling test passed") - except Exception as e: - print(f"✗ Error handling test failed: {str(e)}") - raise - - -async def run_all_tests(): - """Run all tests""" - print("\n=== Starting MultiProcessWorkflow Test Suite ===") - start_time = time.time() - - try: - # Run synchronous tests - test_initialization() - test_execute_task() - test_parallel_run() - test_batched_run() - test_concurrent_run() - test_error_handling() - - # Run async test - await test_async_run() - - end_time = time.time() - duration = round(end_time - start_time, 2) - print("\n=== Test Suite Completed Successfully ===") - print(f"Time taken: {duration} seconds") - - except Exception as e: - print("\n=== Test Suite Failed ===") - print(f"Error: {str(e)}") - raise - - -if __name__ == "__main__": - asyncio.run(run_all_tests()) diff --git a/tests/structs/test_team.py b/tests/structs/test_team.py deleted file mode 100644 index df92fe95..00000000 --- a/tests/structs/test_team.py +++ /dev/null @@ -1,52 +0,0 @@ -import json -import unittest - -from swarm_models import OpenAIChat -from swarms.structs import Agent, Task -from swarms.structs.team import Team - - -class TestTeam(unittest.TestCase): - def setUp(self): - self.agent = Agent( - llm=OpenAIChat(openai_api_key=""), - max_loops=1, - dashboard=False, - ) - self.task = Task( - description="What's the weather in miami", - agent=self.agent, - ) - self.team = Team( - tasks=[self.task], - agents=[self.agent], - architecture="sequential", - verbose=False, - ) - - def test_check_config(self): - with self.assertRaises(ValueError): - self.team.check_config({"config": None}) - - with self.assertRaises(ValueError): - self.team.check_config( - {"config": json.dumps({"agents": [], "tasks": []})} - ) - - def test_run(self): - self.assertEqual(self.team.run(), self.task.run()) - - def test_sequential_loop(self): - self.assertEqual( - self.team._Team__sequential_loop(), self.task.run() - ) - - def test_log(self): - self.assertIsNone(self.team._Team__log("Test message")) - - self.team.verbose = True - self.assertIsNone(self.team._Team__log("Test message")) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/telemetry/test_user_utils.py b/tests/telemetry/test_user_utils.py index 96f32378..d1f72404 100644 --- a/tests/telemetry/test_user_utils.py +++ b/tests/telemetry/test_user_utils.py @@ -1,6 +1,6 @@ import uuid -from swarms.telemetry.user_utils import ( +from swarms.telemetry.main import ( generate_unique_identifier, generate_user_id, get_machine_id, diff --git a/tests/tools/test_base_tool.py b/tests/tools/test_base_tool.py new file mode 100644 index 00000000..1b6cdeeb --- /dev/null +++ b/tests/tools/test_base_tool.py @@ -0,0 +1,126 @@ +from pydantic import BaseModel +from typing import Optional +import json + +from swarms.tools.base_tool import BaseTool + + +class TestModel(BaseModel): + name: str + age: int + email: Optional[str] = None + + +def sample_function(x: int, y: int) -> int: + """Test function for addition.""" + return x + y + + +def test_func_to_dict(): + print("Testing func_to_dict") + tool = BaseTool() + + result = tool.func_to_dict( + function=sample_function, + name="sample_function", + description="Test function", + ) + + assert result["type"] == "function" + assert result["function"]["name"] == "sample_function" + assert "parameters" in result["function"] + print("func_to_dict test passed") + + +def test_base_model_to_dict(): + print("Testing base_model_to_dict") + tool = BaseTool() + + result = tool.base_model_to_dict(TestModel) + + assert "type" in result + assert "properties" in result["properties"] + assert "name" in result["properties"]["properties"] + print("base_model_to_dict test passed") + + +def test_detect_tool_input_type(): + print("Testing detect_tool_input_type") + tool = BaseTool() + + model = TestModel(name="Test", age=25) + assert tool.detect_tool_input_type(model) == "Pydantic" + + dict_input = {"key": "value"} + assert tool.detect_tool_input_type(dict_input) == "Dictionary" + + assert tool.detect_tool_input_type(sample_function) == "Function" + print("detect_tool_input_type test passed") + + +def test_execute_tool_by_name(): + print("Testing execute_tool_by_name") + tool = BaseTool( + function_map={"sample_function": sample_function}, + verbose=True, + ) + + response = json.dumps( + {"name": "sample_function", "parameters": {"x": 1, "y": 2}} + ) + + result = tool.execute_tool_by_name("sample_function", response) + assert result == 3 + print("execute_tool_by_name test passed") + + +def test_check_str_for_functions_valid(): + print("Testing check_str_for_functions_valid") + tool = BaseTool(function_map={"test_func": lambda x: x}) + + valid_json = json.dumps( + {"type": "function", "function": {"name": "test_func"}} + ) + + assert tool.check_str_for_functions_valid(valid_json) is True + + invalid_json = json.dumps({"type": "invalid"}) + assert tool.check_str_for_functions_valid(invalid_json) is False + print("check_str_for_functions_valid test passed") + + +def test_convert_funcs_into_tools(): + print("Testing convert_funcs_into_tools") + tool = BaseTool(tools=[sample_function]) + + tool.convert_funcs_into_tools() + assert "sample_function" in tool.function_map + assert callable(tool.function_map["sample_function"]) + print("convert_funcs_into_tools test passed") + + +def run_all_tests(): + print("Starting all tests") + + tests = [ + test_func_to_dict, + test_base_model_to_dict, + test_detect_tool_input_type, + test_execute_tool_by_name, + test_check_str_for_functions_valid, + test_convert_funcs_into_tools, + ] + + for test in tests: + try: + test() + except AssertionError as e: + print(f"Test {test.__name__} failed: {str(e)}") + except Exception as e: + print(f"Unexpected error in {test.__name__}: {str(e)}") + + print("All tests completed") + + +if __name__ == "__main__": + run_all_tests() diff --git a/tests/tools/test_parse_tools.py b/tests/tools/test_parse_tools.py new file mode 100644 index 00000000..ef65dddd --- /dev/null +++ b/tests/tools/test_parse_tools.py @@ -0,0 +1,92 @@ +# Define a simple testing framework +from swarms.tools.tool_parse_exec import parse_and_execute_json + + +def run_test(test_name, test_func): + print(f"Running test: {test_name}") + print("------------------------------------------------") + try: + test_func() + print(f"✓ {test_name} passed") + print("------------------------------------------------") + except Exception as e: + print(f"✗ {test_name} failed: {str(e)}") + print("------------------------------------------------") + + +# Mock functions for testing +def mock_function_a(param1, param2): + return param1 + param2 + + +def mock_function_b(param1): + if param1 < 0: + raise ValueError("Negative value not allowed") + return param1 * 2 + + +# Test cases +def test_parse_and_execute_json_success(): + functions = [mock_function_a, mock_function_b] + json_string = '{"functions": [{"name": "mock_function_a", "parameters": {"param1": 1, "param2": 2}}, {"name": "mock_function_b", "parameters": {"param1": 3}}]}' + + result = parse_and_execute_json(functions, json_string) + expected_result = { + "results": {"mock_function_a": "3", "mock_function_b": "6"}, + "summary": "mock_function_a: 3\nmock_function_b: 6", + } + + assert ( + result == expected_result + ), f"Expected {expected_result}, but got {result}" + + +def test_parse_and_execute_json_function_not_found(): + functions = [mock_function_a] + json_string = '{"functions": [{"name": "non_existent_function", "parameters": {}}]}' + + result = parse_and_execute_json(functions, json_string) + expected_result = { + "results": { + "non_existent_function": "Error: Function non_existent_function not found" + }, + "summary": "non_existent_function: Error: Function non_existent_function not found", + } + + assert ( + result == expected_result + ), f"Expected {expected_result}, but got {result}" + + +def test_parse_and_execute_json_error_handling(): + functions = [mock_function_b] + json_string = '{"functions": [{"name": "mock_function_b", "parameters": {"param1": -1}}]}' + + result = parse_and_execute_json(functions, json_string) + expected_result = { + "results": { + "mock_function_b": "Error: Negative value not allowed" + }, + "summary": "mock_function_b: Error: Negative value not allowed", + } + + assert ( + result == expected_result + ), f"Expected {expected_result}, but got {result}" + + +# Run tests +run_test( + "Test parse_and_execute_json success", + test_parse_and_execute_json_success, +) +print("------------------------------------------------") +run_test( + "Test parse_and_execute_json function not found", + test_parse_and_execute_json_function_not_found, +) +print("------------------------------------------------") +run_test( + "Test parse_and_execute_json error handling", + test_parse_and_execute_json_error_handling, +) diff --git a/tests/tools/test_tools_base.py b/tests/tools/test_tools_base.py deleted file mode 100644 index 453ffe69..00000000 --- a/tests/tools/test_tools_base.py +++ /dev/null @@ -1,784 +0,0 @@ -from unittest.mock import MagicMock - -import pytest -from pydantic import BaseModel - -from swarms.tools.tool import ( - BaseTool, - Runnable, - StructuredTool, - Tool, - tool, -) - -# Define test data -test_input = {"key1": "value1", "key2": "value2"} -expected_output = "expected_output_value" - -# Test with global variables -global_var = "global" - - -# Basic tests for BaseTool -def test_base_tool_init(): - # Test BaseTool initialization - tool = BaseTool() - assert isinstance(tool, BaseTool) - - -def test_base_tool_invoke(): - # Test BaseTool invoke method - tool = BaseTool() - result = tool.invoke(test_input) - assert result == expected_output - - -# Basic tests for Tool -def test_tool_init(): - # Test Tool initialization - tool = Tool() - assert isinstance(tool, Tool) - - -def test_tool_invoke(): - # Test Tool invoke method - tool = Tool() - result = tool.invoke(test_input) - assert result == expected_output - - -# Basic tests for StructuredTool -def test_structured_tool_init(): - # Test StructuredTool initialization - tool = StructuredTool() - assert isinstance(tool, StructuredTool) - - -def test_structured_tool_invoke(): - # Test StructuredTool invoke method - tool = StructuredTool() - result = tool.invoke(test_input) - assert result == expected_output - - -# Test additional functionality and edge cases as needed - - -def test_tool_creation(): - tool = Tool( - name="test_tool", func=lambda x: x, description="Test tool" - ) - assert tool.name == "test_tool" - assert tool.func is not None - assert tool.description == "Test tool" - - -def test_tool_ainvoke(): - tool = Tool( - name="test_tool", func=lambda x: x, description="Test tool" - ) - result = tool.ainvoke("input_data") - assert result == "input_data" - - -def test_tool_ainvoke_with_coroutine(): - async def async_function(input_data): - return input_data - - tool = Tool( - name="test_tool", - coroutine=async_function, - description="Test tool", - ) - result = tool.ainvoke("input_data") - assert result == "input_data" - - -def test_tool_args(): - def sample_function(input_data): - return input_data - - tool = Tool( - name="test_tool", - func=sample_function, - description="Test tool", - ) - assert tool.args == {"tool_input": {"type": "string"}} - - -# Basic tests for StructuredTool class - - -def test_structured_tool_creation(): - class SampleArgsSchema: - pass - - tool = StructuredTool( - name="test_tool", - func=lambda x: x, - description="Test tool", - args_schema=SampleArgsSchema, - ) - assert tool.name == "test_tool" - assert tool.func is not None - assert tool.description == "Test tool" - assert tool.args_schema == SampleArgsSchema - - -def test_structured_tool_ainvoke(): - class SampleArgsSchema: - pass - - tool = StructuredTool( - name="test_tool", - func=lambda x: x, - description="Test tool", - args_schema=SampleArgsSchema, - ) - result = tool.ainvoke({"tool_input": "input_data"}) - assert result == "input_data" - - -def test_structured_tool_ainvoke_with_coroutine(): - class SampleArgsSchema: - pass - - async def async_function(input_data): - return input_data - - tool = StructuredTool( - name="test_tool", - coroutine=async_function, - description="Test tool", - args_schema=SampleArgsSchema, - ) - result = tool.ainvoke({"tool_input": "input_data"}) - assert result == "input_data" - - -def test_structured_tool_args(): - class SampleArgsSchema: - pass - - def sample_function(input_data): - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - description="Test tool", - args_schema=SampleArgsSchema, - ) - assert tool.args == {"tool_input": {"type": "string"}} - - -# Additional tests for exception handling - - -def test_tool_ainvoke_exception(): - tool = Tool(name="test_tool", func=None, description="Test tool") - with pytest.raises(NotImplementedError): - tool.ainvoke("input_data") - - -def test_tool_ainvoke_with_coroutine_exception(): - tool = Tool( - name="test_tool", coroutine=None, description="Test tool" - ) - with pytest.raises(NotImplementedError): - tool.ainvoke("input_data") - - -def test_structured_tool_ainvoke_exception(): - class SampleArgsSchema: - pass - - tool = StructuredTool( - name="test_tool", - func=None, - description="Test tool", - args_schema=SampleArgsSchema, - ) - with pytest.raises(NotImplementedError): - tool.ainvoke({"tool_input": "input_data"}) - - -def test_structured_tool_ainvoke_with_coroutine_exception(): - class SampleArgsSchema: - pass - - tool = StructuredTool( - name="test_tool", - coroutine=None, - description="Test tool", - args_schema=SampleArgsSchema, - ) - with pytest.raises(NotImplementedError): - tool.ainvoke({"tool_input": "input_data"}) - - -def test_tool_description_not_provided(): - tool = Tool(name="test_tool", func=lambda x: x) - assert tool.name == "test_tool" - assert tool.func is not None - assert tool.description == "" - - -def test_tool_invoke_with_callbacks(): - def sample_function(input_data, callbacks=None): - if callbacks: - callbacks.on_start() - callbacks.on_finish() - return input_data - - tool = Tool(name="test_tool", func=sample_function) - callbacks = MagicMock() - result = tool.invoke("input_data", callbacks=callbacks) - assert result == "input_data" - callbacks.on_start.assert_called_once() - callbacks.on_finish.assert_called_once() - - -def test_tool_invoke_with_new_argument(): - def sample_function(input_data, callbacks=None): - return input_data - - tool = Tool(name="test_tool", func=sample_function) - result = tool.invoke("input_data", callbacks=None) - assert result == "input_data" - - -def test_tool_ainvoke_with_new_argument(): - async def async_function(input_data, callbacks=None): - return input_data - - tool = Tool(name="test_tool", coroutine=async_function) - result = tool.ainvoke("input_data", callbacks=None) - assert result == "input_data" - - -def test_tool_description_from_docstring(): - def sample_function(input_data): - """Sample function docstring""" - return input_data - - tool = Tool(name="test_tool", func=sample_function) - assert tool.description == "Sample function docstring" - - -def test_tool_ainvoke_with_exceptions(): - async def async_function(input_data): - raise ValueError("Test exception") - - tool = Tool(name="test_tool", coroutine=async_function) - with pytest.raises(ValueError): - tool.ainvoke("input_data") - - -# Additional tests for StructuredTool class - - -def test_structured_tool_infer_schema_false(): - def sample_function(input_data): - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - args_schema=None, - infer_schema=False, - ) - assert tool.args_schema is None - - -def test_structured_tool_ainvoke_with_callbacks(): - class SampleArgsSchema: - pass - - def sample_function(input_data, callbacks=None): - if callbacks: - callbacks.on_start() - callbacks.on_finish() - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - args_schema=SampleArgsSchema, - ) - callbacks = MagicMock() - result = tool.ainvoke( - {"tool_input": "input_data"}, callbacks=callbacks - ) - assert result == "input_data" - callbacks.on_start.assert_called_once() - callbacks.on_finish.assert_called_once() - - -def test_structured_tool_description_not_provided(): - class SampleArgsSchema: - pass - - tool = StructuredTool( - name="test_tool", - func=lambda x: x, - args_schema=SampleArgsSchema, - ) - assert tool.name == "test_tool" - assert tool.func is not None - assert tool.description == "" - - -def test_structured_tool_args_schema(): - class SampleArgsSchema: - pass - - def sample_function(input_data): - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - args_schema=SampleArgsSchema, - ) - assert tool.args_schema == SampleArgsSchema - - -def test_structured_tool_args_schema_inference(): - def sample_function(input_data): - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - args_schema=None, - infer_schema=True, - ) - assert tool.args_schema is not None - - -def test_structured_tool_ainvoke_with_new_argument(): - class SampleArgsSchema: - pass - - def sample_function(input_data, callbacks=None): - return input_data - - tool = StructuredTool( - name="test_tool", - func=sample_function, - args_schema=SampleArgsSchema, - ) - result = tool.ainvoke( - {"tool_input": "input_data"}, callbacks=None - ) - assert result == "input_data" - - -def test_structured_tool_ainvoke_with_exceptions(): - class SampleArgsSchema: - pass - - async def async_function(input_data): - raise ValueError("Test exception") - - tool = StructuredTool( - name="test_tool", - coroutine=async_function, - args_schema=SampleArgsSchema, - ) - with pytest.raises(ValueError): - tool.ainvoke({"tool_input": "input_data"}) - - -def test_base_tool_verbose_logging(caplog): - # Test verbose logging in BaseTool - tool = BaseTool(verbose=True) - result = tool.invoke(test_input) - assert result == expected_output - assert "Verbose logging" in caplog.text - - -def test_tool_exception_handling(): - # Test exception handling in Tool - tool = Tool() - with pytest.raises(Exception): - tool.invoke(test_input, raise_exception=True) - - -def test_structured_tool_async_invoke(): - # Test asynchronous invoke in StructuredTool - tool = StructuredTool() - result = tool.ainvoke(test_input) - assert result == expected_output - - -# Add more tests for specific functionalities and edge cases as needed -# Import necessary libraries and modules - - -# Example of a mock function to be used in testing -def mock_function(arg: str) -> str: - """A simple mock function for testing.""" - return f"Processed {arg}" - - -# Example of a Runnable class for testing -class MockRunnable(Runnable): - # Define necessary methods and properties - pass - - -# Fixture for creating a mock function -@pytest.fixture -def mock_func(): - return mock_function - - -# Fixture for creating a Runnable instance -@pytest.fixture -def mock_runnable(): - return MockRunnable() - - -# Basic functionality tests -def test_tool_with_callable(mock_func): - # Test creating a tool with a simple callable - tool_instance = tool(mock_func) - assert isinstance(tool_instance, BaseTool) - - -def test_tool_with_runnable(mock_runnable): - # Test creating a tool with a Runnable instance - tool_instance = tool(mock_runnable) - assert isinstance(tool_instance, BaseTool) - - -# ... more basic functionality tests ... - - -# Argument handling tests -def test_tool_with_invalid_argument(): - # Test passing an invalid argument type - with pytest.raises(ValueError): - tool( - 123 - ) # Using an integer instead of a string/callable/Runnable - - -def test_tool_with_multiple_arguments(mock_func): - # Test passing multiple valid arguments - tool_instance = tool("mock", mock_func) - assert isinstance(tool_instance, BaseTool) - - -# ... more argument handling tests ... - - -# Schema inference and application tests -class TestSchema(BaseModel): - arg: str - - -def test_tool_with_args_schema(mock_func): - # Test passing a custom args_schema - tool_instance = tool(mock_func, args_schema=TestSchema) - assert tool_instance.args_schema == TestSchema - - -# ... more schema tests ... - - -# Exception handling tests -def test_tool_function_without_docstring(): - # Test that a ValueError is raised if the function lacks a docstring - def no_doc_func(arg: str) -> str: - return arg - - with pytest.raises(ValueError): - tool(no_doc_func) - - -# Test suite starts here -class TestTool: - # Basic Functionality Tests - def test_tool_with_valid_callable_creates_base_tool( - self, mock_func - ): - result = tool(mock_func) - assert isinstance(result, BaseTool) - - def test_tool_returns_correct_function_name(self, mock_func): - result = tool(mock_func) - assert result.func.__name__ == "mock_function" - - # Argument Handling Tests - def test_tool_with_string_and_runnable(self, mock_runnable): - result = tool("mock_runnable", mock_runnable) - assert isinstance(result, BaseTool) - - def test_tool_raises_error_with_invalid_arguments(self): - with pytest.raises(ValueError): - tool(123) - - def test_tool_with_infer_schema_true(self, mock_func): - tool(mock_func, infer_schema=True) - # Assertions related to schema inference - - # Return Direct Feature Tests - def test_tool_with_return_direct_true(self, mock_func): - tool(mock_func, return_direct=True) - # Assertions for return_direct behavior - - # Error Handling Tests - def test_tool_raises_error_without_docstring(self): - def no_doc_func(arg: str) -> str: - return arg - - with pytest.raises(ValueError): - tool(no_doc_func) - - def test_tool_raises_error_runnable_without_object_schema( - self, mock_runnable - ): - with pytest.raises(ValueError): - tool(mock_runnable) - - # Decorator Behavior Tests - @pytest.mark.asyncio - async def test_async_tool_function(self): - @tool - async def async_func(arg: str) -> str: - return arg - - # Assertions for async behavior - - # Integration with StructuredTool and Tool Classes - def test_integration_with_structured_tool(self, mock_func): - result = tool(mock_func) - assert isinstance(result, StructuredTool) - - # Concurrency and Async Handling Tests - def test_concurrency_in_tool(self, mock_func): - # Test related to concurrency - pass - - # Mocking and Isolation Tests - def test_mocking_external_dependencies(self, mocker): - # Use mocker to mock external dependencies - pass - - def test_tool_with_different_return_types(self): - @tool - def return_int(arg: str) -> int: - return int(arg) - - result = return_int("123") - assert isinstance(result, int) - assert result == 123 - - @tool - def return_bool(arg: str) -> bool: - return arg.lower() in ["true", "yes"] - - result = return_bool("true") - assert isinstance(result, bool) - assert result is True - - # Test with multiple arguments - def test_tool_with_multiple_args(self): - @tool - def concat_strings(a: str, b: str) -> str: - return a + b - - result = concat_strings("Hello", "World") - assert result == "HelloWorld" - - # Test handling of optional arguments - def test_tool_with_optional_args(self): - @tool - def greet(name: str, greeting: str = "Hello") -> str: - return f"{greeting} {name}" - - assert greet("Alice") == "Hello Alice" - assert greet("Alice", greeting="Hi") == "Hi Alice" - - # Test with variadic arguments - def test_tool_with_variadic_args(self): - @tool - def sum_numbers(*numbers: int) -> int: - return sum(numbers) - - assert sum_numbers(1, 2, 3) == 6 - assert sum_numbers(10, 20) == 30 - - # Test with keyword arguments - def test_tool_with_kwargs(self): - @tool - def build_query(**kwargs) -> str: - return "&".join(f"{k}={v}" for k, v in kwargs.items()) - - assert build_query(a=1, b=2) == "a=1&b=2" - assert build_query(foo="bar") == "foo=bar" - - # Test with mixed types of arguments - def test_tool_with_mixed_args(self): - @tool - def mixed_args(a: int, b: str, *args, **kwargs) -> str: - return f"{a}{b}{len(args)}{'-'.join(kwargs.values())}" - - assert mixed_args(1, "b", "c", "d", x="y", z="w") == "1b2y-w" - - # Test error handling with incorrect types - def test_tool_error_with_incorrect_types(self): - @tool - def add_numbers(a: int, b: int) -> int: - return a + b - - with pytest.raises(TypeError): - add_numbers("1", "2") - - # Test with nested tools - def test_nested_tools(self): - @tool - def inner_tool(arg: str) -> str: - return f"Inner {arg}" - - @tool - def outer_tool(arg: str) -> str: - return f"Outer {inner_tool(arg)}" - - assert outer_tool("Test") == "Outer Inner Test" - - def test_tool_with_global_variable(self): - @tool - def access_global(arg: str) -> str: - return f"{global_var} {arg}" - - assert access_global("Var") == "global Var" - - # Test with environment variables - def test_tool_with_env_variables(self, monkeypatch): - monkeypatch.setenv("TEST_VAR", "Environment") - - @tool - def access_env_variable(arg: str) -> str: - import os - - return f"{os.environ['TEST_VAR']} {arg}" - - assert access_env_variable("Var") == "Environment Var" - - # ... [Previous test cases] ... - - # Test with complex data structures - def test_tool_with_complex_data_structures(self): - @tool - def process_data(data: dict) -> list: - return [data[key] for key in sorted(data.keys())] - - result = process_data({"b": 2, "a": 1}) - assert result == [1, 2] - - # Test handling exceptions within the tool function - def test_tool_handling_internal_exceptions(self): - @tool - def function_that_raises(arg: str): - if arg == "error": - raise ValueError("Error occurred") - return arg - - with pytest.raises(ValueError): - function_that_raises("error") - assert function_that_raises("ok") == "ok" - - # Test with functions returning None - def test_tool_with_none_return(self): - @tool - def return_none(arg: str): - return None - - assert return_none("anything") is None - - # Test with lambda functions - def test_tool_with_lambda(self): - tool_lambda = tool(lambda x: x * 2) - assert tool_lambda(3) == 6 - - # Test with class methods - def test_tool_with_class_method(self): - class MyClass: - @tool - def method(self, arg: str) -> str: - return f"Method {arg}" - - obj = MyClass() - assert obj.method("test") == "Method test" - - # Test tool function with inheritance - def test_tool_with_inheritance(self): - class Parent: - @tool - def parent_method(self, arg: str) -> str: - return f"Parent {arg}" - - class Child(Parent): - @tool - def child_method(self, arg: str) -> str: - return f"Child {arg}" - - child_obj = Child() - assert child_obj.parent_method("test") == "Parent test" - assert child_obj.child_method("test") == "Child test" - - # Test with decorators stacking - def test_tool_with_multiple_decorators(self): - def another_decorator(func): - def wrapper(*args, **kwargs): - return f"Decorated {func(*args, **kwargs)}" - - return wrapper - - @tool - @another_decorator - def decorated_function(arg: str): - return f"Function {arg}" - - assert decorated_function("test") == "Decorated Function test" - - # Test tool function when used in a multi-threaded environment - def test_tool_in_multithreaded_environment(self): - import threading - - @tool - def threaded_function(arg: int) -> int: - return arg * 2 - - results = [] - - def thread_target(): - results.append(threaded_function(5)) - - threads = [ - threading.Thread(target=thread_target) for _ in range(10) - ] - for t in threads: - t.start() - for t in threads: - t.join() - - assert results == [10] * 10 - - # Test with recursive functions - def test_tool_with_recursive_function(self): - @tool - def recursive_function(n: int) -> int: - if n == 0: - return 0 - else: - return n + recursive_function(n - 1) - - assert recursive_function(5) == 15 - - -# Additional tests can be added here to cover more scenarios diff --git a/tests/utils/test_auto_check_download.py b/tests/utils/test_auto_check_download.py new file mode 100644 index 00000000..ac8fee3d --- /dev/null +++ b/tests/utils/test_auto_check_download.py @@ -0,0 +1,104 @@ +from swarms.utils.auto_download_check_packages import ( + auto_check_and_download_package, + check_and_install_package, +) + + +def test_check_and_install_package_pip(): + result = check_and_install_package("numpy", package_manager="pip") + print(f"Test result for 'numpy' installation using pip: {result}") + assert result, "Failed to install or verify 'numpy' using pip" + + +def test_check_and_install_package_conda(): + result = check_and_install_package( + "numpy", package_manager="conda" + ) + print( + f"Test result for 'numpy' installation using conda: {result}" + ) + assert result, "Failed to install or verify 'numpy' using conda" + + +def test_check_and_install_specific_version(): + result = check_and_install_package( + "numpy", package_manager="pip", version="1.21.0" + ) + print( + f"Test result for specific version of 'numpy' installation using pip: {result}" + ) + assert ( + result + ), "Failed to install or verify specific version of 'numpy' using pip" + + +def test_check_and_install_with_upgrade(): + result = check_and_install_package( + "numpy", package_manager="pip", upgrade=True + ) + print(f"Test result for 'numpy' upgrade using pip: {result}") + assert result, "Failed to upgrade 'numpy' using pip" + + +def test_auto_check_and_download_single_package(): + result = auto_check_and_download_package( + "scipy", package_manager="pip" + ) + print(f"Test result for 'scipy' installation using pip: {result}") + assert result, "Failed to install or verify 'scipy' using pip" + + +def test_auto_check_and_download_multiple_packages(): + packages = ["scipy", "pandas"] + result = auto_check_and_download_package( + packages, package_manager="pip" + ) + print( + f"Test result for multiple packages installation using pip: {result}" + ) + assert ( + result + ), f"Failed to install or verify one or more packages in {packages} using pip" + + +def test_auto_check_and_download_multiple_packages_with_versions(): + packages = ["numpy:1.21.0", "pandas:1.3.0"] + result = auto_check_and_download_package( + packages, package_manager="pip" + ) + print( + f"Test result for multiple packages with versions installation using pip: {result}" + ) + assert ( + result + ), f"Failed to install or verify one or more packages in {packages} with specific versions using pip" + + +# Example of running tests +if __name__ == "__main__": + try: + test_check_and_install_package_pip() + print("test_check_and_install_package_pip passed") + + test_check_and_install_package_conda() + print("test_check_and_install_package_conda passed") + + test_check_and_install_specific_version() + print("test_check_and_install_specific_version passed") + + test_check_and_install_with_upgrade() + print("test_check_and_install_with_upgrade passed") + + test_auto_check_and_download_single_package() + print("test_auto_check_and_download_single_package passed") + + test_auto_check_and_download_multiple_packages() + print("test_auto_check_and_download_multiple_packages passed") + + test_auto_check_and_download_multiple_packages_with_versions() + print( + "test_auto_check_and_download_multiple_packages_with_versions passed" + ) + + except AssertionError as e: + print(f"Test failed: {str(e)}")