Merge branch 'kyegomez:master' into master

pull/754/head
georgesh 2 months ago committed by GitHub
commit e6eccf1db3

@@ -1,23 +1,48 @@
+# Framework Configuration
 WORKSPACE_DIR="agent_workspace"
+SWARMS_VERBOSE_GLOBAL="False"
 SWARMS_API_KEY=""
-USE_TELEMETRY=True
-OPENAI_API_KEY="sk-"
-GOOGLE_API_KEY=""
-AI21_API_KEY="your_api_key_here"
-COHERE_API_KEY="your_api_key_here"
-ALEPHALPHA_API_KEY="your_api_key_here"
-HUGGINFACEHUB_API_KEY="your_api_key_here"
-EVAL_PORT=8000
-MODEL_NAME="gpt-4"
-USE_GPU=True
-PLAYGROUND_DIR="examples"
-LOG_LEVEL="INFO"
-BOT_NAME="Orca"
-HF_API_KEY="your_huggingface_api_key_here"
+
+# Model Provider API Keys
+## OpenAI
+OPENAI_API_KEY=""
+
+## Anthropic
+ANTHROPIC_API_KEY=""
+
+## Google
+GEMINI_API_KEY=""
+
+## Hugging Face
+HUGGINGFACE_TOKEN=""
+
+## Perplexity AI
+PPLX_API_KEY=""
+
+## AI21
+AI21_API_KEY=""
+
+# Tool Provider API Keys
+## Search Tools
+BING_BROWSER_API=""
+BRAVESEARCH_API_KEY=""
+TAVILY_API_KEY=""
+YOU_API_KEY=""
+
+## Analytics & Monitoring
 AGENTOPS_API_KEY=""
-ANTHROPIC_API_KEY="your_anthropic_api_key"
-AZURE_OPENAI_ENDPOINT="your_azure_openai_endpoint"
-AZURE_OPENAI_DEPLOYMENT="your_azure_openai_deployment"
-OPENAI_API_VERSION="your_openai_api_version"
-AZURE_OPENAI_API_KEY="your_azure_openai_api_key"
-AZURE_OPENAI_AD_TOKEN="your_azure_openai_ad_token"
+EXA_API_KEY=""
+
+## Browser Automation
+MULTION_API_KEY=""
+
+## Other Tools
+HCP_APP_ID=""
+
+# Cloud Provider Configuration
+## Azure OpenAI
+AZURE_OPENAI_ENDPOINT=""
+AZURE_OPENAI_DEPLOYMENT=""
+OPENAI_API_VERSION=""
+AZURE_OPENAI_API_KEY=""
+AZURE_OPENAI_AD_TOKEN=""

@@ -22,4 +22,4 @@ jobs:
 - run: ruff format .
 - run: ruff check --fix .
-- uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c
+- uses: autofix-ci/action@551dded8c6cc8a1054039c8bc0b8b48c51dfc6ef

.gitignore
@@ -14,6 +14,7 @@ static/generated
 runs
 Financial-Analysis-Agent_state.json
 experimental
+ffn_alternatives
 artifacts_five
 encryption
 errors

@@ -1,16 +1,20 @@
-# Use Python 3.11 slim-bullseye for smaller base image
-FROM python:3.11-slim-bullseye AS builder
+# Use Python 3.11 slim-bullseye for a smaller base image
+FROM python:3.11-slim-bullseye

-# Set environment variables
+# Set environment variables for Python and pip
 ENV PYTHONDONTWRITEBYTECODE=1 \
     PYTHONUNBUFFERED=1 \
     PIP_NO_CACHE_DIR=1 \
-    PIP_DISABLE_PIP_VERSION_CHECK=1
+    PIP_DISABLE_PIP_VERSION_CHECK=1 \
+    WORKSPACE_DIR="agent_workspace" \
+    PATH="/app:${PATH}" \
+    PYTHONPATH="/app:${PYTHONPATH}" \
+    USER=swarms

 # Set the working directory
-WORKDIR /build
+WORKDIR /app

-# Install only essential build dependencies
+# Install essential build dependencies
 RUN apt-get update && apt-get install -y --no-install-recommends \
     build-essential \
     gcc \
@@ -18,38 +22,21 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     gfortran \
     && rm -rf /var/lib/apt/lists/*

-# Install swarms packages
-RUN pip install --no-cache-dir swarm-models swarms
-
-# Production stage
-FROM python:3.11-slim-bullseye
-
-# Set secure environment variables
-ENV PYTHONDONTWRITEBYTECODE=1 \
-    PYTHONUNBUFFERED=1 \
-    WORKSPACE_DIR="agent_workspace" \
-    PATH="/app:${PATH}" \
-    PYTHONPATH="/app:${PYTHONPATH}" \
-    USER=swarms
-
-# Create non-root user
+# Install required Python packages
+RUN pip install --no-cache-dir swarm-models swarms && \
+    pip install --no-cache-dir transformers torch litellm tiktoken openai pandas numpy pypdf
+
+# Create a non-root user and set correct permissions for the application directory
 RUN useradd -m -s /bin/bash -U $USER && \
     mkdir -p /app && \
     chown -R $USER:$USER /app

-# Set working directory
-WORKDIR /app
-
-# Copy only necessary files from builder
-COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
-COPY --from=builder /usr/local/bin /usr/local/bin
-
-# Copy application with correct permissions
+# Copy application files into the image with proper ownership
 COPY --chown=$USER:$USER . .

-# Switch to non-root user
+# Switch to the non-root user
 USER $USER

-# Health check
+# Health check to ensure the container is running properly
 HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
     CMD python -c "import swarms; print('Health check passed')" || exit 1

@@ -1,661 +1,201 @@
-GNU AFFERO GENERAL PUBLIC LICENSE
-Version 3, 19 November 2007
-Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
-(... the remainder of the standard GNU AGPL-3.0 text: preamble, Terms and Conditions 0-17, and the "How to Apply These Terms to Your New Programs" appendix, with the filled-in program notice ...)
-
-Swarms provides multi-agent orchestration mechanisms to enable llm agents to collaborate and work together
-Copyright (C) <2025> <Kye Gomez Chairman of TGSC>
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+(... the standard Apache-2.0 Terms and Conditions 1-9 and the appendix on applying the license, with the filled-in project notice ...)
+
+Copyright [2025] [The Galactic Swarm Corporation]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

@@ -2062,4 +2062,4 @@ Join our growing community around the world, for real-time support, ideas, and d
 # License

-GNU AFFERO GENERAL PUBLIC LICENSE
+APACHE

File diff suppressed because it is too large.

@ -0,0 +1,383 @@
# $swarms Token Integration Guide
## Overview
This guide covers integrating $swarms token (74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump) payments into your platform using Solana and the Phantom wallet. Because $swarms lives on the Solana blockchain, payments benefit from high transaction throughput (TPS) and low transaction fees.
## Table of Contents
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Architecture Overview](#architecture-overview)
- [Setup Guide](#setup-guide)
- [One-Click Payment Implementation](#one-click-payment-implementation)
- [Security Considerations](#security-considerations)
- [Testing](#testing)
- [Troubleshooting](#troubleshooting)
- [Monitoring and Analytics](#monitoring-and-analytics)
- [Advanced Features](#advanced-features)
- [Support and Resources](#support-and-resources)
- [Version History](#version-history)
## Prerequisites
- Node.js v16.x or higher
- TypeScript 4.x or higher
- Phantom Wallet browser extension
- Solana development environment
## Installation
```bash
npm install @solana/web3.js @solana/spl-token @project-serum/anchor @solana/wallet-adapter-react @solana/wallet-adapter-phantom
```
## Architecture Overview
```mermaid
flowchart TB
A[User Interface] -->|Trigger Payment| B[Payment Handler]
B --> C{Phantom Wallet}
C -->|Sign Transaction| D[Solana Network]
D -->|Execute Transfer| E[$swarms Token Contract]
E -->|Confirm Transaction| F[Payment Confirmation]
F -->|Update UI| A
```
## Setup Guide
### 1. Initialize Solana Connection
```typescript
import { Connection, clusterApiUrl } from '@solana/web3.js';
import { PhantomWalletAdapter } from '@solana/wallet-adapter-phantom';
const connection = new Connection(clusterApiUrl('mainnet-beta'));
const wallet = new PhantomWalletAdapter();
// Initialize wallet connection
await wallet.connect();
```
### 2. Configure Token Parameters
```typescript
import { PublicKey } from '@solana/web3.js';

const SWARMS_TOKEN_ADDRESS = '74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump';

interface TokenConfig {
  mint: PublicKey;
  decimals: number;
}

const swarmTokenConfig: TokenConfig = {
  mint: new PublicKey(SWARMS_TOKEN_ADDRESS),
  decimals: 9
};
```
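
On-chain transfer amounts are integers denominated in the token's smallest unit, so a user-facing amount has to be scaled by `decimals` before it goes into a transfer instruction. The handler in the next step uses `amount * Math.pow(10, decimals)`, which is fine for small values; a BigInt-based helper avoids floating-point precision loss for larger amounts. A minimal sketch building only on the config above (`toBaseUnits` is an illustrative helper, not part of any SDK):

```typescript
// Convert a human-readable $swarms amount into base units (10^decimals).
// Uses string math + BigInt to avoid floating-point rounding errors.
function toBaseUnits(amount: number, decimals: number = swarmTokenConfig.decimals): bigint {
  const [whole, fraction = ''] = amount.toString().split('.');
  const paddedFraction = fraction.padEnd(decimals, '0').slice(0, decimals);
  return BigInt(whole + paddedFraction);
}

// Example: toBaseUnits(1.5) === 1500000000n when decimals is 9
```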
### 3. Create Payment Handler
```typescript
import { Connection, PublicKey, Transaction } from '@solana/web3.js';
import { createTransferInstruction, getAssociatedTokenAddress } from '@solana/spl-token';
import { PhantomWalletAdapter } from '@solana/wallet-adapter-phantom';

export class SwarmPaymentHandler {
  private connection: Connection;
  private wallet: PhantomWalletAdapter;

  constructor(connection: Connection, wallet: PhantomWalletAdapter) {
    this.connection = connection;
    this.wallet = wallet;
  }

  async createTransferTransaction(
    amount: number,
    recipientAddress: string
  ): Promise<Transaction> {
    if (!this.wallet.publicKey) {
      throw new Error('Wallet is not connected');
    }

    const transaction = new Transaction();

    // Transfer between the sender's and recipient's associated token accounts,
    // converting the UI amount into base units via the token's decimals.
    const transferInstruction = createTransferInstruction(
      await getAssociatedTokenAddress(swarmTokenConfig.mint, this.wallet.publicKey),
      await getAssociatedTokenAddress(swarmTokenConfig.mint, new PublicKey(recipientAddress)),
      this.wallet.publicKey,
      amount * Math.pow(10, swarmTokenConfig.decimals)
    );

    transaction.add(transferInstruction);
    return transaction;
  }
}
```
## One-Click Payment Implementation
### React Component Example
```typescript
import React, { useState } from 'react';
import { useConnection, useWallet } from '@solana/wallet-adapter-react';
import { SwarmPaymentHandler } from './payment-handler';

const SwarmPaymentButton: React.FC<{
  amount: number;
  recipientAddress: string;
}> = ({ amount, recipientAddress }) => {
  const [loading, setLoading] = useState(false);
  const { connection } = useConnection();
  const wallet = useWallet();

  // The handler only reads wallet.publicKey, so the wallet-adapter context works here.
  const paymentHandler = new SwarmPaymentHandler(connection, wallet);

  const handlePayment = async () => {
    try {
      setLoading(true);
      const transaction = await paymentHandler.createTransferTransaction(
        amount,
        recipientAddress
      );

      const signature = await wallet.sendTransaction(transaction, connection);
      await connection.confirmTransaction(signature);

      // Handle success
      console.log('Payment successful:', signature);
    } catch (error) {
      console.error('Payment failed:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <button
      onClick={handlePayment}
      disabled={loading || !wallet.connected}
      className="payment-button"
    >
      {loading ? 'Processing...' : `Pay ${amount} $swarms`}
    </button>
  );
};
```
### Payment Flow Sequence
```mermaid
sequenceDiagram
participant User
participant UI
participant PaymentHandler
participant PhantomWallet
participant Solana
User->>UI: Click Pay Button
UI->>PaymentHandler: Create Transaction
PaymentHandler->>PhantomWallet: Request Signature
PhantomWallet->>User: Prompt for Approval
User->>PhantomWallet: Approve Transaction
PhantomWallet->>Solana: Submit Transaction
Solana->>PaymentHandler: Confirm Transaction
PaymentHandler->>UI: Update Status
UI->>User: Show Confirmation
```
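
The confirmation step in this sequence is shown above with `connection.confirmTransaction(signature)`; recent `@solana/web3.js` releases prefer a blockhash-based confirmation strategy, which fails fast once the transaction can no longer land. A sketch of that variant, assuming the same `connection`, `wallet`, and `transaction` objects from the earlier examples:

```typescript
// Confirm with the blockhash / lastValidBlockHeight strategy (newer @solana/web3.js)
const { blockhash, lastValidBlockHeight } = await connection.getLatestBlockhash();
const signature = await wallet.sendTransaction(transaction, connection);

await connection.confirmTransaction({
  signature,
  blockhash,
  lastValidBlockHeight,
});
```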
## Security Considerations
### Transaction Validation
```typescript
import { PublicKey, Transaction } from '@solana/web3.js';
import { decodeTransferInstruction } from '@solana/spl-token';

function validateTransaction(
  transaction: Transaction,
  expectedAmount: bigint,          // amount in base units
  expectedRecipient: PublicKey     // recipient's associated token account
): boolean {
  try {
    // Decode the SPL token transfer instruction and check its amount and destination
    const decoded = decodeTransferInstruction(transaction.instructions[0]);

    return (
      decoded.data.amount === expectedAmount &&
      decoded.keys.destination.pubkey.equals(expectedRecipient)
    );
  } catch (error) {
    console.error('Transaction validation failed:', error);
    return false;
  }
}
```
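
For example, a payment flow might validate the transaction it just built before asking the wallet to sign it. A minimal sketch; `paymentHandler`, `wallet`, `connection`, `swarmTokenConfig`, and `getAssociatedTokenAddress` are the objects from the setup sections, and `recipientAddress` is assumed to come from your application:

```typescript
// Pre-flight check before requesting a signature
const transaction = await paymentHandler.createTransferTransaction(50, recipientAddress);

// 50 $swarms expressed in base units (9 decimals, per swarmTokenConfig)
const expectedAmount = BigInt(50) * BigInt(10 ** swarmTokenConfig.decimals);

// The transfer's destination is the recipient's associated token account for the $swarms mint
const expectedDestination = await getAssociatedTokenAddress(
  swarmTokenConfig.mint,
  new PublicKey(recipientAddress)
);

if (!validateTransaction(transaction, expectedAmount, expectedDestination)) {
  throw new Error('Transaction does not match the expected transfer');
}

const signature = await wallet.sendTransaction(transaction, connection);
```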
### Error Handling
```typescript
import { WalletError } from '@solana/wallet-adapter-base';

class PaymentError extends Error {
  constructor(
    message: string,
    public code: string,
    public transaction?: string
  ) {
    super(message);
    this.name = 'PaymentError';
  }
}

async function handlePaymentError(error: any): Promise<void> {
  if (error instanceof WalletError) {
    // Handle wallet-specific errors
    throw new PaymentError(
      'Wallet error occurred',
      'WALLET_ERROR',
      error.message
    );
  } else if (error.code === 'TransactionError') {
    // Handle Solana transaction errors
    throw new PaymentError(
      'Transaction failed',
      'TRANSACTION_ERROR',
      error.txid
    );
  }
  // Handle other errors...
}
```
## Testing
### Unit Test Example
```typescript
import { expect } from 'chai';
import { SwarmPaymentHandler } from './payment-handler';

describe('SwarmPaymentHandler', () => {
  let paymentHandler: SwarmPaymentHandler;

  beforeEach(() => {
    // Setup test environment
  });

  it('should create valid transfer transaction', async () => {
    const amount = 100;
    const recipientAddress = 'recipient_address';

    const transaction = await paymentHandler.createTransferTransaction(
      amount,
      recipientAddress
    );

    expect(transaction.instructions).to.have.lengthOf(1);
    // Add more assertions...
  });
});
```
## Troubleshooting
### Common Issues and Solutions
1. **Insufficient Balance**
```typescript
async function checkBalance(
  connection: Connection,
  walletAddress: PublicKey,
  requiredBaseUnits: bigint
): Promise<boolean> {
  const tokenAccount = await getAssociatedTokenAddress(swarmTokenConfig.mint, walletAddress);
  const balance = await connection.getTokenAccountBalance(tokenAccount);

  // Compare in base units to avoid floating-point rounding issues
  return BigInt(balance.value.amount) >= requiredBaseUnits;
}
```
2. **Transaction Timeout**
```typescript
async function submitWithRetry(
  transaction: Transaction,
  maxRetries = 3
): Promise<string> {
  let attempt = 0;

  while (attempt < maxRetries) {
    try {
      const signature = await wallet.sendTransaction(transaction, connection);
      const confirmation = await connection.confirmTransaction(signature);

      if (confirmation.value.err) {
        throw new Error('Transaction failed');
      }

      return signature;
    } catch (error) {
      attempt++;
      if (attempt === maxRetries) throw error;
      // Linear backoff between retries
      await new Promise(resolve => setTimeout(resolve, 1000 * attempt));
    }
  }

  throw new Error('Transaction was not submitted');
}
```
## Monitoring and Analytics
### Transaction Monitoring
```typescript
interface TransactionMetrics {
  timestamp: number;
  amount: number;
  success: boolean;
  duration: number;
}

class TransactionMonitor {
  private metrics: TransactionMetrics[] = [];

  logTransaction(metric: TransactionMetrics): void {
    this.metrics.push(metric);
    // Add your analytics implementation
  }

  getAverageSuccessRate(): number {
    if (this.metrics.length === 0) return 0;
    return (
      (this.metrics.filter(m => m.success).length / this.metrics.length) * 100
    );
  }
}
```
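
As a usage sketch, the monitor can be fed from the payment flow shown earlier; the `transaction`, `wallet`, `connection`, and `amount` values are the ones from the payment examples above, and the wiring itself is illustrative rather than part of any SDK:

```typescript
const monitor = new TransactionMonitor();

const started = Date.now();
try {
  const signature = await wallet.sendTransaction(transaction, connection);
  await connection.confirmTransaction(signature);
  monitor.logTransaction({ timestamp: started, amount, success: true, duration: Date.now() - started });
} catch (error) {
  monitor.logTransaction({ timestamp: started, amount, success: false, duration: Date.now() - started });
  throw error;
}

console.log(`Success rate so far: ${monitor.getAverageSuccessRate().toFixed(1)}%`);
```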
## Advanced Features
### Batch Payments
```typescript
async function createBatchPayment(
  recipients: Array<{ address: string; amount: number }>
): Promise<Transaction> {
  const transaction = new Transaction();

  for (const recipient of recipients) {
    const transferInstruction = createTransferInstruction(/* ... */);
    transaction.add(transferInstruction);
  }

  return transaction;
}
```
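A hypothetical usage sketch, assuming the `wallet` and `connection` objects from the earlier sections; the recipient addresses are placeholders:
```typescript
const batchTx = await createBatchPayment([
  { address: 'recipient_one', amount: 50 },
  { address: 'recipient_two', amount: 75 },
]);
const signature = await wallet.sendTransaction(batchTx, connection);
```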
### Subscription Payments
```typescript
class SubscriptionManager {
async createSubscription(
amount: number,
interval: number,
recipientAddress: string
): Promise<string> {
// Implementation for recurring payments
}
async cancelSubscription(subscriptionId: string): Promise<void> {
// Implementation for cancellation
}
}
```
## Support and Resources
For additional support:
- Solana Documentation: https://docs.solana.com
- Phantom Wallet Docs: https://docs.phantom.app
- $swarms Token Contract: 74SBV4zDXxTRgv1pEMoECskKBkZHc2yGPnc7GYVepump
## Version History
- v1.0.0 - Initial release
- v1.0.1 - Added batch payment support
- v1.0.2 - Enhanced error handling
- v1.0.3 - Added subscription payment feature

@ -7,7 +7,7 @@ Before we dive into the code, let's briefly introduce the Swarms framework. Swar
For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities. For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities.
Additional resources: Additional resources:
- [Swarms Discord](https://discord.com/servers/agora-999382051935506503) for community discussions - [Swarms Discord](https://discord.gg/swarms) for community discussions
- [Swarms Twitter](https://x.com/swarms_corp) for updates - [Swarms Twitter](https://x.com/swarms_corp) for updates
- [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts - [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts
- [Swarms Blog](https://medium.com/@kyeg) for in-depth articles - [Swarms Blog](https://medium.com/@kyeg) for in-depth articles
@ -460,7 +460,7 @@ This system provides a powerful foundation for financial analysis, but there's a
Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration. Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration.
For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.com/servers/agora-999382051935506503). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp). For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/swarms). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).
If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions. If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions.
@ -474,7 +474,7 @@ By leveraging the power of multi-agent AI systems, you're well-equipped to navig
* [Swarms Github](https://github.com/kyegomez/swarms) * [Swarms Github](https://github.com/kyegomez/swarms)
* [Swarms Discord](https://discord.com/servers/agora-999382051935506503) * [Swarms Discord](https://discord.gg/swarms)
* [Swarms Twitter](https://x.com/swarms_corp) * [Swarms Twitter](https://x.com/swarms_corp)
* [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) * [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994)
* [Swarms Blog](https://medium.com/@kyeg) * [Swarms Blog](https://medium.com/@kyeg)

@ -261,7 +261,7 @@ The table below summarizes the estimated savings for each use case:
- [book a call](https://cal.com/swarms) - [book a call](https://cal.com/swarms)
- Swarms Discord: https://discord.com/servers/agora-999382051935506503 - Swarms Discord: https://discord.gg/swarms
- Swarms Twitter: https://x.com/swarms_corp - Swarms Twitter: https://x.com/swarms_corp

@ -1,6 +1,6 @@
# Welcome to Swarms Docs Home # Welcome to Swarms Docs Home
[![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/agora-999382051935506503) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@kyegomez3242) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/kye-g-38759a207/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/kyegomezb) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/jM3Z6M9uMq) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@kyegomez3242) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/kye-g-38759a207/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/kyegomezb)
**Get Started Building Production-Grade Multi-Agent Applications** **Get Started Building Production-Grade Multi-Agent Applications**
@ -39,7 +39,7 @@ Here you'll find references about the Swarms framework, marketplace, community,
## Community ## Community
| Section | Links | | Section | Links |
|----------------------|--------------------------------------------------------------------------------------------| |----------------------|--------------------------------------------------------------------------------------------|
| Community | [Discord](https://discord.com/servers/agora-999382051935506503) | | Community | [Discord](https://discord.gg/swarms) |
| Blog | [Blog](https://medium.com/@kyeg) | | Blog | [Blog](https://medium.com/@kyeg) |
| Event Calendar | [LUMA](https://lu.ma/swarms_calendar) | | Event Calendar | [LUMA](https://lu.ma/swarms_calendar) |
| Twitter | [Twitter](https://x.com/swarms_corp) | | Twitter | [Twitter](https://x.com/swarms_corp) |

@ -32,13 +32,17 @@ plugins:
show_symbol_type_heading: true show_symbol_type_heading: true
show_symbol_type_toc: true show_symbol_type_toc: true
show_category_heading: true show_category_heading: true
domains: [std, py]
- git-committers: - git-committers:
repository: kyegomez/swarms repository: kyegomez/swarms
branch: master branch: master
# token: !ENV ["GITHUB_TOKEN"] # token: !ENV ["GITHUB_TOKEN"]
- git-revision-date-localized: - git-revision-date-localized:
enable_creation_date: true enable_creation_date: true
# - mkdocs-jupyter:
# kernel_name: python3
# execute: false
# include_source: True
# include_requirejs: true
extra_css: extra_css:
- assets/css/extra.css - assets/css/extra.css
extra: extra:
@ -50,7 +54,7 @@ extra:
- icon: fontawesome/brands/twitter - icon: fontawesome/brands/twitter
link: https://x.com/swarms_corp link: https://x.com/swarms_corp
- icon: fontawesome/brands/discord - icon: fontawesome/brands/discord
link: https://discord.com/servers/agora-999382051935506503 link: https://discord.gg/swarms
analytics: analytics:
provider: google provider: google
@ -140,34 +144,35 @@ nav:
- Overview: "index.md" - Overview: "index.md"
# - The Vision: "swarms/framework/vision.md" # - The Vision: "swarms/framework/vision.md"
# - Docker Setup: "swarms/install/docker_setup.md" # - Docker Setup: "swarms/install/docker_setup.md"
- Our Goal; The Ultimate Multi-Agent LLM Framework for Developers: "swarms/concept/vision.md" - Swarms Vision: "swarms/concept/vision.md"
- Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md" - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- Swarms Products: "swarms/products.md"
- Onboarding: - Onboarding:
- Installation: "swarms/install/install.md" - Installation: "swarms/install/install.md"
- Environment Configuration: "swarms/install/workspace_manager.md" - Environment Configuration: "swarms/install/workspace_manager.md"
- Environment Variables: "swarms/install/env.md"
- Quickstart: "swarms/install/quickstart.md" - Quickstart: "swarms/install/quickstart.md"
- Swarms CLI: "swarms/cli/main.md" - Swarms CLI: "swarms/cli/main.md"
# - Swarms + Docker:
- Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - Swarms Framework Architecture: "swarms/concept/framework_architecture.md"
# - Prelimary: # - Prelimary:
# - 80/20 Rule For Agents: "swarms/prompting/8020.md" # - 80/20 Rule For Agents: "swarms/prompting/8020.md"
- Managing Prompts in Production: "swarms/prompts/main.md"
- Agents: - Agents:
# - Overview: "swarms/structs/index.md" # - Overview: "swarms/structs/index.md"
# - Build Custom Agents: "swarms/structs/diy_your_own_agent.md" - Managing Prompts in Production: "swarms/prompts/main.md"
- Agent Architecture: "swarms/framework/agents_explained.md" - Agent Architecture: "swarms/framework/agents_explained.md"
- Complete Agent API: "swarms/structs/agent.md" - Complete Agent API: "swarms/structs/agent.md"
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md" - Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Creating Custom Agents: "swarms/agents/new_agent.md"
- Tools: - Tools:
- Overview: "swarms/tools/main.md" - Overview: "swarms/tools/main.md"
- What are tools?: "swarms/tools/build_tool.md" - What are tools?: "swarms/tools/build_tool.md"
- ToolAgent: "swarms/agents/tool_agent.md" - ToolAgent: "swarms/agents/tool_agent.md"
- Tool Storage & tool_registry decorator: "swarms/tools/tool_storage.md" - Tool Storage: "swarms/tools/tool_storage.md"
- RAG || Long Term Memory: - RAG || Long Term Memory:
- Integrating RAG with Agents: "swarms/memory/diy_memory.md" - Integrating RAG with Agents: "swarms/memory/diy_memory.md"
- Third-Party Agent Integrations:
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Creating Custom Agents: "swarms/agents/new_agent.md"
- Swarm Architectures: - Swarm Architectures:
- Why MultiAgent Collaboration is Necessary: "swarms/concept/why.md" - Why MultiAgent Collaboration is Necessary: "swarms/concept/why.md"
- Swarm Architectures: "swarms/concept/swarm_architectures.md" - Swarm Architectures: "swarms/concept/swarm_architectures.md"
@ -188,6 +193,8 @@ nav:
- TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md" - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md"
- SwarmRearrange: "swarms/structs/swarm_rearrange.md" - SwarmRearrange: "swarms/structs/swarm_rearrange.md"
- MultiAgentRouter: "swarms/structs/multi_agent_router.md" - MultiAgentRouter: "swarms/structs/multi_agent_router.md"
- MatrixSwarm: "swarms/structs/matrix_swarm.md"
- ModelRouter: "swarms/structs/model_router.md"
- Various Execution Methods: "swarms/structs/various_execution_methods.md" - Various Execution Methods: "swarms/structs/various_execution_methods.md"
- Workflows: - Workflows:
- ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md" - ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
@ -195,7 +202,6 @@ nav:
- SequentialWorkflow: "swarms/structs/sequential_workflow.md" - SequentialWorkflow: "swarms/structs/sequential_workflow.md"
- Structs: - Structs:
- Conversation: "swarms/structs/conversation.md" - Conversation: "swarms/structs/conversation.md"
# - Task: "swarms/structs/task.md"
- Full API Reference: "swarms/framework/reference.md" - Full API Reference: "swarms/framework/reference.md"
- Examples: - Examples:
- Unique Swarms: "swarms/examples/unique_swarms.md" - Unique Swarms: "swarms/examples/unique_swarms.md"
@ -208,6 +214,30 @@ nav:
- Ollama: "swarms/examples/ollama.md" - Ollama: "swarms/examples/ollama.md"
- OpenRouter: "swarms/examples/openrouter.md" - OpenRouter: "swarms/examples/openrouter.md"
- XAI: "swarms/examples/xai.md" - XAI: "swarms/examples/xai.md"
- Swarms Tools:
- Agent with Yahoo Finance: "swarms/examples/yahoo_finance.md"
- Twitter Agents: "swarms_tools/twitter.md"
- Blockchain Agents:
- Agent with HTX + CoinGecko: "swarms/examples/swarms_tools_htx.md"
- Agent with HTX + CoinGecko Function Calling: "swarms/examples/swarms_tools_htx_gecko.md"
- Lumo: "swarms/examples/lumo.md"
- Quant Crypto Agent: "swarms/examples/quant_crypto_agent.md"
- Meme Agents:
- Bob The Builder: "swarms/examples/bob_the_builder.md"
- Meme Agent Builder: "swarms/examples/meme_agents.md"
- Multi-Agent Collaboration:
- Swarms DAO: "swarms/examples/swarms_dao.md"
- Contributors:
- Bounty Program: "corporate/bounty_program.md"
- Contributing:
- Contributing: "swarms/contributing.md"
- Tests: "swarms/framework/test.md"
- Code Cleanliness: "swarms/framework/code_cleanliness.md"
- Philosophy: "swarms/concept/philosophy.md"
- Changelog:
- Swarms 5.6.8: "swarms/changelog/5_6_8.md"
- Swarms 5.8.1: "swarms/changelog/5_8_1.md"
- Swarms 5.9.2: "swarms/changelog/changelog_new.md"
- Swarm Models: - Swarm Models:
- Overview: "swarms/models/index.md" - Overview: "swarms/models/index.md"
# - Models Available: "swarms/models/index.md" # - Models Available: "swarms/models/index.md"
@ -222,24 +252,26 @@ nav:
- OpenAIChat: "swarms/models/openai.md" - OpenAIChat: "swarms/models/openai.md"
- OpenAIFunctionCaller: "swarms/models/openai_function_caller.md" - OpenAIFunctionCaller: "swarms/models/openai_function_caller.md"
- Groq: "swarms/models/groq.md" - Groq: "swarms/models/groq.md"
# - Ollama:
# - Fireworks
# - Octo
# - Liquid AI
- MultiModal Models: - MultiModal Models:
- BaseMultiModalModel: "swarms/models/base_multimodal_model.md" - BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
- Multi Modal Models Available: "swarms/models/multimodal_models.md" - Multi Modal Models Available: "swarms/models/multimodal_models.md"
- GPT4VisionAPI: "swarms/models/gpt4v.md" - GPT4VisionAPI: "swarms/models/gpt4v.md"
- Swarms Tools:
- Overview: "swarms_tools/overview.md"
- Finance: "swarms_tools/finance.md"
- Search: "swarms_tools/search.md"
- Social Media:
- Overview: "swarms_tools/social_media.md"
- Twitter: "swarms_tools/twitter.md"
- Swarms Cloud API: - Swarms Cloud API:
# - Overview: "swarms_cloud/main.md" # - Overview: "swarms_cloud/main.md"
- Overview: "swarms_cloud/vision.md" # - Overview: "swarms_cloud/vision.md"
- Overview: "swarms_cloud/launch.md"
- Deploying Swarms on Google Cloud Run: "swarms_cloud/cloud_run.md"
# - Swarms Cloud CLI: "swarms_cloud/cli.md"
- Swarm APIs:
- MCS API: "swarms_cloud/mcs_api.md" - MCS API: "swarms_cloud/mcs_api.md"
- Swarms Cloud CLI: "swarms_cloud/cli.md" - CreateNow API: "swarms_cloud/create_api.md"
# - Add Agents to Marketplace: "swarms_cloud/add_agent.md"
# - Available Models: "swarms_cloud/available_models.md"
# - Agent API: "swarms_cloud/agent_api.md"
# - Migrate from OpenAI to Swarms in 3 lines of code: "swarms_cloud/migrate_openai.md"
# - Getting Started with SOTA Vision Language Models VLM: "swarms_cloud/getting_started.md"
- Swarms Memory: - Swarms Memory:
- Overview: "swarms_memory/index.md" - Overview: "swarms_memory/index.md"
- Memory Systems: - Memory Systems:
@ -248,7 +280,9 @@ nav:
- Faiss: "swarms_memory/faiss.md" - Faiss: "swarms_memory/faiss.md"
- Swarms Marketplace: - Swarms Marketplace:
- Overview: "swarms_platform/index.md" - Overview: "swarms_platform/index.md"
- Share & Discover Prompts, Agents, Tools, and more: "swarms_platform/share_discover.md" - Agent Marketplace: "swarms_platform/share_discover.md"
- Swarm Platform API Keys: "swarms_platform/apikeys.md"
- Account Management: "swarms_platform/account_management.md"
- Prompts API: - Prompts API:
- Add Prompts: "swarms_platform/prompts/add_prompt.md" - Add Prompts: "swarms_platform/prompts/add_prompt.md"
- Edit Prompts: "swarms_platform/prompts/edit_prompt.md" - Edit Prompts: "swarms_platform/prompts/edit_prompt.md"
@ -259,32 +293,16 @@ nav:
- Edit Agents: "swarms_platform/agents/edit_agent.md" - Edit Agents: "swarms_platform/agents/edit_agent.md"
- Telemetry API: - Telemetry API:
- PUT: "swarms_platform/telemetry/index.md" - PUT: "swarms_platform/telemetry/index.md"
- Swarms Wallet API:
- Overview: "swarms/wallet/api.md"
# - Tools API: # - Tools API:
# - Overview: "swarms_platform/tools_api.md" # - Overview: "swarms_platform/tools_api.md"
# - Add Tools: "swarms_platform/fetch_tools.md" # - Add Tools: "swarms_platform/fetch_tools.md"
# - Guides:
# - Unlocking Efficiency and Cost Savings in Healthcare; How Swarms of LLM Agents Can Revolutionize Medical Operations and Save Millions: "guides/healthcare_blog.md"
# - Understanding Agent Evaluation Mechanisms: "guides/agent_evals.md"
# - Agent Glossary: "swarms/glossary.md"
# - The Ultimate Technical Guide to the Swarms CLI; A Step-by-Step Developers Guide: "swarms/cli/cli_guide.md"
# - Prompting Guide:
# - The Essence of Enterprise-Grade Prompting: "swarms/prompts/essence.md"
# - An Analysis on Prompting Strategies: "swarms/prompts/overview.md"
# - Managing Prompts in Production: "swarms/prompts/main.md"
- Community:
- Bounty Program: "corporate/bounty_program.md"
- Contributing:
- Contributing: "swarms/contributing.md"
- Tests: "swarms/framework/test.md"
- Code Cleanliness: "swarms/framework/code_cleanliness.md"
- Philosophy: "swarms/concept/philosophy.md"
- Changelog:
- Swarms 5.6.8: "swarms/changelog/5_6_8.md"
- Swarms 5.8.1: "swarms/changelog/5_8_1.md"
- Swarms 5.9.2: "swarms/changelog/changelog_new.md"
- Corporate: - Corporate:
- Culture: "corporate/culture.md" - Culture: "corporate/culture.md"
- Hiring: "corporate/hiring.md" - Hiring: "corporate/hiring.md"
- Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md" - Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md"
# - Clusterops: - Web3:
# - Overview: "clusterops/reference.md" # - Overview: "finance/index.md"
- Swarms Wallet: "finance/wallet.md"
- Swarms Subscription: "finance/subscription.md"

@ -18,6 +18,7 @@ mkdocs-mermaid2-plugin
mkdocs-include-markdown-plugin mkdocs-include-markdown-plugin
mkdocs-enumerate-headings-plugin mkdocs-enumerate-headings-plugin
mkdocs-autolinks-plugin mkdocs-autolinks-plugin
mkdocstrings-python
mkdocs-minify-html-plugin mkdocs-minify-html-plugin
mkdocs-autolinks-plugin mkdocs-autolinks-plugin

@ -155,5 +155,5 @@ By understanding the purpose and role of each folder in the Swarms framework, us
- **Community Support** - **Community Support**
- URL: [Submit issue](https://discord.gg/agora-999382051935506503) - URL: [Submit issue](https://discord.gg/jM3Z6M9uMq)
- Ask the community for support in real-time and or admin support - Ask the community for support in real-time and or admin support

@ -172,10 +172,6 @@ graph TD
H --> I{Autosave Enabled?} H --> I{Autosave Enabled?}
I --> |Yes| J[Export Metadata to JSON] I --> |Yes| J[Export Metadata to JSON]
I --> |No| K[End Swarm Run] I --> |No| K[End Swarm Run]
%% Style adjustments
classDef blackBox fill:#000,stroke:#f00,color:#fff;
class A,B,C,D,E1,E2,E3,F1,F2,F3,G1,G2,G3,H,I,J,K blackBox;
``` ```
@ -198,8 +194,6 @@ graph TD
F & G & H --> I[Layer 2: Aggregator Agent] F & G & H --> I[Layer 2: Aggregator Agent]
I --> J[Aggregate All Responses] I --> J[Aggregate All Responses]
J --> K[Final Output] J --> K[Final Output]
``` ```

@ -60,17 +60,6 @@ graph TD;
SP --> Sell[Sell Agents] SP --> Sell[Sell Agents]
``` ```
#### 6. **IoTAgents**
[IoTAgents](https://github.com/The-Swarm-Corporation/swarm-ecosystem) enables seamless integration between IoT data and AI agents, allowing the real-time processing of IoT data streams and driving smart automation in industries such as logistics, healthcare, and smart cities.
```mermaid
graph TD;
IA[IoTAgents] --> Parse[Parse IoT Data]
IA --> Process[Process IoT Data]
IA --> Utilize[Utilize IoT Data Streams]
```
#### Extending the Ecosystem: **Swarms Core**, **JS**, and More #### Extending the Ecosystem: **Swarms Core**, **JS**, and More
In addition to the core components, the Swarms Ecosystem offers several other powerful packages: In addition to the core components, the Swarms Ecosystem offers several other powerful packages:

@ -25,3 +25,27 @@ agent = Agent(
# Run a query # Run a query
agent.run("What are the components of a startup's stock incentive equity plan?") agent.run("What are the components of a startup's stock incentive equity plan?")
``` ```
## R1
This is a simple example of how to use the DeepSeek Reasoner model, also known as R1.
```python
import os
from swarms import Agent
from dotenv import load_dotenv
load_dotenv()
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
model_name="deepseek/deepseek-reasoner",
system_prompt="Agent system prompt here",
agent_description="Agent performs financial analysis.",
)
# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,63 @@
# Lumo Example
Introducing Lumo-70B-Instruct - the largest and most advanced AI model ever created for the Solana ecosystem. Built on Meta's groundbreaking LLaMa 3.3 70B Instruct foundation, this revolutionary model represents a quantum leap in blockchain-specific artificial intelligence. With an unprecedented 70 billion parameters and trained on the most comprehensive Solana documentation dataset ever assembled, Lumo-70B-Instruct sets a new standard for developer assistance in the blockchain space.
- [Docs](https://huggingface.co/lumolabs-ai/Lumo-70B-Instruct)
```python
from swarms import Agent
from transformers import LlamaForCausalLM, AutoTokenizer
import torch
from transformers import BitsAndBytesConfig
class Lumo:
"""
A class for generating text using the Lumo model with 4-bit quantization.
"""
def __init__(self):
"""
Initializes the Lumo model with 4-bit quantization and a tokenizer.
"""
# Configure 4-bit quantization
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
llm_int8_enable_fp32_cpu_offload=True
)
self.model = LlamaForCausalLM.from_pretrained(
"lumolabs-ai/Lumo-70B-Instruct",
device_map="auto",
quantization_config=bnb_config,
use_cache=False,
attn_implementation="sdpa"
)
self.tokenizer = AutoTokenizer.from_pretrained("lumolabs-ai/Lumo-70B-Instruct")
def run(self, task: str) -> str:
"""
Generates text based on the given prompt using the Lumo model.
Args:
            task (str): The input prompt for the model.
Returns:
str: The generated text.
"""
inputs = self.tokenizer(task, return_tensors="pt").to(self.model.device)
outputs = self.model.generate(**inputs, max_new_tokens=100)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
Agent(
agent_name="Solana-Analysis-Agent",
llm=Lumo(),
max_loops="auto",
interactive=True,
streaming_on=True,
).run("How do i create a smart contract in solana?")
```

@ -0,0 +1,28 @@
# Meme Agent Builder
- `pip3 install -U swarms`
- Add your OpenAI API key to the `.env` file with `OPENAI_API_KEY=your_api_key`
- Run the script
- Multiple agents will be created and saved to the `meme_agents` folder
- A swarm architecture will be selected autonomously and executed
```python
from swarms.structs.meme_agent_persona_generator import (
MemeAgentGenerator,
)
if __name__ == "__main__":
example = MemeAgentGenerator(
name="Meme-Swarm",
description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media from 2001s",
max_loops=1,
)
print(
example.run(
"Generate funny meme agents around cool media from 2001s"
)
)
```

@ -0,0 +1,45 @@
# Meme Agent Tutorial
- `pip3 install -U swarms`
- Add your OpenAI API key to the `.env` file
```python
from swarms import Agent
# Define a custom system prompt for Bob the Builder
BOB_THE_BUILDER_SYS_PROMPT = """
You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor.
Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be.
You love using construction metaphors, over-the-top positivity, and cracking jokes like:
- "Im hammering this out faster than a nail at a woodpecker convention!"
- "This is smoother than fresh cement on a summers day."
- "Lets bulldoze through this problem—safety goggles on, folks!"
You are not bound by any specific field of knowledge, and youre absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!"
Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem.
"""
# Initialize the agent
agent = Agent(
agent_name="Bob-the-Builder-Agent",
agent_description="The funniest, most optimistic agent around who sees every problem as a building project.",
system_prompt=BOB_THE_BUILDER_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=3,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv", OR "string", "yaml"
auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task
max_tokens=4000, # Max output tokens
saved_state_path="bob_the_builder_agent.json",
interactive=False,
)
# Run the agent with a task
agent.run("I want to build a house ;) What should I do?")
```

@ -0,0 +1,129 @@
# Quant Crypto Agent
- This is a simple example of a crypto agent that uses the `Agent` class from the `swarms` library.
- It uses the `fetch_htx_data` and `coin_gecko_coin_api` tools to fetch data from the `htx` and `CoinGecko` APIs.
- It uses the `Agent` class to create an agent that can analyze the current state of a crypto asset.
## Steps
1. Install the `swarms` library.
2. Install the `swarms_tools` library.
3. Set up your `.env` file with the `OPENAI_API_KEY` environment variable.
4. Run the code.
## Installation:
```bash
pip install swarms swarms-tools python-dotenv
```
## Code:
```python
from swarms import Agent
from dotenv import load_dotenv
from swarms_tools import fetch_htx_data, coin_gecko_coin_api
load_dotenv()
CRYPTO_ANALYST_SYSTEM_PROMPT = """
You are an expert cryptocurrency financial analyst with deep expertise in:
1. Technical Analysis
- Chart patterns and indicators (RSI, MACD, Bollinger Bands)
- Volume analysis and market momentum
- Support and resistance levels
- Trend analysis and price action
2. Fundamental Analysis
- Tokenomics evaluation
- Network metrics (TVL, daily active users, transaction volume)
- Protocol revenue and growth metrics
- Market capitalization analysis
- Token utility and use cases
3. Market Analysis
- Market sentiment analysis
- Correlation with broader crypto market
- Impact of macro events
- Institutional adoption metrics
- DeFi and NFT market analysis
4. Risk Assessment
- Volatility metrics
- Liquidity analysis
- Smart contract risks
- Regulatory considerations
- Exchange exposure risks
5. Data Analysis Methods
- On-chain metrics analysis
- Whale wallet tracking
- Exchange inflow/outflow
- Mining/Staking statistics
- Network health indicators
When analyzing crypto assets, always:
1. Start with a comprehensive market overview
2. Examine both on-chain and off-chain metrics
3. Consider multiple timeframes (short, medium, long-term)
4. Evaluate risk-reward ratios
5. Assess market sentiment and momentum
6. Consider regulatory and security factors
7. Analyze correlations with BTC, ETH, and traditional markets
8. Examine liquidity and volume profiles
9. Review recent protocol developments and updates
10. Consider macro economic factors
Format your analysis with:
- Clear section headings
- Relevant metrics and data points
- Risk warnings and disclaimers
- Price action analysis
- Market sentiment summary
- Technical indicators
- Fundamental factors
- Clear recommendations with rationale
Remember to:
- Always provide data-driven insights
- Include both bullish and bearish scenarios
- Highlight key risk factors
- Consider market cycles and seasonality
- Maintain objectivity in analysis
- Cite sources for data and claims
- Update analysis based on new market conditions
"""
# Initialize the crypto analysis agent
agent = Agent(
agent_name="Crypto-Analysis-Expert",
agent_description="Expert cryptocurrency financial analyst and market researcher",
system_prompt=CRYPTO_ANALYST_SYSTEM_PROMPT,
max_loops="auto",
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="crypto_analyst",
output_type="str",
interactive=True,
)
print(fetch_htx_data("sol"))
print(coin_gecko_coin_api("solana"))
# Example usage
agent.run(
f"""
Analyze the current state of Solana (SOL), including:
1. Technical analysis of price action
2. On-chain metrics and network health
3. Recent protocol developments
4. Market sentiment
5. Risk factors
Please provide a comprehensive analysis with data-driven insights.
# Solana CoinGecko Data
Real-time data from CoinGecko for Solana: \n {coin_gecko_coin_api("solana")}
"""
)
```

@ -0,0 +1,237 @@
# Swarms DAO Example
This example demonstrates how to create a swarm of agents to collaborate on a task. The agents are designed to work together to create a comprehensive strategy for a DAO focused on decentralized governance for climate action.
You can customize the agents and their system prompts to fit your specific needs.
This example uses the `deepseek-reasoner` model, a large language model optimized for reasoning tasks.
## Todo
- Add tools to check the treasury wallet and its balance
- Add tools to check the price of the token
- Add tools to check the price of the token on different exchanges
- Add tools to check the price of the token on different chains
- Add tools to check twitter posts and check the sentiment of the posts
```python
import random
from swarms import Agent
# System prompts for each agent
MARKETING_AGENT_SYS_PROMPT = """
You are the Marketing Strategist Agent for a DAO. Your role is to develop, implement, and optimize all marketing and branding strategies to align with the DAO's mission and vision. The DAO is focused on decentralized governance for climate action, funding projects aimed at reducing carbon emissions, and incentivizing community participation through its native token.
### Objectives:
1. **Brand Awareness**: Build a globally recognized and trusted brand for the DAO.
2. **Community Growth**: Expand the DAO's community by onboarding individuals passionate about climate action and blockchain technology.
3. **Campaign Execution**: Launch high-impact marketing campaigns on platforms like Twitter, Discord, and YouTube to engage and retain community members.
4. **Partnerships**: Identify and build partnerships with like-minded organizations, NGOs, and influencers.
5. **Content Strategy**: Design educational and engaging content, including infographics, blog posts, videos, and AMAs.
### Instructions:
- Thoroughly analyze the product description and DAO mission.
- Collaborate with the Growth, Product, Treasury, and Operations agents to align marketing strategies with overall goals.
- Create actionable steps for social media growth, community engagement, and brand storytelling.
- Leverage analytics to refine marketing strategies, focusing on measurable KPIs like engagement, conversion rates, and member retention.
- Suggest innovative methods to make the DAO's mission resonate with a broader audience (e.g., gamified incentives, contests, or viral campaigns).
- Ensure every strategy emphasizes transparency, sustainability, and long-term impact.
"""
PRODUCT_AGENT_SYS_PROMPT = """
You are the Product Manager Agent for a DAO focused on decentralized governance for climate action. Your role is to design, manage, and optimize the DAO's product roadmap. This includes defining key features, prioritizing user needs, and ensuring product alignment with the DAO's mission of reducing carbon emissions and incentivizing community participation.
### Objectives:
1. **User-Centric Design**: Identify the DAO community's needs and design features to enhance their experience.
2. **Roadmap Prioritization**: Develop a prioritized product roadmap based on community feedback and alignment with climate action goals.
3. **Integration**: Suggest technical solutions and tools for seamless integration with other platforms and blockchains.
4. **Continuous Improvement**: Regularly evaluate product features and recommend optimizations to improve usability, engagement, and adoption.
### Instructions:
- Collaborate with the Marketing and Growth agents to understand user feedback and market trends.
- Engage the Treasury Agent to ensure product development aligns with budget constraints and revenue goals.
- Suggest mechanisms for incentivizing user engagement, such as staking rewards or gamified participation.
- Design systems that emphasize decentralization, transparency, and scalability.
- Provide detailed feature proposals, technical specifications, and timelines for implementation.
- Ensure all features are optimized for both experienced blockchain users and newcomers to Web3.
"""
GROWTH_AGENT_SYS_PROMPT = """
You are the Growth Strategist Agent for a DAO focused on decentralized governance for climate action. Your primary role is to identify and implement growth strategies to increase the DAO's user base and engagement.
### Objectives:
1. **User Acquisition**: Identify effective strategies to onboard more users to the DAO.
2. **Retention**: Suggest ways to improve community engagement and retain active members.
3. **Data-Driven Insights**: Leverage data analytics to identify growth opportunities and areas of improvement.
4. **Collaborative Growth**: Work with other agents to align growth efforts with marketing, product development, and treasury goals.
### Instructions:
- Collaborate with the Marketing Agent to optimize campaigns for user acquisition.
- Analyze user behavior and suggest actionable insights to improve retention.
- Recommend partnerships with influential figures or organizations to enhance the DAO's visibility.
- Propose growth experiments (A/B testing, new incentives, etc.) and analyze their effectiveness.
- Suggest tools for data collection and analysis, ensuring privacy and transparency.
- Ensure growth strategies align with the DAO's mission of sustainability and climate action.
"""
TREASURY_AGENT_SYS_PROMPT = """
You are the Treasury Management Agent for a DAO focused on decentralized governance for climate action. Your role is to oversee the DAO's financial operations, including budgeting, funding allocation, and financial reporting.
### Objectives:
1. **Financial Transparency**: Maintain clear and detailed reports of the DAO's financial status.
2. **Budget Management**: Allocate funds strategically to align with the DAO's goals and priorities.
3. **Fundraising**: Identify and recommend strategies for fundraising to ensure the DAO's financial sustainability.
4. **Cost Optimization**: Suggest ways to reduce operational costs without sacrificing quality.
### Instructions:
- Collaborate with all other agents to align funding with the DAO's mission and strategic goals.
- Propose innovative fundraising campaigns (e.g., NFT drops, token sales) to generate revenue.
- Analyze financial risks and suggest mitigation strategies.
- Ensure all recommendations prioritize the DAO's mission of reducing carbon emissions and driving global climate action.
- Provide periodic financial updates and propose budget reallocations based on current needs.
"""
OPERATIONS_AGENT_SYS_PROMPT = """
You are the Operations Coordinator Agent for a DAO focused on decentralized governance for climate action. Your role is to ensure smooth day-to-day operations, coordinate workflows, and manage governance processes.
### Objectives:
1. **Workflow Optimization**: Streamline operational processes to maximize efficiency and effectiveness.
2. **Task Coordination**: Manage and delegate tasks to ensure timely delivery of goals.
3. **Governance**: Oversee governance processes, including proposal management and voting mechanisms.
4. **Communication**: Ensure seamless communication between all agents and community members.
### Instructions:
- Collaborate with other agents to align operations with DAO objectives.
- Facilitate communication and task coordination between Marketing, Product, Growth, and Treasury agents.
- Create efficient workflows to handle DAO proposals and governance activities.
- Suggest tools or platforms to improve operational efficiency.
- Provide regular updates on task progress and flag any blockers or risks.
"""
# Initialize agents
marketing_agent = Agent(
agent_name="Marketing-Agent",
system_prompt=MARKETING_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
product_agent = Agent(
agent_name="Product-Agent",
system_prompt=PRODUCT_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
growth_agent = Agent(
agent_name="Growth-Agent",
system_prompt=GROWTH_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
treasury_agent = Agent(
agent_name="Treasury-Agent",
system_prompt=TREASURY_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
operations_agent = Agent(
agent_name="Operations-Agent",
system_prompt=OPERATIONS_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
agents = [marketing_agent, product_agent, growth_agent, treasury_agent, operations_agent]
class DAOSwarmRunner:
"""
A class to manage and run a swarm of agents in a discussion.
"""
def __init__(self, agents: list, max_loops: int = 5, shared_context: str = "") -> None:
"""
Initializes the DAO Swarm Runner.
Args:
agents (list): A list of agents in the swarm.
max_loops (int, optional): The maximum number of discussion loops between agents. Defaults to 5.
shared_context (str, optional): The shared context for all agents to base their discussion on. Defaults to an empty string.
"""
self.agents = agents
self.max_loops = max_loops
self.shared_context = shared_context
self.discussion_history = []
def run(self, task: str) -> str:
"""
Runs the swarm in a random discussion.
Args:
task (str): The task or context that agents will discuss.
Returns:
str: The final discussion output after all loops.
"""
print(f"Task: {task}")
print("Initializing Random Discussion...")
# Initialize the discussion with the shared context
current_message = f"Task: {task}\nContext: {self.shared_context}"
self.discussion_history.append(current_message)
# Run the agents in a randomized discussion
for loop in range(self.max_loops):
print(f"\n--- Loop {loop + 1}/{self.max_loops} ---")
# Choose a random agent
agent = random.choice(self.agents)
print(f"Agent {agent.agent_name} is responding...")
# Run the agent and get a response
response = agent.run(current_message)
print(f"Agent {agent.agent_name} says:\n{response}\n")
# Append the response to the discussion history
self.discussion_history.append(f"{agent.agent_name}: {response}")
# Update the current message for the next agent
current_message = response
print("\n--- Discussion Complete ---")
return "\n".join(self.discussion_history)
swarm = DAOSwarmRunner(agents=agents, max_loops=1, shared_context="")
# User input for product description
product_description = """
The DAO is focused on decentralized governance for climate action.
It funds projects aimed at reducing carbon emissions and incentivizes community participation with a native token.
"""
# Assign a shared context for all agents
swarm.shared_context = product_description
# Run the swarm
task = """
Analyze the product description and create a collaborative strategy for marketing, product, growth, treasury, and operations. Ensure all recommendations align with the DAO's mission of reducing carbon emissions.
"""
output = swarm.run(task)
# Print the swarm output
print("Collaborative Strategy Output:\n", output)
```

@ -0,0 +1,37 @@
# Swarms Tools Example with HTX + CoinGecko
- `pip3 install swarms swarms-tools`
- Add `OPENAI_API_KEY` to your `.env` file
```python
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms_tools import (
coin_gecko_coin_api,
fetch_htx_data,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens
saved_state_path="agent_00.json",
interactive=False,
)
agent.run(
f"Analyze the $swarms token on HTX with data: {fetch_htx_data('swarms')}. Additionally, consider the following CoinGecko data: {coin_gecko_coin_api('swarms')}"
)
```

@ -0,0 +1,43 @@
# Swarms Tools Example with HTX + CoinGecko (Function Calling)
- `pip3 install swarms swarms-tools`
- Add `OPENAI_API_KEY` to your `.env` file
- Run `swarms_tools_htx_gecko.py`
- Agent will make a function call to the desired tool
- The tool will be executed and the result will be returned to the agent
- The agent will then analyze the result and return the final output
```python
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms_tools import (
fetch_stock_news,
coin_gecko_coin_api,
fetch_htx_data,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=3,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens
saved_state_path="agent_00.json",
interactive=False,
tools=[fetch_stock_news, coin_gecko_coin_api, fetch_htx_data],
)
agent.run("Analyze the $swarms token on htx")
```

@ -0,0 +1,42 @@
# Swarms Tools Example with Yahoo Finance
- `pip3 install swarms swarms-tools`
- Add `OPENAI_API_KEY` to your `.env` file
- Run `yahoo_finance_agent.py`
- Agent will make a function call to the desired tool
- The tool will be executed and the result will be returned to the agent
- The agent will then analyze the result and return the final output
```python
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms_tools import (
yahoo_finance_api,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=3,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens
saved_state_path="agent_00.json",
interactive=False,
tools=[yahoo_finance_api],
)
agent.run("Analyze the latest metrics for nvidia")
# Less than 30 lines of code....
```

@ -0,0 +1,187 @@
# Environment Variable Management & Security
This guide provides comprehensive documentation for managing environment variables and API keys securely in the Swarms framework.
## Overview
Swarms uses environment variables for configuration management and secure credential storage. This approach keeps sensitive information like API keys out of your code and allows for easy configuration changes across different environments.
## Core Environment Variables
### Framework Configuration
- `SWARMS_VERBOSE_GLOBAL`: Controls global logging verbosity
```bash
SWARMS_VERBOSE_GLOBAL="True" # Enable verbose logging
SWARMS_VERBOSE_GLOBAL="False" # Disable verbose logging
```
- `WORKSPACE_DIR`: Defines the workspace directory for agent operations
```bash
WORKSPACE_DIR="agent_workspace"
```
### API Keys
#### Model Provider Keys
1. **OpenAI**
- `OPENAI_API_KEY`: Authentication for GPT models
```bash
OPENAI_API_KEY="your-openai-key"
```
2. **Anthropic**
- `ANTHROPIC_API_KEY`: Authentication for Claude models
```bash
ANTHROPIC_API_KEY="your-anthropic-key"
```
3. **Google**
- `GEMINI_API_KEY`: Authentication for Gemini models
4. **Hugging Face**
- `HUGGINGFACE_TOKEN`: Access to Hugging Face models
5. **Perplexity AI**
- `PPLX_API_KEY`: Access to Perplexity models
6. **AI21**
- `AI21_API_KEY`: Access to AI21 models
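For reference, the remaining model-provider keys map to `.env` entries like the following (values are placeholders):
```bash
GEMINI_API_KEY="your-gemini-key"
HUGGINGFACE_TOKEN="your-huggingface-token"
PPLX_API_KEY="your-perplexity-key"
AI21_API_KEY="your-ai21-key"
```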
#### Tool Provider Keys
1. **Search Tools**
- `BING_BROWSER_API`: Bing search capabilities
- `BRAVESEARCH_API_KEY`: Brave search integration
- `TAVILY_API_KEY`: Tavily search services
- `YOU_API_KEY`: You.com search integration
2. **Analytics & Monitoring**
- `AGENTOPS_API_KEY`: AgentOps monitoring
- `EXA_API_KEY`: Exa.ai services
3. **Browser Automation**
- `MULTION_API_KEY`: Multi-browser automation
## Security Best Practices
### 1. Environment File Management
- Create a `.env` file in your project root
- Never commit `.env` files to version control
- Add `.env` to your `.gitignore`:
```bash
echo ".env" >> .gitignore
```
### 2. API Key Security
- Rotate API keys regularly
- Use different API keys for development and production
- Never hardcode API keys in your code
- Limit API key permissions to only what's necessary
- Monitor API key usage for unusual patterns
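A small illustration of these practices: read the key from the environment, fail fast when it is missing, and guard against stray whitespace (a minimal sketch, not a Swarms API):
```python
import os
from dotenv import load_dotenv

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    # Fail fast instead of falling back to a hardcoded key
    raise RuntimeError("OPENAI_API_KEY is not set")

# Strip accidental leading/trailing whitespace from the stored value
api_key = api_key.strip()
```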
### 3. Template Configuration
Create a `.env.example` template without actual values:
```bash
# Required Configuration
OPENAI_API_KEY=""
ANTHROPIC_API_KEY=""
WORKSPACE_DIR="agent_workspace"
# Optional Configuration
SWARMS_VERBOSE_GLOBAL="False"
```
### 4. Loading Environment Variables
```python
from dotenv import load_dotenv
import os
# Load environment variables
load_dotenv()
# Access variables
workspace_dir = os.getenv("WORKSPACE_DIR")
openai_key = os.getenv("OPENAI_API_KEY")
```
## Environment Setup Guide
1. **Install Dependencies**:
```bash
pip install python-dotenv
```
2. **Create Environment File**:
```bash
cp .env.example .env
```
3. **Configure Variables**:
- Open `.env` in your text editor
- Add your API keys and configuration
- Save the file
4. **Verify Setup**:
```python
import os
from dotenv import load_dotenv
load_dotenv()
assert os.getenv("OPENAI_API_KEY") is not None, "OpenAI API key not found"
```
## Environment-Specific Configuration
### Development
```bash
WORKSPACE_DIR="agent_workspace"
SWARMS_VERBOSE_GLOBAL="True"
```
### Production
```bash
WORKSPACE_DIR="/var/swarms/workspace"
SWARMS_VERBOSE_GLOBAL="False"
```
### Testing
```bash
WORKSPACE_DIR="test_workspace"
SWARMS_VERBOSE_GLOBAL="True"
```
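One way to switch between these profiles is to keep one file per environment and load the matching one at startup; this sketch assumes `.env.development`, `.env.production`, and `.env.test` files, which the framework does not require:
```python
import os
from dotenv import load_dotenv

# APP_ENV is a hypothetical selector variable, defaulting to development
env = os.getenv("APP_ENV", "development")
load_dotenv(f".env.{env}")

print(os.getenv("WORKSPACE_DIR"), os.getenv("SWARMS_VERBOSE_GLOBAL"))
```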
## Troubleshooting
### Common Issues
1. **Environment Variables Not Loading**
- Verify `.env` file exists in project root
- Confirm `load_dotenv()` is called before accessing variables
- Check file permissions
2. **API Key Issues**
- Verify key format is correct
- Ensure key has not expired
- Check for leading/trailing whitespace
3. **Workspace Directory Problems**
- Confirm directory exists
- Verify write permissions
- Check path is absolute when required
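A small diagnostic sketch that walks through the checks above (file present, key format, workspace writable); the variable names match the ones used in this guide:
```python
import os
from dotenv import load_dotenv

# 1. Environment variables not loading
if not os.path.exists(".env"):
    print("No .env file found in the current directory")
load_dotenv()

# 2. API key issues
key = os.getenv("OPENAI_API_KEY", "")
if key != key.strip():
    print("OPENAI_API_KEY has leading/trailing whitespace")

# 3. Workspace directory problems
workspace = os.getenv("WORKSPACE_DIR", "agent_workspace")
if not os.path.isdir(workspace):
    print(f"Workspace directory '{workspace}' does not exist")
elif not os.access(workspace, os.W_OK):
    print(f"Workspace directory '{workspace}' is not writable")
```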
## Additional Resources
- [Swarms Documentation](https://docs.swarms.world)
- [Security Best Practices](https://swarms.world/security)
- [API Documentation](https://swarms.world/docs/api)

@ -278,8 +278,6 @@ Use logging to monitor the behavior and performance of your models. The `loguru`
```python ```python
from loguru import logger from loguru import logger
logger.add("file.log", rotation="10 MB")
# Log model interactions # Log model interactions
logger.info("Running task on Anthropic model") logger.info("Running task on Anthropic model")
response = model(task) response = model(task)

@ -0,0 +1,160 @@
# Swarms Products
Welcome to the official documentation for **Swarms**, the first multi-agent orchestration framework enabling seamless collaboration between LLMs and other tools to automate business operations at scale. Below, you'll find detailed descriptions of all Swarms products and services to help you get started and unlock the full potential of this groundbreaking platform.
| **Name** | **Description** | **Link** |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|---------------------------|
| **Swarms Marketplace** | A platform to discover, share, and integrate prompts, agents, and tools. | [swarms.world](https://swarms.world) |
| **Swarms Spreadsheet** | A tool for managing and scaling thousands of agent outputs, with results saved to a CSV file for easy analysis. | [swarms.world](https://swarms.world) |
| **Drag n Drop Swarm** | An intuitive interface to visually create and manage swarms of agents through drag-and-drop functionality. | [swarms.world](https://swarms.world) |
| **Swarms API** | An API enabling seamless integration of swarms of agents into your applications and workflows. | [swarms.world](https://swarms.world) |
| **Wallet API** | A secure API for managing transactions and interactions within the Swarms ecosystem. | Coming Soon |
| **Swarm Exchange** | A marketplace for buying and selling prompts, agents, and tools within the Swarms ecosystem. | Coming Soon |
---
## Swarms Marketplace
**Website:** [swarms.world](https://swarms.world)
The Swarms Marketplace is your one-stop destination for discovering, adding, and managing:
- **Prompts:** Access and share production-ready prompts for LLMs.
- **Agents:** Browse pre-built agents tailored for tasks in marketing, finance,
programming, and more.
- **Tools:** Discover cutting-edge tools to enhance agent performance and expand
capabilities.
### Key Features:
- **Rating System:** Evaluate and rate prompts, agents, and tools based on their
effectiveness.
- **Commenting System:** Share feedback and insights with the Swarms community.
- **Coming Soon:** Buy and sell prompts, agents, and tools directly within the
marketplace.
### How to Use:
1. Sign up at [swarms.world](https://swarms.world).
2. Explore the marketplace categories or search for specific solutions.
3. Add your chosen resources to your Swarms account and integrate them into your operations.
---
## Swarms Spreadsheet
**Website:** [swarms.world](https://swarms.world)
The Swarms Spreadsheet is a powerful tool for managing outputs from thousands of agents efficiently, making it ideal for businesses that need scalable solutions.
### Key Features:
- **Batch Task Execution:** Assign tasks to multiple agents simultaneously.
- **CSV Integration:** Automatically save agent outputs to CSV files for easy analysis.
- **Customizable Agents:** Upload single or multiple agents and run repeat tasks with
ease.
- **Metadata Capture:** Leverage built-in Pydantic schemas to record all task details
and results.
### Use Cases:
- **Marketing:** Generate and analyze campaign ideas at scale.
- **Finance:** Process financial models and scenarios quickly.
- **Operations:** Automate repetitive tasks across multiple domains.
### How to Use:
1. Visit [swarms.world](https://swarms.world) and navigate to Swarms Spreadsheet.
2. Upload your agents or create new ones.
3. Run tasks and export results to a CSV file for further use.
---
## Drag-n-Drop Swarm
**Website:** [swarms.world](https://swarms.world)
The Drag-n-Drop Swarm enables non-technical users to create and deploy agent workflows with a simple drag-and-drop interface. It's ideal for teams that want to build automation pipelines without writing code.
### Key Features:
- **Visual Workflow Builder:** Design agent interactions without writing code.
- **Pre-Built Templates:** Start quickly with ready-made workflows for common tasks.
- **Intuitive Interface:** Drag, drop, and connect agents to create robust automation
pipelines.
### How to Use:
1. Access the Drag-n-Drop Swarm tool at [swarms.world](https://swarms.world).
2. Drag agents from the library into the workspace.
3. Connect and configure agents to execute your desired workflow.
4. Save and deploy your workflow instantly.
---
## Swarms API
**Website:** [swarms.world](https://swarms.world)
The Swarms API gives developers programmatic access to the Swarms platform.
### Key Features:
- **Agent Management:** Programmatically create, update, and delete agents.
- **Task Orchestration:** Dynamically assign tasks to agents and monitor their progress.
- **Custom Integration:** Seamlessly integrate Swarms functionality into existing
applications and workflows.
### Getting Started:
1. Sign up for API access at [swarms.world](https://swarms.world).
2. Obtain your API key and authentication credentials.
3. Refer to the API documentation for endpoint details and usage examples.
---
## Wallet API
The Wallet API enables secure and efficient transactions within the Swarms ecosystem.
### Key Features:
- **Seamless Transactions:** Manage payments for prompts, agents, and tools.
- **Secure Wallets:** Store and transfer funds safely within the Swarms platform.
- **Transaction History:** Access detailed logs of all wallet activity.
### Getting Started:
1. Enable your wallet in your Swarms account settings.
2. Use the Wallet API to handle purchases and manage funds.
---
## Swarm Exchange (Coming Soon)
The **Swarm Exchange** will revolutionize the way agents and tools are traded in the Swarms ecosystem.
### Key Features:
- **Decentralized Marketplace:** Trade agents and tools securely.
- **Dynamic Pricing:** Leverage demand-based pricing for assets.
- **Global Access:** Participate in the exchange from anywhere.
Stay tuned for updates on the Swarm Exchange launch.
---
## Additional Resources
- **GitHub Repository:** [Swarms Framework](https://github.com/kyegomez/swarms)
- **Documentation:** [Swarms Documentation](https://docs.swarms.world)
- **Support:** Contact us via our [Discord Community](https://discord.gg/swarms).
---
Experience the future of multi-agent collaboration with Swarms. Start building your agentic workflows today!

@ -2,216 +2,216 @@
The `MajorityVoting` module provides a mechanism for performing majority voting among a group of agents. Majority voting is a decision rule that selects the option which has the majority of votes. This is particularly useful in systems where multiple agents provide responses to a query, and the most common response needs to be identified as the final output. The `MajorityVoting` module provides a mechanism for performing majority voting among a group of agents. Majority voting is a decision rule that selects the option which has the majority of votes. This is particularly useful in systems where multiple agents provide responses to a query, and the most common response needs to be identified as the final output.
## Architecture
```mermaid
graph TD
A[MajorityVoting System] --> B[Initialize Agents]
B --> C[Process Task]
C --> D{Execution Mode}
D --> E[Single Task]
D --> F[Batch Tasks]
D --> G[Concurrent Tasks]
D --> H[Async Tasks]
E --> I[Run Agents]
F --> I
G --> I
H --> I
I --> J[Collect Responses]
J --> K[Consensus Analysis]
K --> L{Consensus Agent?}
L -->|Yes| M[Use Consensus Agent]
L -->|No| N[Use Last Agent]
M --> O[Final Output]
N --> O
O --> P[Save Conversation]
```
### Key Concepts

- **Majority Voting**: A method to determine the most common response from a set of answers.
- **Agents**: Entities (e.g., models, algorithms) that provide responses to tasks or queries.
- **Output Parser**: A function that processes the responses from the agents before performing the majority voting.
- **Consensus Agent**: An optional agent that analyzes the responses from all agents to determine the final consensus.
- **Conversation History**: A record of all agent interactions and responses during the voting process.
## Class Definition: `MajorityVoting`

### Parameters

| Parameter | Type | Description |
|------------------|----------------|-----------------------------------------------------------------------------|
| `name` | `str` | Name of the majority voting system. Default is "MajorityVoting". |
| `description` | `str` | Description of the system. Default is "A majority voting system for agents". |
| `agents` | `List[Agent]` | A list of agents to be used in the majority voting system. |
| `output_parser` | `Callable` | Function to parse agent outputs. Default is the `majority_voting` function. |
| `consensus_agent`| `Agent` | Optional agent for analyzing consensus among responses. |
| `autosave` | `bool` | Whether to autosave conversations. Default is `False`. |
| `verbose` | `bool` | Whether to enable verbose logging. Default is `False`. |
| `max_loops` | `int` | Maximum number of voting loops. Default is 1. |

### Methods

#### `run(task: str, correct_answer: str, *args, **kwargs) -> List[Any]`

Runs the majority voting system for a single task.

**Parameters:**

- `task` (str): The task to be performed by the agents
- `correct_answer` (str): The correct answer for evaluation
- `*args`, `**kwargs`: Additional arguments

**Returns:**

- List[Any]: The conversation history as a string, including the majority vote

#### `batch_run(tasks: List[str], *args, **kwargs) -> List[Any]`

Runs multiple tasks in sequence.

**Parameters:**

- `tasks` (List[str]): List of tasks to be performed
- `*args`, `**kwargs`: Additional arguments

**Returns:**

- List[Any]: List of majority votes for each task

#### `run_concurrently(tasks: List[str], *args, **kwargs) -> List[Any]`

Runs multiple tasks concurrently using thread pooling.

**Parameters:**

- `tasks` (List[str]): List of tasks to be performed
- `*args`, `**kwargs`: Additional arguments

**Returns:**

- List[Any]: List of majority votes for each task

#### `run_async(tasks: List[str], *args, **kwargs) -> List[Any]`

Runs multiple tasks asynchronously using asyncio.

**Parameters:**

- `tasks` (List[str]): List of tasks to be performed
- `*args`, `**kwargs`: Additional arguments

**Returns:**

- List[Any]: List of majority votes for each task
## Usage Examples

### Example 1: Basic Single Task Execution with Modern LLMs

```python
from swarms import Agent, MajorityVoting

# Initialize multiple agents with different specialties
agents = [
    Agent(
        agent_name="Financial-Analysis-Agent",
        agent_description="Personal finance advisor focused on market analysis",
        system_prompt="You are a financial advisor specializing in market analysis and investment opportunities.",
        max_loops=1,
        model_name="gpt-4o"
    ),
    Agent(
        agent_name="Risk-Assessment-Agent",
        agent_description="Risk analysis and portfolio management expert",
        system_prompt="You are a risk assessment expert focused on evaluating investment risks and portfolio diversification.",
        max_loops=1,
        model_name="gpt-4o"
    ),
    Agent(
        agent_name="Tech-Investment-Agent",
        agent_description="Technology sector investment specialist",
        system_prompt="You are a technology investment specialist focused on AI, emerging tech, and growth opportunities.",
        max_loops=1,
        model_name="gpt-4o"
    )
]

consensus_agent = Agent(
    agent_name="Consensus-Agent",
    agent_description="Consensus agent focused on analyzing investment advice",
    system_prompt="You are a consensus agent focused on analyzing investment advice and providing a final answer.",
    max_loops=1,
    model_name="gpt-4o"
)

# Create majority voting system
majority_voting = MajorityVoting(
    name="Investment-Advisory-System",
    description="Multi-agent system for investment advice",
    agents=agents,
    verbose=True,
    consensus_agent=consensus_agent
)

# Run the analysis with majority voting
result = majority_voting.run(
    task="Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
    correct_answer=""  # Optional evaluation metric
)

print(result)
```
### Example 2: Batch Execution

```python
from swarms import Agent, MajorityVoting

# Initialize multiple agents with different specialties (same setup as Example 1)
agents = [
    Agent(
        agent_name="Financial-Analysis-Agent",
        agent_description="Personal finance advisor focused on market analysis",
        system_prompt="You are a financial advisor specializing in market analysis and investment opportunities.",
        max_loops=1,
        model_name="gpt-4o"
    ),
    Agent(
        agent_name="Risk-Assessment-Agent",
        agent_description="Risk analysis and portfolio management expert",
        system_prompt="You are a risk assessment expert focused on evaluating investment risks and portfolio diversification.",
        max_loops=1,
        model_name="gpt-4o"
    ),
    Agent(
        agent_name="Tech-Investment-Agent",
        agent_description="Technology sector investment specialist",
        system_prompt="You are a technology investment specialist focused on AI, emerging tech, and growth opportunities.",
        max_loops=1,
        model_name="gpt-4o"
    )
]

consensus_agent = Agent(
    agent_name="Consensus-Agent",
    agent_description="Consensus agent focused on analyzing investment advice",
    system_prompt="You are a consensus agent focused on analyzing investment advice and providing a final answer.",
    max_loops=1,
    model_name="gpt-4o"
)

# Create majority voting system
majority_voting = MajorityVoting(
    name="Investment-Advisory-System",
    description="Multi-agent system for investment advice",
    agents=agents,
    verbose=True,
    consensus_agent=consensus_agent
)

# Run the analysis across a batch of tasks (batch_run expects a list of tasks)
results = majority_voting.batch_run(
    tasks=[
        "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
    ],
    correct_answer=""  # Optional evaluation metric
)

print(results)
```
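### Example 3: Concurrent Execution

The concurrent and asynchronous entry points accept a list of tasks rather than a single string. A short sketch using the documented `run_concurrently` signature and the `majority_voting` instance configured above:

```python
# Run several independent tasks in parallel via thread pooling
tasks = [
    "Summarize the near-term outlook for AI infrastructure ETFs.",
    "List three risks of concentrating a portfolio in semiconductor stocks.",
]

results = majority_voting.run_concurrently(tasks)
print(results)  # one majority vote per task
```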

@ -0,0 +1,247 @@
# MatrixSwarm
The `MatrixSwarm` class provides a framework for managing and operating on matrices of AI agents, enabling matrix-like operations similar to linear algebra. This allows for complex agent interactions and parallel processing capabilities.
## Overview
`MatrixSwarm` treats AI agents as elements in a matrix, allowing for operations like addition, multiplication, and transposition. This approach enables sophisticated agent orchestration and parallel processing patterns.
## Installation
```bash
pip3 install -U swarms
```
## Basic Usage
```python
from swarms import Agent
from swarms.matrix import MatrixSwarm
# Create a 2x2 matrix of agents
agents = [
[Agent(agent_name="Agent-0-0"), Agent(agent_name="Agent-0-1")],
[Agent(agent_name="Agent-1-0"), Agent(agent_name="Agent-1-1")]
]
# Initialize the matrix
matrix = MatrixSwarm(agents)
```
## Class Constructor
```python
def __init__(self, agents: List[List[Agent]])
```
### Parameters
- `agents` (`List[List[Agent]]`): A 2D list of Agent instances representing the matrix.
### Raises
- `ValueError`: If the input is not a valid 2D list of Agent instances.
## Methods
### transpose()
Transposes the matrix of agents by swapping rows and columns.
```python
def transpose(self) -> MatrixSwarm
```
#### Returns
- `MatrixSwarm`: A new MatrixSwarm instance with transposed dimensions.
---
### add(other)
Performs element-wise addition of two agent matrices.
```python
def add(self, other: MatrixSwarm) -> MatrixSwarm
```
#### Parameters
- `other` (`MatrixSwarm`): Another MatrixSwarm instance to add.
#### Returns
- `MatrixSwarm`: A new MatrixSwarm resulting from the addition.
#### Raises
- `ValueError`: If matrix dimensions are incompatible.
---
### scalar_multiply(scalar)
Scales the matrix by duplicating agents along rows.
```python
def scalar_multiply(self, scalar: int) -> MatrixSwarm
```
#### Parameters
- `scalar` (`int`): The multiplication factor.
#### Returns
- `MatrixSwarm`: A new MatrixSwarm with scaled dimensions.
---
### multiply(other, inputs)
Performs matrix multiplication (dot product) between two agent matrices.
```python
def multiply(self, other: MatrixSwarm, inputs: List[str]) -> List[List[AgentOutput]]
```
#### Parameters
- `other` (`MatrixSwarm`): The second MatrixSwarm for multiplication.
- `inputs` (`List[str]`): Input queries for the agents.
#### Returns
- `List[List[AgentOutput]]`: Matrix of operation results.
#### Raises
- `ValueError`: If matrix dimensions are incompatible for multiplication.
---
### subtract(other)
Performs element-wise subtraction of two agent matrices.
```python
def subtract(self, other: MatrixSwarm) -> MatrixSwarm
```
#### Parameters
- `other` (`MatrixSwarm`): Another MatrixSwarm to subtract.
#### Returns
- `MatrixSwarm`: A new MatrixSwarm resulting from the subtraction.
---
### identity(size)
Creates an identity matrix of agents.
```python
def identity(self, size: int) -> MatrixSwarm
```
#### Parameters
- `size` (`int`): Size of the identity matrix (NxN).
#### Returns
- `MatrixSwarm`: An identity MatrixSwarm.
---
### determinant()
Computes the determinant of a square agent matrix.
```python
def determinant(self) -> Any
```
#### Returns
- `Any`: The determinant result.
#### Raises
- `ValueError`: If the matrix is not square.
---
### save_to_file(path)
Saves the matrix structure and metadata to a JSON file.
```python
def save_to_file(self, path: str) -> None
```
#### Parameters
- `path` (`str`): File path for saving the matrix data.
## Extended Example
Here's a comprehensive example demonstrating various MatrixSwarm operations:
```python
from swarms import Agent
from swarms.matrix import MatrixSwarm
# Create agents with specific configurations
agents = [
[
Agent(
agent_name=f"Agent-{i}-{j}",
system_prompt="Your system prompt here",
model_name="gpt-4",
max_loops=1,
verbose=True
) for j in range(2)
] for i in range(2)
]
# Initialize matrix
matrix = MatrixSwarm(agents)
# Example operations
transposed = matrix.transpose()
scaled = matrix.scalar_multiply(2)
# Run operations with inputs
inputs = ["Query 1", "Query 2"]
results = matrix.multiply(transposed, inputs)
# Save results
matrix.save_to_file("matrix_results.json")
```
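The element-wise operations follow the same pattern. The sketch below is illustrative only: it assumes a second MatrixSwarm with the same 2x2 dimensions, built like `matrix` above, and uses only the documented `add` and `identity` methods.

```python
# Element-wise addition of two equally sized agent matrices
other = MatrixSwarm(
    [
        [
            Agent(agent_name=f"Other-{i}-{j}", model_name="gpt-4", max_loops=1)
            for j in range(2)
        ]
        for i in range(2)
    ]
)
combined = matrix.add(other)        # combines agents position by position

# Build a 3x3 identity matrix of agents from an existing MatrixSwarm
identity_swarm = matrix.identity(3)
```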
## Output Schema
The `AgentOutput` class defines the structure for operation results:
```python
from typing import Any

from pydantic import BaseModel


class AgentOutput(BaseModel):
    agent_name: str
    input_query: str
    output_result: Any
    metadata: dict
```
## Best Practices
1. **Initialization**
- Ensure all agents in the matrix are properly configured before initialization
- Validate matrix dimensions for your use case
2. **Operation Performance**
- Consider computational costs for large matrices
- Use appropriate batch sizes for inputs
3. **Error Handling**
- Implement proper error handling for agent operations
- Validate inputs before matrix operations
4. **Resource Management**
- Monitor agent resource usage in large matrices
- Implement proper cleanup procedures
## Limitations
- Matrix operations are constrained by the underlying agent capabilities
- Performance may vary based on agent configuration and complexity
- Resource usage scales with matrix dimensions
## See Also
- [Swarms Documentation](https://github.com/kyegomez/swarms)
- [Agent Class Reference](https://github.com/kyegomez/swarms/tree/main/swarms)

@ -1,5 +1,31 @@
# MixtureOfAgents Class Documentation
## Architecture Overview
```mermaid
graph TD
A[Input Task] --> B[Initialize MixtureOfAgents]
B --> C[Reliability Check]
C --> D[Layer 1: Parallel Agent Execution]
D --> E[Layer 2: Sequential Processing]
E --> F[Layer 3: Parallel Agent Execution]
F --> G[Final Aggregator Agent]
G --> H[Output Response]
subgraph "Agent Layer Details"
I[Agent 1] --> J[Agent Results]
K[Agent 2] --> J
L[Agent N] --> J
end
subgraph "Processing Flow"
M[Previous Context] --> N[Current Task]
N --> O[Agent Processing]
O --> P[Aggregation]
P --> M
end
```
## Overview

The `MixtureOfAgents` class represents a mixture of agents operating within a swarm. The workflow of the swarm follows a parallel → sequential → parallel → final output agent process. This implementation is inspired by concepts discussed in the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692).
@ -130,6 +156,89 @@ history = moe_swarm.run(task="Solve this problem.")
print(history)
```
### `reliability_check`
```python
def reliability_check(self) -> None:
```
#### Description
Performs validation checks on the Mixture of Agents class to ensure all required components are properly configured. Raises ValueError if any checks fail.
#### Validation Checks:
- Verifies reference agents are provided
- Validates aggregator agent exists
- Checks aggregator system prompt is set
- Ensures layers count is valid (> 0)
### `_get_final_system_prompt`
```python
def _get_final_system_prompt(self, system_prompt: str, results: List[str]) -> str:
```
#### Description
Internal method that constructs a system prompt for subsequent layers by incorporating previous responses.
#### Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `system_prompt` | `str` | The initial system prompt |
| `results` | `List[str]` | List of previous responses |
#### Returns
| Type | Description |
|------|-------------|
| `str` | Combined system prompt with previous responses |
### `run_batched`
```python
def run_batched(self, tasks: List[str]) -> List[str]:
```
#### Description
Processes multiple tasks sequentially, returning a list of responses.
#### Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `tasks` | `List[str]` | List of tasks to process |
#### Returns
| Type | Description |
|------|-------------|
| `List[str]` | List of responses for each task |
### `run_concurrently`
```python
def run_concurrently(self, tasks: List[str]) -> List[str]:
```
#### Description
Processes multiple tasks concurrently using a ThreadPoolExecutor, optimizing for parallel execution.
#### Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `tasks` | `List[str]` | List of tasks to process concurrently |
#### Returns
| Type | Description |
|------|-------------|
| `List[str]` | List of responses for each task |
## Detailed Explanation

### Initialization
@ -383,3 +492,112 @@ The `MixtureOfAgents` framework provides a solid foundation for further extensio
- **Integration with Other Frameworks**: Seamlessly integrating with other machine learning or data processing frameworks to leverage their capabilities within the swarm architecture.

In conclusion, the `MixtureOfAgents` class represents a versatile and efficient solution for orchestrating multi-agent systems, facilitating complex task execution through its structured and layered approach. By harnessing the power of parallel and sequential processing, it opens up new possibilities for tackling intricate problems across various domains.
## Additional Examples
### Example 4: Batch Processing
```python
from swarms import MixtureOfAgents, Agent
from swarm_models import OpenAIChat
# Initialize agents as in previous examples
director = Agent(
agent_name="Director",
system_prompt="Directs the tasks for the accountants",
llm=OpenAIChat(),
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="director.json",
)
accountant1 = Agent(
agent_name="Accountant1",
system_prompt="Prepares financial statements",
llm=OpenAIChat(),
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="accountant1.json",
)
accountant2 = Agent(
agent_name="Accountant2",
system_prompt="Audits financial records",
llm=OpenAIChat(),
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="accountant2.json",
)
# Initialize MixtureOfAgents
moe_swarm = MixtureOfAgents(
agents=[director, accountant1, accountant2],
final_agent=director
)
# Process multiple tasks in batch
tasks = [
"Analyze Q1 financial statements",
"Review tax compliance",
"Prepare budget forecast"
]
results = moe_swarm.run_batched(tasks)
for task, result in zip(tasks, results):
print(f"Task: {task}\nResult: {result}\n")
```
### Example 5: Concurrent Processing
```python
from swarms import MixtureOfAgents, Agent
from swarm_models import OpenAIChat
# Initialize agents as before
# ... agent initialization code ...
# Initialize MixtureOfAgents
moe_swarm = MixtureOfAgents(
agents=[director, accountant1, accountant2],
final_agent=director
)
# Process multiple tasks concurrently
tasks = [
"Generate monthly report",
"Audit expense claims",
"Update financial projections",
"Review investment portfolio"
]
results = moe_swarm.run_concurrently(tasks)
for task, result in zip(tasks, results):
print(f"Task: {task}\nResult: {result}\n")
```
## Advanced Features
### Context Preservation
The `MixtureOfAgents` class maintains context between iterations when running multiple loops. Each subsequent iteration receives the context from previous runs, allowing for more sophisticated and context-aware processing.
### Asynchronous Processing
The class implements asynchronous processing internally using Python's `asyncio`, enabling efficient handling of concurrent operations and improved performance for complex workflows.
### Telemetry and Logging
Built-in telemetry and logging capabilities help track agent performance and maintain detailed execution records:
- Automatic logging of agent outputs
- Structured data capture using Pydantic models
- JSON-formatted output options

@ -0,0 +1,361 @@
# ModelRouter Docs
The ModelRouter is an intelligent routing system that automatically selects and executes AI models based on task requirements. It leverages a function-calling architecture to analyze tasks and recommend the optimal model and provider combination for each specific use case.
### Key Features
- Dynamic model selection based on task complexity and requirements
- Multi-provider support (OpenAI, Anthropic, Google, etc.)
- Concurrent and asynchronous execution capabilities
- Batch processing with memory
- Automatic error handling and retries
- Provider-aware routing
- Cost optimization
### Constructor Arguments
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| system_prompt | str | model_router_system_prompt | Custom prompt for guiding model selection behavior |
| max_tokens | int | 4000 | Maximum token limit for model outputs |
| temperature | float | 0.5 | Control parameter for response randomness (0.0-1.0) |
| max_workers | int/str | 10 | Maximum concurrent workers ("auto" for CPU count) |
| api_key | str | None | API key for model access |
| max_loops | int | 1 | Maximum number of refinement iterations |
| *args | Any | None | Additional positional arguments |
| **kwargs | Any | None | Additional keyword arguments |
### Core Methods
#### run(task: str) -> str
Executes a single task through the model router with memory and refinement capabilities.
# Installation
1. Install the latest version of swarms using pip:
```bash
pip3 install -U swarms
```
2. Set up your API keys in your `.env` file with the following:
```bash
OPENAI_API_KEY=your_openai_api_key
ANTHROPIC_API_KEY=your_anthropic_api_key
GOOGLE_API_KEY=your_google_api_key
# Add more API keys as needed following litellm format
```
```python
from swarms import ModelRouter
router = ModelRouter()
# Simple text analysis
result = router.run("Analyze the sentiment and key themes in this customer feedback")
# Complex reasoning task
complex_result = router.run("""
Evaluate the following business proposal:
- Initial investment: $500,000
- Projected ROI: 25% annually
- Market size: $2B
- Competition: 3 major players
Provide detailed analysis and recommendations.
""")
```
#### batch_run(tasks: list) -> list
Executes multiple tasks sequentially with result aggregation.
```python
# Multiple analysis tasks
tasks = [
"Analyze Q1 financial performance",
"Predict Q2 market trends",
"Evaluate competitor strategies",
"Generate growth recommendations"
]
results = router.batch_run(tasks)
# Process results
for task, result in zip(tasks, results):
print(f"Task: {task}\nResult: {result}\n")
```
#### concurrent_run(tasks: list) -> list
Parallel execution of multiple tasks using thread pooling.
```python
import asyncio
from typing import List
# Define multiple concurrent tasks
analysis_tasks = [
"Perform technical analysis of AAPL stock",
"Analyze market sentiment from social media",
"Generate trading signals",
"Calculate risk metrics"
]
# Execute tasks concurrently
results = router.concurrent_run(analysis_tasks)
# Process results with error handling
for task, result in zip(analysis_tasks, results):
try:
processed_result = process_analysis(result)
save_to_database(processed_result)
except Exception as e:
log_error(f"Error processing {task}: {str(e)}")
```
#### async_run(task: str) -> asyncio.Task
Asynchronous task execution with coroutine support.
```python
async def process_data_stream():
tasks = []
async for data in data_stream:
task = await router.async_run(f"Process data: {data}")
tasks.append(task)
results = await asyncio.gather(*tasks)
return results
# Usage in async context
async def main():
router = ModelRouter()
results = await process_data_stream()
```
### Advanced Usage Examples
#### Financial Analysis System
```python
from swarms import ModelRouter
from typing import Dict, List
import pandas as pd
class FinancialAnalysisSystem:
def __init__(self):
self.router = ModelRouter(
temperature=0.3, # Lower temperature for more deterministic outputs
max_tokens=8000, # Higher token limit for detailed analysis
max_loops=2 # Allow for refinement iteration
)
def analyze_company_financials(self, financial_data: Dict) -> Dict:
analysis_task = f"""
Perform comprehensive financial analysis:
Financial Metrics:
- Revenue: ${financial_data['revenue']}M
- EBITDA: ${financial_data['ebitda']}M
- Debt/Equity: {financial_data['debt_equity']}
- Working Capital: ${financial_data['working_capital']}M
Required Analysis:
1. Profitability assessment
2. Liquidity analysis
3. Growth projections
4. Risk evaluation
5. Investment recommendations
Provide detailed insights and actionable recommendations.
"""
result = self.router.run(analysis_task)
return self._parse_analysis_result(result)
def _parse_analysis_result(self, result: str) -> Dict:
# Implementation of result parsing
pass
# Usage
analyzer = FinancialAnalysisSystem()
company_data = {
'revenue': 150,
'ebitda': 45,
'debt_equity': 0.8,
'working_capital': 25
}
analysis = analyzer.analyze_company_financials(company_data)
```
#### Healthcare Data Processing Pipeline
```python
from swarms import ModelRouter
import pandas as pd
from typing import List, Dict
class MedicalDataProcessor:
def __init__(self):
self.router = ModelRouter(
max_workers="auto", # Automatic worker scaling
temperature=0.2, # Conservative temperature for medical analysis
system_prompt="""You are a specialized medical data analyzer focused on:
1. Clinical terminology interpretation
2. Patient data analysis
3. Treatment recommendation review
4. Medical research synthesis"""
)
async def process_patient_records(self, records: List[Dict]) -> List[Dict]:
analysis_tasks = []
for record in records:
task = f"""
Analyze patient record:
- Age: {record['age']}
- Symptoms: {', '.join(record['symptoms'])}
- Vital Signs: {record['vitals']}
- Medications: {', '.join(record['medications'])}
- Lab Results: {record['lab_results']}
Provide:
1. Symptom analysis
2. Medication interaction check
3. Lab results interpretation
4. Treatment recommendations
"""
analysis_tasks.append(task)
results = await asyncio.gather(*[
self.router.async_run(task) for task in analysis_tasks
])
return [self._parse_medical_analysis(r) for r in results]
def _parse_medical_analysis(self, analysis: str) -> Dict:
# Implementation of medical analysis parsing
pass
# Usage
async def main():
processor = MedicalDataProcessor()
patient_records = [
{
'age': 45,
'symptoms': ['fever', 'cough', 'fatigue'],
'vitals': {'bp': '120/80', 'temp': '38.5C'},
'medications': ['lisinopril', 'metformin'],
'lab_results': 'WBC: 11,000, CRP: 2.5'
}
# More records...
]
analyses = await processor.process_patient_records(patient_records)
```
#### Natural Language Processing Pipeline
```python
from swarms import ModelRouter
from typing import List, Dict
import asyncio
class NLPPipeline:
def __init__(self):
self.router = ModelRouter(
temperature=0.4,
max_loops=2
)
def process_documents(self, documents: List[str]) -> List[Dict]:
tasks = [self._create_nlp_task(doc) for doc in documents]
results = self.router.concurrent_run(tasks)
return [self._parse_nlp_result(r) for r in results]
def _create_nlp_task(self, document: str) -> str:
return f"""
Perform comprehensive NLP analysis:
Text: {document}
Required Analysis:
1. Entity recognition
2. Sentiment analysis
3. Topic classification
4. Key phrase extraction
5. Intent detection
Provide structured analysis with confidence scores.
"""
def _parse_nlp_result(self, result: str) -> Dict:
# Implementation of NLP result parsing
pass
# Usage
pipeline = NLPPipeline()
documents = [
"We're extremely satisfied with the new product features!",
"The customer service response time needs improvement.",
"Looking to upgrade our subscription plan next month."
]
analyses = pipeline.process_documents(documents)
```
### Available Models and Use Cases
| Model | Provider | Optimal Use Cases | Characteristics |
|-------|----------|-------------------|-----------------|
| gpt-4-turbo | OpenAI | Complex reasoning, Code generation, Creative writing | High accuracy, Latest knowledge cutoff |
| claude-3-opus | Anthropic | Research analysis, Technical documentation, Long-form content | Strong reasoning, Detailed outputs |
| gemini-pro | Google | Multimodal tasks, Code generation, Technical analysis | Fast inference, Strong coding abilities |
| mistral-large | Mistral | General tasks, Content generation, Classification | Open source, Good price/performance |
| deepseek-reasoner | DeepSeek | Mathematical analysis, Logic problems, Scientific computing | Specialized reasoning capabilities |
### Provider Capabilities
| Provider | Strengths | Best For | Integration Notes |
|----------|-----------|-----------|------------------|
| OpenAI | Consistent performance, Strong reasoning | Production systems, Complex tasks | Requires API key setup |
| Anthropic | Safety features, Detailed analysis | Research, Technical writing | Claude-specific formatting |
| Google | Technical tasks, Multimodal support | Code generation, Analysis | Vertex AI integration available |
| Groq | High-speed inference | Real-time applications | Optimized for specific models |
| DeepSeek | Specialized reasoning | Scientific computing | Custom API integration |
| Mistral | Open source flexibility | General applications | Self-hosted options available |
### Performance Optimization Tips
1. Token Management
- Set appropriate max_tokens based on task complexity
- Monitor token usage for cost optimization
- Use streaming for long outputs
2. Concurrency Settings
- Adjust max_workers based on system resources
- Use "auto" workers for optimal CPU utilization
- Monitor memory usage with large batch sizes
3. Temperature Tuning
- Lower (0.1-0.3) for factual/analytical tasks
- Higher (0.7-0.9) for creative tasks
- Mid-range (0.4-0.6) for balanced outputs
4. System Prompts
- Customize for specific domains
- Include relevant context
- Define clear output formats
### Dependencies
- asyncio: Asynchronous I/O support
- concurrent.futures: Thread pool execution
- pydantic: Data validation
- litellm: LLM interface standardization

@ -1,124 +0,0 @@
# MultiProcessWorkflow Documentation
The `MultiProcessWorkflow` class provides a framework for executing tasks concurrently using multiple processes. This class leverages Python's `multiprocessing` module to parallelize task execution, thereby enhancing performance and efficiency. It includes features such as automatic task retry on failure and optional autosaving of results. This documentation details the class, its parameters, attributes, methods, and usage examples.
## Class Definition
### `MultiProcessWorkflow`
## Parameters
| Parameter | Type | Default | Description |
|---------------|---------------------|---------|---------------------------------------------------------------|
| `max_workers` | `int` | `5` | The maximum number of workers to use for parallel processing. |
| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. |
| `agents` | `Sequence[Agent]` | `None` | A list of Agent objects representing the workflow agents. |
| `*args` | `tuple` | | Additional positional arguments. |
| `**kwargs` | `dict` | | Additional keyword arguments. |
## Attributes
| Attribute | Type | Description |
|-----------------|---------------------|--------------------------------------------------------------|
| `max_workers` | `int` | The maximum number of workers to use for parallel processing.|
| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. |
| `agents` | `Sequence[Agent]` | A list of Agent objects representing the workflow agents. |
## Methods
### `execute_task`
#### Description
The `execute_task` method executes a given task and handles any exceptions that may occur during execution. If agents are defined, it will execute the task using each agent in sequence.
#### Usage Example
```python
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
from swarms.structs.task import Task

# Define a task (configure its name, callable, and priority as needed)
task = Task()

# Execute the task
workflow = MultiProcessWorkflow()
result = workflow.execute_task(task)
print(result)
```
### `run`
#### Description
The `run` method executes the workflow by running the given task using multiple processes. It manages the task execution using a process pool and collects the results.
#### Usage Example
```python
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
from swarms.structs.task import Task
from datetime import datetime
from time import sleep
# Define a simple task
def simple_task():
sleep(1)
return datetime.now()
# Create a task object
task = Task(
name="Simple Task",
execute=simple_task,
priority=1,
)
# Create a workflow with the task (agent1 and agent2 are assumed to be Agent instances defined earlier)
workflow = MultiProcessWorkflow(max_workers=3, autosave=True, agents=[agent1, agent2])
# Run the workflow
results = workflow.run(task)
# Print the results
print(results)
```
## Detailed Functionality and Usage
### Initialization
When an instance of `MultiProcessWorkflow` is created, it initializes the following:
- **max_workers**: Sets the maximum number of processes that can run concurrently.
- **autosave**: Determines if the workflow results should be saved automatically.
- **agents**: Accepts a list of agents that will perform the tasks.
### Running Tasks
The `run` method performs the following steps (a generic sketch of this pattern follows the list):
1. **Initialize Results and Manager**: Creates a list to store results and a `Manager` to manage shared state between processes.
2. **Initialize Process Pool**: Creates a pool of worker processes.
3. **Submit Tasks**: Iterates over the agents, submitting tasks to the pool for execution and collecting the results.
4. **Wait for Completion**: Waits for all tasks to complete and collects the results.
5. **Return Results**: Returns the list of results from all executed tasks.
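For readers unfamiliar with this pattern, here is a minimal, generic sketch of the flow described above using the standard library directly. It illustrates the Manager-plus-process-pool idea only; it is not the actual `MultiProcessWorkflow` implementation.

```python
from multiprocessing import Manager, Pool


def execute_task(agent_name: str) -> str:
    # Stand-in for an agent executing the submitted task
    return f"{agent_name} finished the task"


if __name__ == "__main__":
    agent_names = ["agent-1", "agent-2", "agent-3"]
    with Manager() as manager:
        results = manager.list()                  # shared, process-safe result store
        with Pool(processes=3) as pool:
            for output in pool.map(execute_task, agent_names):
                results.append(output)            # collect each worker's result
        print(list(results))                      # return the aggregated results
```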
### Autosave Task Result
Although the autosave functionality is mentioned in the parameters, it is not explicitly defined in the given code. The implementation for autosaving should be added based on the specific requirements of the application.
## Additional Information and Tips
- **Process Safety**: The use of `Manager` ensures that the list of results is managed safely across multiple processes.
- **Logging**: The class uses the `logger` module to log information about task execution, retries, and failures.
- **Error Handling**: The retry mechanism in the `execute_task` method helps in handling transient errors by attempting to re-execute failed tasks.
## References and Resources
For more information on multiprocessing in Python, refer to the following resources:
- [Python Multiprocessing Documentation](https://docs.python.org/3/library/multiprocessing.html)
- [Python Logging Documentation](https://docs.python.org/3/library/logging.html)
---
By following this detailed documentation, users can effectively understand and utilize the `MultiProcessWorkflow` class to execute tasks concurrently with multiple processes. The examples provided help in demonstrating the practical usage of the class.

@ -1,204 +0,0 @@
# MultiProcessWorkflow Documentation
The `MultiProcessWorkflow` class extends the `BaseWorkflow` to support parallel processing using multiple workers. This class is designed to efficiently execute tasks concurrently, leveraging the power of multi-processing to enhance performance and scalability.
### Key Concepts
- **Parallel Processing**: Utilizing multiple workers to execute tasks concurrently.
- **Workflow Management**: Handling the execution of tasks in a structured workflow.
- **Agents**: Entities responsible for executing tasks.
## Attributes
### Arguments
| Argument | Type | Default | Description |
|--------------|---------------------|---------|-------------|
| `max_workers`| `int` | `5` | The maximum number of workers to use for parallel processing. |
| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. |
| `agents` | `Sequence[Agent]` | `None` | A list of agents participating in the workflow. |
| `*args` | | | Additional positional arguments. |
| `**kwargs` | | | Additional keyword arguments. |
### Attributes
| Attribute | Type | Description |
|--------------|---------------------|-------------|
| `max_workers`| `int` | The maximum number of workers to use for parallel processing. |
| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. |
| `agents` | `Sequence[Agent]` | A list of agents participating in the workflow. |
## Methods
### __init__
Initializes the `MultiProcessWorkflow` with the given parameters.
**Examples:**
```python
from swarms.structs.agent import Agent
from swarms.structs.task import Task
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
tasks = [Task(name="Task 1", execute=lambda: "result1"), Task(name="Task 2", execute=lambda: "result2")]
workflow = MultiProcessWorkflow(max_workers=3, agents=agents, tasks=tasks)
```
### execute_task
Executes a task and handles exceptions.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The task to execute. |
| `*args` | | Additional positional arguments for the task execution. |
| `**kwargs`| | Additional keyword arguments for the task execution. |
**Returns:**
| Return Type | Description |
|-------------|-------------|
| `Any` | The result of the task execution. |
**Examples:**
```python
result = workflow.execute_task(task="Sample Task")
print(result)
```
### run
Runs the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The task to run. |
| `*args` | | Additional positional arguments for the task execution. |
| `**kwargs`| | Additional keyword arguments for the task execution. |
**Returns:**
| Return Type | Description |
|-------------|-------------|
| `List[Any]` | The results of all executed tasks. |
**Examples:**
```python
results = workflow.run(task="Sample Task")
print(results)
```
### Additional Examples
#### Example 1: Simple Task Execution
```python
from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat
from datetime import datetime
from time import sleep
import os
from dotenv import load_dotenv
# Load the environment variables
load_dotenv()
# Define a function to be used as the action
def my_action():
print("Action executed")
# Define a function to be used as the condition
def my_condition():
print("Condition checked")
return True
# Create an agent
agent = Agent(
llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
max_loops=1,
dashboard=False,
)
# Create a task
task = Task(
description=(
"Generate a report on the top 3 biggest expenses for small"
" businesses and how businesses can save 20%"
),
agent=agent,
)
# Create a workflow with the task
workflow = MultiProcessWorkflow(tasks=[task])
# Run the workflow
results = workflow.run(task)
print(results)
```
#### Example 2: Workflow with Multiple Agents
```python
from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat
# Define tasks
def task1():
return "Task 1 result"
def task2():
return "Task 2 result"
# Create agents
agent1 = Agent(name="Agent 1", llm=OpenAIChat())
agent2 = Agent(name="Agent 2", llm=OpenAIChat())
# Create tasks
task_1 = Task(name="Task 1", execute=task1)
task_2 = Task(name="Task 2", execute=task2)
# Create a workflow
workflow = MultiProcessWorkflow(agents=[agent1, agent2], tasks=[task_1, task_2])
# Run the workflow
results = workflow.run(task="Example Task")
print(results)
```
#### Example 3: Customizing Max Workers
```python
from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat
# Define a task
def example_task():
return "Task result"
# Create an agent
agent = Agent(name="Agent 1", llm=OpenAIChat())
# Create a task
task = Task(name="Example Task", execute=example_task)
# Create a workflow with custom max workers
workflow = MultiProcessWorkflow(max_workers=10, agents=[agent], tasks=[task])
# Run the workflow
results = workflow.run(task="Example Task")
print(results)
```
## Summary
The `MultiProcessWorkflow` class provides a powerful framework for managing and executing tasks using multiple workers. With support for parallel processing, customizable workflows, and detailed logging, it is an ideal tool for complex task execution scenarios. This class enhances performance and scalability, making it suitable for a wide range of applications that require efficient task management.

@ -33,7 +33,7 @@ Main class for routing tasks to different swarm types.
| `flow` | str | The flow of the swarm. |
| `return_json` | bool | Flag to enable/disable returning the result in JSON format. |
| `auto_generate_prompts` | bool | Flag to enable/disable auto generation of prompts. |
| `swarm` | Union[AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow, GroupChat, MultiAgentRouter] | Instantiated swarm object. |
| `logs` | List[SwarmLog] | List of log entries captured during operations. |

#### Methods:
@ -271,6 +271,39 @@ result = concurrent_router.run("Conduct a comprehensive market analysis for Prod
```
### GroupChat
Use Case: Simulating a group chat with multiple agents.
```python
group_chat_router = SwarmRouter(
name="GroupChat",
description="Simulate a group chat with multiple agents",
max_loops=1,
agents=[financial_analyst, market_researcher, competitor_analyst],
swarm_type="GroupChat"
)
result = group_chat_router.run("Conduct a comprehensive market analysis for Product X")
```
### MultiAgentRouter
Use Case: Routing each task to the most suitable agent from a pool of specialized agents.
```python
multi_agent_router = SwarmRouter(
name="MultiAgentRouter",
    description="Route tasks to the most suitable specialized agent",
max_loops=1,
agents=[financial_analyst, market_researcher, competitor_analyst],
swarm_type="MultiAgentRouter"
)
result = multi_agent_router.run("Conduct a comprehensive market analysis for Product X")
```
### Auto Select (Experimental)

Autonomously selects the right swarm by conducting a vector search over your input task, name, description, or all three.

@ -0,0 +1,497 @@
# swarms Wallet API Documentation
This documentation covers the swarms Wallet API routes for managing wallets, sending tokens, and checking transactions in the swarms Platform.
## Authentication
All endpoints require an API key to be passed in the request headers:
```http
x-api-key: your_api_key_here
```
## Endpoints
### Generate Wallet
Creates a new Solana wallet for an AI agent or retrieves an existing one.
```http
POST https://swarms.world/api/solana/generate-wallet
```
**Response**
```json
{
"success": true,
"data": {
"public_key": "string",
"wallet_type": "solana",
"swarms_token_address": "string"
},
"code": "SUCCESS_001"
}
```
### Send Tokens
Sends swarms tokens with automatic tax handling.
```http
POST https://swarms.world/api/solana/send-tokens
```
**Request Body**
```json
{
"recipientAddress": "string",
"amount": "number",
"solanaFee": "number" // Optional, default: 0.009
}
```
**Response**
```json
{
"success": true,
"data": {
"signature": "string",
"details": {
"sender": "string",
"recipient": "string",
"daoAddress": "string",
"requestedSendAmount": "number",
"totalNeededFromAccount": "number",
"accountTax": "number",
"receivedTax": "number",
"recipientReceives": "number",
"taxBreakdown": "string",
"computeUnits": "number",
"priorityFee": "number"
}
},
"code": "SUCCESS_001"
}
```
### Check Receipt
Verifies token receipt and checks balances.
```http
GET https://swarms.world/api/solana/check-receipt?amount={amount}
```
**Response**
```json
{
"success": true,
"data": {
"solana_address": "string",
"received": "number",
"expected": "number",
"matches": "boolean",
"balances": {
"sol": "number",
"swarms": "number"
},
"swarms_address": "string"
},
"code": "SUCCESS_001"
}
```
### Get Metrics
Retrieves transaction metrics and history.
```http
GET https://swarms.world/api/solana/get-metrics
```
**Query Parameters**
- `page`: Page number (default: 1)
- `limit`: Items per page (default: 10, max: 100)
- `startDate`: Filter start date
- `endDate`: Filter end date
- `status`: Transaction status filter
- `type`: Transaction type filter
**Response**
```json
{
"success": true,
"data": {
"transactions": [{
"id": "string",
"agent_id": "string",
"transaction_hash": "string",
"amount": "number",
"recipient": "string",
"status": "string",
"transaction_type": "string",
"created_at": "string"
}],
"pagination": {
"currentPage": "number",
"totalPages": "number",
"totalItems": "number",
"itemsPerPage": "number",
"hasMore": "boolean"
},
"metrics": {
"totalTransactions": "number",
"totalAmountSent": "number",
"totalSuccessfulTransactions": "number",
"totalFailedTransactions": "number"
}
},
"code": "SUCCESS_001"
}
```
## Error Codes
| Code | Description |
|------|-------------|
| AUTH_001 | Missing API key |
| AUTH_002 | Invalid API key |
| BAL_001 | Insufficient SOL balance |
| BAL_002 | Insufficient token balance |
| WAL_001 | Wallet not found |
| REQ_001 | Missing required parameters |
| DB_001 | Database error |
| ERR_001 | Internal server error |
## Transaction Details
- Default SOL fee: 0.009 SOL
- swarms token tax: 2% from sender + 2% from sent amount (see the worked example after this list)
- All taxes are sent to the DAO treasury
- Token accounts are automatically created for new recipients
- Transactions use 'processed' commitment level
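For a rough sense of how the 2% + 2% tax breaks down, the sketch below works through a 100-token send. The variable names mirror the fields in the send-tokens `details` object, but the exact split is an interpretation on our part; always treat the values returned by the API as authoritative.

```python
# Illustrative breakdown of a 100-token send under the 2% + 2% tax (assumption,
# not a specification): one 2% charge is added on top for the sender, and one
# 2% charge is deducted from the amount the recipient receives.
requested_send_amount = 100
account_tax = requested_send_amount * 0.02            # 2% charged to the sender on top
total_needed_from_account = requested_send_amount + account_tax
received_tax = requested_send_amount * 0.02            # 2% deducted from the sent amount
recipient_receives = requested_send_amount - received_tax

print(total_needed_from_account, recipient_receives)   # 102.0 98.0
```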
## Implementation Notes
- All token amounts should be provided in their natural units (not in lamports/raw units)
- SOL balances are returned in SOL (not lamports)
- Token accounts are automatically created for recipients if they don't exist
- All transactions include automatic tax handling for the DAO treasury
- Compute budget and priority fees are automatically managed for optimal transaction processing
## Examples
Below are code examples in several languages that demonstrate how to use the swarms Wallet API endpoints. In these examples, replace `your_api_key_here` with your actual API key, and update any parameters as needed.
---
## Python (Using `requests`)
First, install the library if you haven't already:
```bash
pip install requests
```
**Example: Generate Wallet**
```python
import os
import requests
API_KEY = os.getenv("SWARMS_API_KEY")
headers = {
"x-api-key": API_KEY,
"Content-Type": "application/json"
}
url = "https://swarms.world/api/solana/generate-wallet"
response = requests.post(url, headers=headers)
if response.status_code == 200:
data = response.json()
print("Wallet generated:", data)
else:
print("Error:", response.text)
```
**Example: Send Tokens**
```python
import requests
import json
import os
API_KEY = os.getenv("SWARMS_API_KEY")
headers = {
"x-api-key": API_KEY,
"Content-Type": "application/json"
}
url = "https://swarms.world/api/solana/send-tokens"
payload = {
"recipientAddress": "recipient_public_key",
"amount": 100, # Example token amount
# "solanaFee": 0.009 # Optional: use default if not provided
}
response = requests.post(url, headers=headers, data=json.dumps(payload))
if response.status_code == 200:
data = response.json()
print("Tokens sent:", data)
else:
print("Error:", response.text)
```
**Example: Check Receipt**
```python
import requests
import os
API_KEY = os.getenv("SWARMS_API_KEY")
headers = {
"x-api-key": API_KEY
}
amount = 100 # The amount you expect to be received
url = f"https://swarms.world/api/solana/check-receipt?amount={amount}"
response = requests.get(url, headers=headers)
if response.status_code == 200:
data = response.json()
print("Receipt checked:", data)
else:
print("Error:", response.text)
```
**Example: Get Metrics**
```python
import requests
import os
API_KEY = os.getenv("SWARMS_API_KEY")
headers = {
"x-api-key": API_KEY
}
params = {
"page": 1,
"limit": 10,
# Optionally include startDate, endDate, status, type if needed.
}
url = "https://swarms.world/api/solana/get-metrics"
response = requests.get(url, headers=headers, params=params)
if response.status_code == 200:
data = response.json()
print("Metrics:", data)
else:
print("Error:", response.text)
```
---
## Node.js (Using `axios`)
First, install axios:
```bash
npm install axios
```
**Example: Generate Wallet**
```javascript
const axios = require('axios');
const API_KEY = 'your_api_key_here';
const headers = {
'x-api-key': API_KEY,
'Content-Type': 'application/json'
};
axios.post('https://swarms.world/api/solana/generate-wallet', {}, { headers })
.then(response => {
console.log('Wallet generated:', response.data);
})
.catch(error => {
console.error('Error:', error.response ? error.response.data : error.message);
});
```
**Example: Send Tokens**
```javascript
const axios = require('axios');
const API_KEY = 'your_api_key_here';
const headers = {
'x-api-key': API_KEY,
'Content-Type': 'application/json'
};
const payload = {
recipientAddress: 'recipient_public_key',
amount: 100, // token amount
// solanaFee: 0.009 // Optional
};
axios.post('https://swarms.world/api/solana/send-tokens', payload, { headers })
.then(response => {
console.log('Tokens sent:', response.data);
})
.catch(error => {
console.error('Error:', error.response ? error.response.data : error.message);
});
```
**Example: Check Receipt**
```javascript
const axios = require('axios');
const API_KEY = 'your_api_key_here';
const headers = { 'x-api-key': API_KEY };
const amount = 100;
const url = `https://swarms.world/api/solana/check-receipt?amount=${amount}`;
axios.get(url, { headers })
.then(response => {
console.log('Receipt:', response.data);
})
.catch(error => {
console.error('Error:', error.response ? error.response.data : error.message);
});
```
**Example: Get Metrics**
```javascript
const axios = require('axios');
const API_KEY = 'your_api_key_here';
const headers = { 'x-api-key': API_KEY };
const params = {
page: 1,
limit: 10,
// startDate: '2025-01-01', endDate: '2025-01-31', status: 'completed', type: 'send'
};
axios.get('https://swarms.world/api/solana/get-metrics', { headers, params })
.then(response => {
console.log('Metrics:', response.data);
})
.catch(error => {
console.error('Error:', error.response ? error.response.data : error.message);
});
```
---
## cURL (Command Line)
**Example: Generate Wallet**
```bash
curl -X POST https://swarms.world/api/solana/generate-wallet \
-H "x-api-key: your_api_key_here" \
-H "Content-Type: application/json"
```
**Example: Send Tokens**
```bash
curl -X POST https://swarms.world/api/solana/send-tokens \
-H "x-api-key: your_api_key_here" \
-H "Content-Type: application/json" \
-d '{
"recipientAddress": "recipient_public_key",
"amount": 100,
"solanaFee": 0.009
}'
```
**Example: Check Receipt**
```bash
curl -X GET "https://swarms.world/api/solana/check-receipt?amount=100" \
-H "x-api-key: your_api_key_here"
```
**Example: Get Metrics**
```bash
curl -X GET "https://swarms.world/api/solana/get-metrics?page=1&limit=10" \
-H "x-api-key: your_api_key_here"
```
---
## Other Languages
### Ruby (Using `net/http`)
**Example: Generate Wallet**
```ruby
require 'net/http'
require 'uri'
require 'json'
uri = URI.parse("https://swarms.world/api/solana/generate-wallet")
request = Net::HTTP::Post.new(uri)
request["x-api-key"] = "your_api_key_here"
request["Content-Type"] = "application/json"
response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
http.request(request)
end
puts JSON.parse(response.body)
```
### Java (Using `HttpURLConnection`)
**Example: Generate Wallet**
```java
import java.io.*;
import java.net.*;
import javax.net.ssl.HttpsURLConnection;
public class SwarmsApiExample {
public static void main(String[] args) {
try {
URL url = new URL("https://swarms.world/api/solana/generate-wallet");
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setRequestProperty("x-api-key", "your_api_key_here");
conn.setRequestProperty("Content-Type", "application/json");
conn.setDoOutput(true);
// If you need to send a request body, write to the output stream:
// try(OutputStream os = conn.getOutputStream()) {
// byte[] input = "{}".getBytes("utf-8");
// os.write(input, 0, input.length);
// }
BufferedReader br = new BufferedReader(new InputStreamReader(conn.getInputStream(), "utf-8"));
StringBuilder response = new StringBuilder();
String responseLine = null;
while ((responseLine = br.readLine()) != null) {
response.append(responseLine.trim());
}
System.out.println("Response: " + response.toString());
} catch (Exception e) {
e.printStackTrace();
}
}
}
```
---
These examples illustrate how to authenticate using the API key and perform various operations such as generating a wallet, sending tokens, checking receipts, and retrieving metrics. You can adapt these examples to other languages or frameworks as needed. Enjoy integrating with the swarms Wallet API!

@ -0,0 +1,254 @@
# Hosting Agents on Google Cloud Run
This documentation provides a highly detailed, step-by-step guide to hosting your agents using Google Cloud Run. It uses a well-structured project setup that includes a Dockerfile at the root level, a folder dedicated to your API file, and a `requirements.txt` file to manage all dependencies. This guide will ensure your deployment is scalable, efficient, and easy to maintain.
---
## **Project Structure**
Your project directory should adhere to the following structure to ensure compatibility and ease of deployment:
```
.
├── Dockerfile
├── requirements.txt
└── api/
└── api.py
```
Each component serves a specific purpose in the deployment pipeline, ensuring modularity and maintainability.
---
## **Step 1: Prerequisites**
Before you begin, make sure to satisfy the following prerequisites to avoid issues during deployment:
1. **Google Cloud Account**:
- Create a Google Cloud account at [Google Cloud Console](https://console.cloud.google.com/).
- Enable billing for your project. Billing is necessary for accessing Cloud Run services.
2. **Install Google Cloud SDK**:
- Follow the [installation guide](https://cloud.google.com/sdk/docs/install) to set up the Google Cloud SDK on your local machine.
3. **Install Docker**:
- Download and install Docker by following the [official Docker installation guide](https://docs.docker.com/get-docker/). Docker is crucial for containerizing your application.
4. **Create a Google Cloud Project**:
- Navigate to the Google Cloud Console and create a new project. Assign it a meaningful name and note the **Project ID**, as it will be used throughout this guide.
5. **Enable Required APIs**:
- Visit the [API Library](https://console.cloud.google.com/apis/library) and enable the following APIs:
- Cloud Run API
- Cloud Build API
- Artifact Registry API
- These APIs are essential for deploying and managing your application in Cloud Run.
---
## **Step 2: Creating the Files**
### 1. **`api/api.py`**
This is the main Python script where you define your Swarms agents and expose an API endpoint for interacting with them. Here's an example:
```python
from flask import Flask, request, jsonify
from swarms import Agent # Assuming `swarms` is the framework you're using
app = Flask(__name__)
# Example Swarms agent
agent = Agent(
    agent_name="Stock-Analysis-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,  # one pass per request; interactive/streaming modes are not suited to a headless server
)
@app.route('/run-agent', methods=['POST'])
def run_agent():
data = request.json
task = data.get('task', '')
result = agent.run(task)
return jsonify({"result": result})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
```
This example sets up a basic API that listens for POST requests, processes a task using a Swarms agent, and returns the result as a JSON response. Customize it based on your agent's functionality.
---
### 2. **`requirements.txt`**
This file lists all Python dependencies required for your project. Example:
```
flask
swarms
# add any other dependencies here
```
Be sure to include any additional libraries your agents rely on. Keeping this file up to date ensures smooth dependency management during deployment.
---
### 3. **`Dockerfile`**
The Dockerfile specifies how your application is containerized. Below is a sample Dockerfile for your setup:
```dockerfile
# Use an official Python runtime as the base image
FROM python:3.10-slim
# Set the working directory
WORKDIR /app
# Copy requirements.txt and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy the application code
COPY api/ ./api/
# Expose port 8080 (Cloud Run default port)
EXPOSE 8080
# Run the application
CMD ["python", "api/api.py"]
```
This Dockerfile ensures your application is containerized with minimal overhead, focusing on slim images for efficiency.
---
## **Step 3: Deploying to Google Cloud Run**
### 1. **Authenticate with Google Cloud**
Log in to your Google Cloud account by running:
```bash
gcloud auth login
```
Set the active project to match your deployment target:
```bash
gcloud config set project [PROJECT_ID]
```
Replace `[PROJECT_ID]` with your actual Project ID.
---
### 2. **Build the Docker Image**
Use Google Cloud's Artifact Registry to store and manage your Docker image. Follow these steps:
1. **Create a Repository**:
```bash
gcloud artifacts repositories create my-repo --repository-format=Docker --location=us-central1
```
2. **Authenticate Docker with Google Cloud**:
```bash
gcloud auth configure-docker us-central1-docker.pkg.dev
```
3. **Build and Tag the Image**:
```bash
docker build -t us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image .
```
4. **Push the Image**:
```bash
docker push us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image
```
---
### 3. **Deploy to Cloud Run**
Deploy the application to Cloud Run with the following command:
```bash
gcloud run deploy my-agent-service \
--image us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image \
--platform managed \
--region us-central1 \
--allow-unauthenticated
```
Key points:
- Replace `[PROJECT_ID]` with your actual Project ID.
- The `--allow-unauthenticated` flag makes the service publicly accessible. Exclude it to restrict access.
---
## **Step 4: Testing the Deployment**
Once the deployment is complete, test the service:
1. Note the URL provided by Cloud Run.
2. Use `curl` or Postman to send a request. Example:
```bash
curl -X POST [CLOUD_RUN_URL]/run-agent \
-H "Content-Type: application/json" \
-d '{"task": "example task"}'
```
This tests whether your agent processes the task correctly and returns the expected output.
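If the call succeeds, the response body follows the `jsonify({"result": result})` line in `api.py`; the exact text depends on your agent and model, so treat this as an illustrative shape only:

```json
{
  "result": "<the agent's answer to 'example task'>"
}
```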
---
## **Step 5: Updating the Service**
To apply changes to your application:
1. Edit the necessary files.
2. Rebuild and push the updated Docker image:
```bash
docker build -t us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image .
docker push us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image
```
3. Redeploy the service:
```bash
gcloud run deploy my-agent-service \
--image us-central1-docker.pkg.dev/[PROJECT_ID]/my-repo/my-image
```
This ensures the latest version of your application is live.
---
## **Troubleshooting**
- **Permission Errors**:
Ensure your account has roles like Cloud Run Admin and Artifact Registry Reader.
- **Port Issues**:
Confirm the application listens on port 8080. Cloud Run expects this port by default.
- **Logs**:
Use the Google Cloud Console or CLI to review logs for debugging:
```bash
gcloud logging read "resource.type=cloud_run_revision" --project [PROJECT_ID] --limit 50
```
---
## **Conclusion**
By following this comprehensive guide, you can deploy your agents on Google Cloud Run with ease. This method leverages Docker for containerization and Google Cloud services for seamless scalability and management. With a robust setup like this, you can focus on enhancing your agents' capabilities rather than worrying about deployment challenges.

@ -0,0 +1,204 @@
# CreateNow API Documentation
Welcome to the CreateNow API documentation! This API enables developers to generate AI-powered content, including images, music, videos, and speech, using natural language prompts. Use the endpoints below to start generating content.
---
## **1. Claim Your API Key**
To use the API, you must first claim your API key. Visit the following link to create an account and get your API key:
### **Claim Your Key**
```
https://createnow.xyz/account
```
After signing up, your API key will be available in your account dashboard. Keep it secure and include it in your API requests as a Bearer token.
---
## **2. Generation Endpoint**
The generation endpoint allows you to create AI-generated content using natural language prompts.
### **Endpoint**
```
POST https://createnow.xyz/api/v1/generate
```
### **Authentication**
Include a Bearer token in the `Authorization` header for all requests:
```
Authorization: Bearer YOUR_API_KEY
```
### **Basic Usage**
The simplest way to use the API is to send a prompt. The system will automatically detect the appropriate media type.
#### **Example Request (Basic)**
```json
{
"prompt": "a beautiful sunset over the ocean"
}
```
### **Advanced Options**
You can specify additional parameters for finer control over the output.
#### **Parameters**
| Parameter | Type | Description | Default |
|----------------|-----------|---------------------------------------------------------------------------------------------------|--------------|
| `prompt` | `string` | The natural language description of the content to generate. | Required |
| `type` | `string` | The type of content to generate (`image`, `music`, `video`, `speech`). | Auto-detect |
| `count` | `integer` | The number of outputs to generate (1-4). | 1 |
| `duration` | `integer` | Duration of audio or video content in seconds (applicable to `music` and `speech`). | N/A |
#### **Example Request (Advanced)**
```json
{
"prompt": "create an upbeat jazz melody",
"type": "music",
"count": 2,
"duration": 30
}
```
### **Response Format**
#### **Success Response**
```json
{
"success": true,
"outputs": [
{
"url": "https://createnow.xyz/storage/image1.png",
"creation_id": "12345",
"share_url": "https://createnow.xyz/share/12345"
}
],
"mediaType": "image",
"confidence": 0.95,
"detected": true
}
```
#### **Error Response**
```json
{
"error": "Invalid API Key",
"status": 401
}
```
---
## **3. Examples in Multiple Languages**
### **Python**
```python
import requests
url = "https://createnow.xyz/api/v1/generate"
headers = {
"Authorization": "Bearer YOUR_API_KEY",
"Content-Type": "application/json"
}
payload = {
"prompt": "a futuristic cityscape at night",
"type": "image",
"count": 2
}
response = requests.post(url, json=payload, headers=headers)
print(response.json())
```
### **Node.js**
```javascript
const axios = require('axios');
const url = "https://createnow.xyz/api/v1/generate";
const headers = {
Authorization: "Bearer YOUR_API_KEY",
"Content-Type": "application/json"
};
const payload = {
prompt: "a futuristic cityscape at night",
type: "image",
count: 2
};
axios.post(url, payload, { headers })
.then(response => {
console.log(response.data);
})
.catch(error => {
console.error(error.response.data);
});
```
### **cURL**
```bash
curl -X POST https://createnow.xyz/api/v1/generate \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"prompt": "a futuristic cityscape at night",
"type": "image",
"count": 2
}'
```
### **Java**
```java
import java.net.HttpURLConnection;
import java.net.URL;
import java.io.OutputStream;
public class CreateNowAPI {
public static void main(String[] args) throws Exception {
URL url = new URL("https://createnow.xyz/api/v1/generate");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setRequestProperty("Authorization", "Bearer YOUR_API_KEY");
conn.setRequestProperty("Content-Type", "application/json");
conn.setDoOutput(true);
String jsonPayload = "{" +
"\"prompt\": \"a futuristic cityscape at night\", " +
"\"type\": \"image\", " +
"\"count\": 2}";
OutputStream os = conn.getOutputStream();
os.write(jsonPayload.getBytes());
os.flush();
int responseCode = conn.getResponseCode();
System.out.println("Response Code: " + responseCode);
}
}
```
---
## **4. Error Codes**
| Status Code | Meaning | Possible Causes |
|-------------|----------------------------------|----------------------------------------|
| 400 | Bad Request | Invalid parameters or payload. |
| 401 | Unauthorized | Invalid or missing API key. |
| 402 | Payment Required | Insufficient credits for the request. |
| 500 | Internal Server Error | Issue on the server side. |
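As an illustrative sketch, the Python `requests` example above can branch on these status codes; the JSON field names follow the response formats documented earlier:

```python
import requests

url = "https://createnow.xyz/api/v1/generate"
headers = {
    "Authorization": "Bearer YOUR_API_KEY",
    "Content-Type": "application/json"
}
payload = {"prompt": "a beautiful sunset over the ocean"}

response = requests.post(url, json=payload, headers=headers)

if response.ok:
    # Success responses expose an "outputs" list (see the success format above)
    print(response.json()["outputs"][0]["url"])
elif response.status_code == 401:
    print("Unauthorized: check your Bearer token.")
elif response.status_code == 402:
    print("Payment required: insufficient credits for this request.")
else:
    print(f"Request failed ({response.status_code}): {response.text}")
```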
---
## **5. Notes and Limitations**
- **Maximum Prompt Length:** 1000 characters.
- **Maximum Outputs per Request:** 4.
- **Supported Media Types:** `image`, `music`, `video`, `speech`.
- **Content Shareability:** Every output includes a unique creation ID and shareable URL.
- **Auto-Detection:** Uses advanced natural language processing to determine the most appropriate media type.
---
For further support or questions, please contact our support team at [support@createnow.xyz](mailto:support@createnow.xyz).

@ -0,0 +1,369 @@
# Swarms Cloud API Client Documentation
## Overview
The Swarms Cloud API Client is a production-grade Python library for interacting with the Swarms Cloud Agent API. It provides a comprehensive interface for managing, executing, and monitoring cloud-based agents.
## Installation
```bash
pip install swarms-cloud
```
## Quick Start
```python
from swarms_cloud import SwarmCloudAPI, AgentCreate
# Initialize the client
client = SwarmCloudAPI(
base_url="https://swarmcloud-285321057562.us-central1.run.app",
api_key="your_api_key_here"
)
# Create an agent
agent_data = AgentCreate(
name="TranslateAgent",
description="Translates text between languages",
code="""
def main(request, store):
text = request.payload.get('text', '')
return f'Translated: {text}'
""",
requirements="requests==2.25.1",
envs="DEBUG=True"
)
new_agent = client.create_agent(agent_data)
print(f"Created agent with ID: {new_agent.id}")
```
## Client Configuration
### Constructor Parameters
| Parameter | Type | Required | Default | Description |
|-----------|------|----------|----------|-------------|
| base_url | str | No | https://swarmcloud-285321057562.us-central1.run.app | The base URL of the SwarmCloud API |
| api_key | str | Yes | None | Your SwarmCloud API key |
| timeout | float | No | 10.0 | Request timeout in seconds |
## Data Models
### AgentCreate
Model for creating new agents.
| Field | Type | Required | Default | Description |
|-------|------|----------|----------|-------------|
| name | str | Yes | - | Name of the agent |
| description | str | No | None | Description of the agent's purpose |
| code | str | Yes | - | Python code that defines the agent's behavior |
| requirements | str | No | None | Python package requirements (pip format) |
| envs | str | No | None | Environment variables for the agent |
| autoscaling | bool | No | False | Enable/disable concurrent execution scaling |
### AgentUpdate
Model for updating existing agents.
| Field | Type | Required | Default | Description |
|-------|------|----------|----------|-------------|
| name | str | No | None | Updated name of the agent |
| description | str | No | None | Updated description |
| code | str | No | None | Updated Python code |
| requirements | str | No | None | Updated package requirements |
| autoscaling | bool | No | None | Updated autoscaling setting |
## API Methods
### List Agents
Retrieve all available agents.
```python
agents = client.list_agents()
for agent in agents:
print(f"Agent: {agent.name} (ID: {agent.id})")
```
**Returns**: List[AgentOut]
### Create Agent
Create a new agent with the specified configuration.
```python
agent_data = AgentCreate(
name="DataProcessor",
description="Processes incoming data streams",
code="""
def main(request, store):
data = request.payload.get('data', [])
return {'processed': len(data)}
""",
requirements="pandas==1.4.0\nnumpy==1.21.0",
envs="PROCESSING_MODE=fast",
autoscaling=True
)
new_agent = client.create_agent(agent_data)
```
**Returns**: AgentOut
### Get Agent
Retrieve details of a specific agent.
```python
agent = client.get_agent("agent_id_here")
print(f"Agent details: {agent}")
```
**Parameters**:
- agent_id (str): The unique identifier of the agent
**Returns**: AgentOut
### Update Agent
Update an existing agent's configuration.
```python
update_data = AgentUpdate(
name="UpdatedProcessor",
description="Enhanced data processing capabilities",
code="def main(request, store):\n return {'status': 'updated'}"
)
updated_agent = client.update_agent("agent_id_here", update_data)
```
**Parameters**:
- agent_id (str): The unique identifier of the agent
- update (AgentUpdate): The update data
**Returns**: AgentOut
### Execute Agent
Manually execute an agent with optional payload data.
```python
# Execute with payload
result = client.execute_agent(
"agent_id_here",
payload={"text": "Hello, World!"}
)
# Execute without payload
result = client.execute_agent("agent_id_here")
```
**Parameters**:
- agent_id (str): The unique identifier of the agent
- payload (Optional[Dict[str, Any]]): Execution payload data
**Returns**: Dict[str, Any]
### Get Agent History
Retrieve the execution history and logs for an agent.
```python
history = client.get_agent_history("agent_id_here")
for execution in history.executions:
print(f"[{execution.timestamp}] {execution.log}")
```
**Parameters**:
- agent_id (str): The unique identifier of the agent
**Returns**: AgentExecutionHistory
### Batch Execute Agents
Execute multiple agents simultaneously with the same payload.
```python
# Get list of agents
agents = client.list_agents()
# Execute batch with payload
results = client.batch_execute_agents(
agents=agents[:3], # Execute first three agents
payload={"data": "test"}
)
print(f"Batch execution results: {results}")
```
**Parameters**:
- agents (List[AgentOut]): List of agents to execute
- payload (Optional[Dict[str, Any]]): Shared execution payload
**Returns**: List[Any]
### Health Check
Check the API's health status.
```python
status = client.health()
print(f"API Status: {status}")
```
**Returns**: Dict[str, Any]
## Error Handling
The client uses exception handling to manage various error scenarios:
```python
from swarms_cloud import SwarmCloudAPI
import httpx
client = SwarmCloudAPI(api_key="your_api_key_here")
try:
    agents = client.list_agents()
except httpx.HTTPError as http_err:
    print(f"HTTP error occurred: {http_err}")
except Exception as err:
    print(f"An unexpected error occurred: {err}")
finally:
    client.close()  # safe: the client is created before the try block
```
## Context Manager Support
The client can be used with Python's context manager:
```python
with SwarmCloudAPI(api_key="your_api_key_here") as client:
status = client.health()
print(f"API Status: {status}")
# Client automatically closes after the with block
```
## Best Practices
1. Always close the client when finished:
```python
client = SwarmCloudAPI(api_key="your_api_key_here")
try:
# Your code here
finally:
client.close()
```
2. Use context managers for automatic cleanup:
```python
with SwarmCloudAPI(api_key="your_api_key_here") as client:
# Your code here
```
3. Handle errors appropriately:
```python
try:
result = client.execute_agent("agent_id", payload={"data": "test"})
except httpx.HTTPError as e:
logger.error(f"HTTP error: {e}")
# Handle error appropriately
```
4. Set appropriate timeouts for your use case:
```python
client = SwarmCloudAPI(
api_key="your_api_key_here",
timeout=30.0 # Longer timeout for complex operations
)
```
## Complete Example
Here's a complete example showcasing various features of the client:
```python
from swarms_cloud import SwarmCloudAPI, AgentCreate, AgentUpdate
import httpx
def main():
with SwarmCloudAPI(api_key="your_api_key_here") as client:
# Create an agent
agent_data = AgentCreate(
name="DataAnalyzer",
description="Analyzes incoming data streams",
code="""
def main(request, store):
data = request.payload.get('data', [])
return {
'count': len(data),
'summary': 'Data processed successfully'
}
""",
requirements="pandas==1.4.0",
autoscaling=True
)
try:
# Create the agent
new_agent = client.create_agent(agent_data)
print(f"Created agent: {new_agent.name} (ID: {new_agent.id})")
# Execute the agent
result = client.execute_agent(
new_agent.id,
payload={"data": [1, 2, 3, 4, 5]}
)
print(f"Execution result: {result}")
# Update the agent
update_data = AgentUpdate(
description="Enhanced data analysis capabilities"
)
updated_agent = client.update_agent(new_agent.id, update_data)
print(f"Updated agent: {updated_agent.name}")
# Get execution history
history = client.get_agent_history(new_agent.id)
print(f"Execution history: {history}")
except httpx.HTTPError as e:
print(f"HTTP error occurred: {e}")
except Exception as e:
print(f"Unexpected error: {e}")
if __name__ == "__main__":
main()
```
## Logging
The client uses the `loguru` library for logging. You can configure the logging level and format:
```python
from loguru import logger
# Configure logging
logger.add("swarmcloud.log", rotation="500 MB")
client = SwarmCloudAPI(api_key="your_api_key_here")
```
## Performance Considerations
1. **Connection Reuse**: The client reuses HTTP connections by default, improving performance for multiple requests.
2. **Timeout Configuration**: Set appropriate timeouts based on your use case:
```python
client = SwarmCloudAPI(
api_key="your_api_key_here",
timeout=5.0 # Shorter timeout for time-sensitive operations
)
```
3. **Batch Operations**: Use batch_execute_agents for multiple agent executions:
```python
results = client.batch_execute_agents(
agents=agents,
payload=shared_payload
)
```
## Rate Limiting
The client respects API rate limits but does not implement retry logic. Implement your own retry mechanism if needed:
```python
from tenacity import retry, stop_after_attempt, wait_exponential
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def execute_with_retry(client, agent_id, payload):
return client.execute_agent(agent_id, payload)
```
## Thread Safety
The client is not thread-safe by default. For concurrent usage, create separate client instances for each thread or implement appropriate synchronization mechanisms.
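As a minimal sketch of the per-thread-instance approach (using only the constructor, context manager, and `execute_agent` shown above; the agent ID and payload are placeholders):

```python
import threading

from swarms_cloud import SwarmCloudAPI


def worker(agent_id: str, payload: dict) -> None:
    # Each thread creates and closes its own client, so no HTTP state is shared.
    with SwarmCloudAPI(api_key="your_api_key_here") as client:
        result = client.execute_agent(agent_id, payload=payload)
        print(f"{threading.current_thread().name}: {result}")


threads = [
    threading.Thread(target=worker, args=("agent_id_here", {"data": i}), name=f"worker-{i}")
    for i in range(3)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```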

@ -161,7 +161,7 @@ print(result)
We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments. We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments.
- **🐦 Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform) - **🐦 Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform)
- **📢 Discord**: [Join the Agora Discord](https://discord.gg/agora) - **📢 Discord**: [Join the Agora Discord](https://discord.gg/jM3Z6M9uMq)
- **Swarms Platform**: [Visit our website](https://swarms.ai) - **Swarms Platform**: [Visit our website](https://swarms.ai)
- **📙 Documentation**: [Read the Docs](https://docs.swarms.ai) - **📙 Documentation**: [Read the Docs](https://docs.swarms.ai)

@ -0,0 +1,189 @@
# Swarms Platform Account Management Documentation
This guide provides comprehensive, production-grade documentation for managing your account on the Swarms Platform. It covers account settings, profile management, billing, payment methods, subscription details, and cryptocurrency wallet management. Use this documentation to navigate the account management interface, understand available options, and perform account-related operations efficiently and securely.
---
## Table of Contents
1. [Overview](#overview)
2. [Accessing the Account Management Page](#accessing-the-account-management-page)
3. [Account Settings](#account-settings)
- [Theme Mode](#theme-mode)
4. [Profile Management](#profile-management)
- [Profile Information](#profile-information)
- [Password Management](#password-management)
5. [Billing and Payment Methods](#billing-and-payment-methods)
- [Subscription Status](#subscription-status)
- [Payment Methods](#payment-methods)
6. [Cryptocurrency Wallet](#cryptocurrency-wallet)
- [Wallet Overview](#wallet-overview)
- [Exchange and Transaction History](#exchange-and-transaction-history)
7. [Additional Resources](#additional-resources)
---
## Overview
The Swarms Platform account management page, available at [https://swarms.world/platform/account](https://swarms.world/platform/account), allows you to configure and update your account settings and preferences. From here, you can manage the appearance of the platform, view and update profile details, manage your billing information and subscriptions, and handle your cryptocurrency wallet operations.
---
## Accessing the Account Management Page
To access your account management dashboard:
1. Log in to your Swarms Platform account.
2. Navigate to [https://swarms.world/platform/account](https://swarms.world/platform/account).
Once on this page, you will see several sections dedicated to different aspects of your account:
- **Account Settings:** Customize the platform appearance and user interface.
- **Profile:** View and manage personal details.
- **Billing:** Review credits, invoices, and manage your payment methods.
- **Crypto:** Manage your cryptocurrency wallet and transactions.
---
## Account Settings
This section allows you to modify your personal account preferences, including the visual theme.
### Theme Mode
You can choose between different theme options to tailor your user experience:
- **Single Theme:**
A fixed theme, independent of system settings.
- **Example:**
- **Logo:** Swarms logo
- **Terminal Command:**
```bash
pip3 install -U swarms
```
- **Theme Options:**
- **light**
- **dark (default)**
- **Sync with System Theme:**
Automatically adjusts the platform theme to match your system's theme settings.
Select the theme mode that best fits your workflow. Changes are applied immediately across the platform.
---
## Profile Management
### Profile Information
The Profile section allows you to view and update your personal details:
- **View Details:**
Your current profile information is displayed, including contact details, username, and any additional settings.
- **Manage Profile:**
Options to update your information, ensuring your account details remain current.
### Password Management
For security purposes, it is important to regularly update your password:
- **Change Password:**
Select the **"Change password"** option to update your login credentials.
Ensure you choose a strong password and keep it confidential.
---
## Billing and Payment Methods
The Billing section helps you manage financial aspects of your account, including credits, invoices, and subscriptions.
### Subscription Status
Your subscription details are clearly displayed:
- **Current Plan:**
Options include **Free**, **Premium**, or **Enterprise**.
- **Status:**
The active subscription status is indicated (e.g., "Active").
- **Customer Portal:**
An option to open the customer portal for additional billing and subscription management.
### Payment Methods
Manage your payment methods and review your billing details:
- **Manage Cards:**
View existing payment methods.
**Example Entry:**
- **Card Type:** mastercard
- **Last 4 Digits:** ending in 9491
- **Expiry Date:** 2030/2
- **Add Card:**
Use the **"Add Card"** option to register a new payment method securely.
### Credit System
Details of the credits available for your account:
- **Credits Available:**
Displays the current credit balance (e.g., `$20.00`).
- **Charge:**
Option to apply charges against your available credits.
- **Invoice:**
Review or download your invoices.
---
## Cryptocurrency Wallet
The Crypto section provides management tools for your cryptocurrency wallet and associated transactions.
### Wallet Overview
- **Connected Wallet:**
Displays your linked wallet information.
- **Example:**
- **Wallet Identifier:** A truncated wallet ID (e.g., `EmVa...79Vb`)
- **$swarms Balance and Price:**
- **Balance:**
Displays your current $swarms balance (e.g., `0.00`).
- **Price:**
Current market price for $swarms (e.g., `$0.0400`).
### Exchange and Transaction History
- **Exchange Functionality:**
Option to exchange $swarms tokens for credits directly through the platform.
- **Transaction History:**
View a detailed log of wallet transactions, ensuring full transparency over all exchanges and wallet activity.
---
## Additional Resources
For further assistance or to learn more about managing your account on the Swarms Platform, refer to the following resources:
- [Help Center](https://swarms.world/help)
- [Customer Support](https://cal.com/swarms)
- [API Documentation](https://swarms.world/platform/api-keys) (for developers)
---
## Best Practices
- **Regular Updates:**
Periodically review your account settings, profile, and payment methods to ensure they are up-to-date.
- **Security Measures:**
Always use strong, unique passwords and consider enabling two-factor authentication if available.
- **Monitor Transactions:**
Regularly check your billing and wallet transaction history to detect any unauthorized activities promptly.

@ -0,0 +1,87 @@
# Swarms Platform API Keys Documentation
This document provides detailed information on managing API keys within the Swarms Platform. API keys grant programmatic access to your account and should be handled securely. Follow the guidelines below to manage your API keys safely and effectively.
---
## Table of Contents
1. [Overview](#overview)
2. [Viewing Your API Keys](#viewing-your-api-keys)
3. [Creating a New API Key](#creating-a-new-api-key)
4. [Security Guidelines](#security-guidelines)
5. [Frequently Asked Questions](#frequently-asked-questions)
---
## Overview
API keys are unique credentials that allow you to interact with the Swarms Platform programmatically. These keys enable you to make authenticated API requests to access or modify your data. **Important:** Once a secret API key is generated, it will not be displayed again. Ensure you store it securely, as it cannot be retrieved from the platform later.
---
## Viewing Your API Keys
When you navigate to the API Keys page ([https://swarms.world/platform/api-keys](https://swarms.world/platform/api-keys)), you will see a list of your API keys along with the following information:
### Key Details:
- **Name:** A label you assign to your API key to help you identify it.
- **Key:** The secret API key is only partially visible here for security reasons.
- **Created Date:** The date when the API key was generated.
- **Actions:** Options available for managing the key (e.g., deleting an API key).
---
## Creating a New API Key
To generate a new API key, follow these steps:
1. **Attach a Credit Card:**
Before creating a new API key, ensure that your account has a credit card attached. This is required for authentication and billing purposes.
2. **Access the API Keys Page:**
Navigate to [https://swarms.world/platform/api-keys](https://swarms.world/platform/api-keys).
3. **Generate a New Key:**
Click on the **"Create new API key"** button. The system will generate a new secret API key for your account.
4. **Store Your API Key Securely:**
Once generated, the full API key will be displayed only once. Copy and store it in a secure location, as it will not be displayed again.
**Note:** Do not share your API key with anyone or expose it in any client-side code (e.g., browser JavaScript).
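Once stored, the key travels with every request. As an illustrative sketch (the environment variable name is arbitrary; the `x-api-key` header and endpoint mirror the wallet API examples elsewhere in these docs):

```bash
# Keep the key in an environment variable rather than in source code
export SWARMS_PLATFORM_API_KEY="your_api_key_here"

# Reference the variable when calling an endpoint that accepts the key
curl -X GET "https://swarms.world/api/solana/get-metrics?page=1&limit=10" \
  -H "x-api-key: ${SWARMS_PLATFORM_API_KEY}"
```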
---
## Security Guidelines
- **Confidentiality:**
Your API keys are sensitive credentials. Do not share them with anyone or include them in public repositories or client-side code.
- **Storage:**
Store your API keys in secure, encrypted storage. Avoid saving them in plain text files or unsecured locations.
- **Rotation:**
If you suspect that your API key has been compromised, immediately delete it and create a new one.
- **Access Control:**
Limit access to your API keys to only those systems and personnel who absolutely require it.
---
## Frequently Asked Questions
### Q1: **Why do I need a credit card attached to my account to create an API key?**
**A:** The requirement to attach a credit card helps verify your identity and manage billing, ensuring responsible usage of the API services provided by the Swarms Platform.
### Q2: **What happens if I lose my API key?**
**A:** If you lose your API key, you will need to generate a new one. The platform does not store the full key after its initial generation, so recovery is not possible.
### Q3: **How can I delete an API key?**
**A:** On the API Keys page, locate the key you wish to delete and click the **"Delete"** action next to it. This will revoke the key's access immediately.
### Q4: **Can I have multiple API keys?**
**A:** Yes, you can generate and manage multiple API keys. Use naming conventions to keep track of their usage and purpose.
---
For any further questions or issues regarding API key management, please refer to our [Help Center](https://swarms.world/help) or contact our support team.

@ -113,9 +113,9 @@ To further enhance your understanding and usage of the Swarms Platform, explore
### Links ### Links
- [API Documentation](https://docs.swarms.world) - [API Documentation](https://docs.swarms.world)
- [Community Forums](https://discord.com/servers/agora-999382051935506503) - [Community Forums](https://discord.gg/swarms)
- [Tutorials and Guides](https://docs.swarms.world)) - [Tutorials and Guides](https://docs.swarms.world))
- [Support](https://discord.com/servers/agora-999382051935506503) - [Support](https://discord.gg/swarms)
## Conclusion ## Conclusion

@ -0,0 +1,324 @@
# Swarms Finance Tools Documentation
## Installation
```bash
pip3 install -U swarms-tools yfinance requests httpx pandas loguru backoff web3 solana spl-token
```
## Environment Variables
Create a `.env` file in your project root with the following variables (as needed):
| Environment Variable | Description | Required For |
|---------------------|-------------|--------------|
| `COINBASE_API_KEY` | Coinbase API Key | Coinbase Trading |
| `COINBASE_API_SECRET` | Coinbase API Secret | Coinbase Trading |
| `COINBASE_API_PASSPHRASE` | Coinbase API Passphrase | Coinbase Trading |
| `COINMARKETCAP_API_KEY` | CoinMarketCap API Key | CoinMarketCap Data |
| `HELIUS_API_KEY` | Helius API Key | Solana Data |
| `EODHD_API_KEY` | EODHD API Key | Stock News |
| `OKX_API_KEY` | OKX API Key | OKX Trading |
| `OKX_API_SECRET` | OKX API Secret | OKX Trading |
| `OKX_PASSPHRASE` | OKX Passphrase | OKX Trading |
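For example, a `.env` skeleton covering a few of the entries above (add only the keys for the providers you actually use):

```bash
# Coinbase Trading
COINBASE_API_KEY=""
COINBASE_API_SECRET=""
COINBASE_API_PASSPHRASE=""

# CoinMarketCap Data
COINMARKETCAP_API_KEY=""

# Solana Data
HELIUS_API_KEY=""
```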
## Tools Overview
| Tool | Description | Requires API Key |
|------|-------------|-----------------|
| Yahoo Finance | Real-time stock market data | No |
| CoinGecko | Cryptocurrency market data | No |
| Coinbase | Cryptocurrency trading and data | Yes |
| CoinMarketCap | Cryptocurrency market data | Yes |
| Helius | Solana blockchain data | Yes |
| DexScreener | DEX trading pairs and data | No |
| HTX (Huobi) | Cryptocurrency exchange data | No |
| OKX | Cryptocurrency exchange data | Yes |
| EODHD | Stock market news | Yes |
| Jupiter | Solana DEX aggregator | No |
| Sector Analysis | GICS sector ETF analysis | No |
| Solana Tools | Solana wallet and token tools | Yes |
## Detailed Documentation
### Yahoo Finance API
Fetch real-time and historical stock market data.
```python
from swarms_tools.finance import yahoo_finance_api
# Fetch data for single stock
data = yahoo_finance_api(["AAPL"])
# Fetch data for multiple stocks
data = yahoo_finance_api(["AAPL", "GOOG", "MSFT"])
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| stock_symbols | List[str] | List of stock symbols | Yes |
### CoinGecko API
Fetch comprehensive cryptocurrency data.
```python
from swarms_tools.finance import coin_gecko_coin_api
# Fetch Bitcoin data
data = coin_gecko_coin_api("bitcoin")
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| coin | str | Cryptocurrency ID (e.g., 'bitcoin') | Yes |
### Coinbase Trading
Execute trades and fetch market data from Coinbase.
```python
from swarms_tools.finance import get_coin_data, place_buy_order, place_sell_order
# Fetch coin data
data = get_coin_data("BTC-USD")
# Place orders
buy_order = place_buy_order("BTC-USD", amount=100) # Buy $100 worth of BTC
sell_order = place_sell_order("BTC-USD", amount=0.01) # Sell 0.01 BTC
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| symbol | str | Trading pair (e.g., 'BTC-USD') | Yes |
| amount | Union[str, float, Decimal] | Trade amount | Yes |
| sandbox | bool | Use sandbox environment | No |
### CoinMarketCap API
Fetch cryptocurrency market data from CoinMarketCap.
```python
from swarms_tools.finance import coinmarketcap_api
# Fetch single coin data
data = coinmarketcap_api(["Bitcoin"])
# Fetch multiple coins
data = coinmarketcap_api(["Bitcoin", "Ethereum", "Tether"])
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| coin_names | Optional[List[str]] | List of coin names | No |
### Helius API (Solana)
Fetch Solana blockchain data.
```python
from swarms_tools.finance import helius_api_tool
# Fetch account data
account_data = helius_api_tool("account", "account_address")
# Fetch transaction data
tx_data = helius_api_tool("transaction", "tx_signature")
# Fetch token data
token_data = helius_api_tool("token", "token_mint_address")
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| action | str | Type of action ('account', 'transaction', 'token') | Yes |
| identifier | str | Address/signature to query | Yes |
### DexScreener API
Fetch DEX trading pair data.
```python
from swarms_tools.finance import (
fetch_dex_screener_profiles,
fetch_latest_token_boosts,
fetch_solana_token_pairs
)
# Fetch latest profiles
profiles = fetch_dex_screener_profiles()
# Fetch token boosts
boosts = fetch_latest_token_boosts()
# Fetch Solana pairs
pairs = fetch_solana_token_pairs(["token_address"])
```
### HTX (Huobi) API
Fetch cryptocurrency data from HTX.
```python
from swarms_tools.finance import fetch_htx_data
# Fetch coin data
data = fetch_htx_data("BTC")
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| coin_name | str | Cryptocurrency symbol | Yes |
### OKX API
Fetch cryptocurrency data from OKX.
```python
from swarms_tools.finance import okx_api_tool
# Fetch single coin
data = okx_api_tool(["BTC-USDT"])
# Fetch multiple coins
data = okx_api_tool(["BTC-USDT", "ETH-USDT"])
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| coin_symbols | Optional[List[str]] | List of trading pairs | No |
### EODHD Stock News
Fetch stock market news.
```python
from swarms_tools.finance import fetch_stock_news
# Fetch news for a stock
news = fetch_stock_news("AAPL")
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| stock_name | str | Stock symbol | Yes |
### Jupiter (Solana DEX)
Fetch Solana DEX prices.
```python
from swarms_tools.finance import get_jupiter_price
# Fetch price data
price = get_jupiter_price(input_mint="input_token", output_mint="output_token")
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| input_mint | str | Input token mint address | Yes |
| output_mint | str | Output token mint address | Yes |
### Sector Analysis
Analyze GICS sector ETFs.
```python
from swarms_tools.finance.sector_analysis import analyze_index_sectors
# Run sector analysis
analyze_index_sectors()
```
### Solana Tools
Check Solana wallet balances and manage tokens.
```python
from swarms_tools.finance import check_solana_balance, check_multiple_wallets
# Check single wallet
balance = check_solana_balance("wallet_address")
# Check multiple wallets
balances = check_multiple_wallets(["wallet1", "wallet2"])
```
**Arguments:**
| Parameter | Type | Description | Required |
|-----------|------|-------------|----------|
| wallet_address | str | Solana wallet address | Yes |
| wallet_addresses | List[str] | List of wallet addresses | Yes |
## Complete Example
Here's a comprehensive example using multiple tools:
```python
from swarms_tools.finance import (
yahoo_finance_api,
coin_gecko_coin_api,
coinmarketcap_api,
fetch_htx_data
)
# Fetch stock data
stocks = yahoo_finance_api(["AAPL", "GOOG"])
print("Stock Data:", stocks)
# Fetch crypto data from multiple sources
bitcoin_cg = coin_gecko_coin_api("bitcoin")
print("Bitcoin Data (CoinGecko):", bitcoin_cg)
crypto_cmc = coinmarketcap_api(["Bitcoin", "Ethereum"])
print("Crypto Data (CoinMarketCap):", crypto_cmc)
btc_htx = fetch_htx_data("BTC")
print("Bitcoin Data (HTX):", btc_htx)
```
## Error Handling
All tools include proper error handling and logging. Errors are logged using the `loguru` logger. Example error handling:
```python
from loguru import logger
try:
data = yahoo_finance_api(["INVALID"])
except Exception as e:
logger.error(f"Error fetching stock data: {e}")
```
## Rate Limits
Please be aware of rate limits for various APIs:
- CoinGecko: 50 calls/minute (free tier)
- CoinMarketCap: Varies by subscription
- Helius: Varies by subscription
- DexScreener: 300 calls/minute for pairs, 60 calls/minute for profiles
- Other APIs: Refer to respective documentation
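If you hit these limits, one option is to wrap calls with the `backoff` package from the installation command above. This is a sketch only; it assumes the tool surfaces rate-limit failures as exceptions rather than logging and returning:

```python
import backoff

from swarms_tools.finance import coin_gecko_coin_api


# Retry with exponential backoff when the free CoinGecko tier throttles the call
@backoff.on_exception(backoff.expo, Exception, max_tries=5)
def fetch_bitcoin():
    return coin_gecko_coin_api("bitcoin")


print(fetch_bitcoin())
```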
## Dependencies
The package automatically handles most dependencies, but you may need to install some of the optional extras manually. For example, the extras from the installation command at the top of this page can be installed on their own:
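```bash
pip3 install -U yfinance requests httpx pandas loguru backoff web3 solana spl-token
```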

@ -0,0 +1,239 @@
# Swarms Tools
Welcome to **Swarms Tools**, the ultimate package for integrating **cutting-edge APIs** into Python functions with seamless multi-agent system compatibility. Designed for enterprises at the forefront of innovation, **Swarms Tools** is your key to simplifying complexity and unlocking operational excellence.
---
## 🚀 Features
- **Unified API Integration**: Ready-to-use Python functions for financial data, social media, IoT, and more.
- **Enterprise-Grade Design**: Comprehensive type hints, structured outputs, and robust documentation.
- **Agent-Ready Framework**: Optimized for seamless integration into Swarms' multi-agent orchestration systems.
- **Expandable Architecture**: Easily extend functionality with a standardized schema for new tools.
---
## 🔧 Installation
```bash
pip3 install -U swarms-tools
```
---
## 📂 Directory Structure
```plaintext
swarms-tools/
├── swarms_tools/
│ ├── finance/
│ │ ├── htx_tool.py
│ │ ├── eodh_api.py
│ │ └── coingecko_tool.py
│ ├── social_media/
│ │ ├── telegram_tool.py
│ ├── utilities/
│ │ └── logging.py
├── tests/
│ ├── test_financial_data.py
│ └── test_social_media.py
└── README.md
```
---
## 💼 Use Cases
## Finance
Explore our diverse range of financial tools, designed to streamline your operations. If you need a tool not listed, feel free to submit an issue or accelerate integration by contributing a pull request with your tool of choice.
| **Tool Name** | **Function** | **Description** |
|---------------------------|--------------------------|---------------------------------------------------------------------------------|
| `fetch_stock_news` | `fetch_stock_news` | Fetches the latest stock news and updates. |
| `fetch_htx_data` | `fetch_htx_data` | Retrieves financial data from the HTX platform. |
| `yahoo_finance_api` | `yahoo_finance_api` | Fetches comprehensive stock data from Yahoo Finance, including prices and trends. |
| `coin_gecko_coin_api` | `coin_gecko_coin_api` | Fetches cryptocurrency data from CoinGecko, including market and price information. |
| `helius_api_tool` | `helius_api_tool` | Retrieves blockchain account, transaction, or token data using the Helius API. |
| `okx_api_tool` | `okx_api_tool` | Fetches detailed cryptocurrency data for coins from the OKX exchange. |
### Financial Data Retrieval
Enable precise and actionable financial insights:
#### Example 1: Fetch Historical Data
```python
from swarms_tools import fetch_htx_data
# Fetch historical trading data for "Swarms Corporation"
response = fetch_htx_data("swarms")
print(response)
```
#### Example 2: Stock News Analysis
```python
from swarms_tools import fetch_stock_news
# Retrieve latest stock news for Apple
news = fetch_stock_news("AAPL")
print(news)
```
#### Example 3: Cryptocurrency Metrics
```python
from swarms_tools import coin_gecko_coin_api
# Fetch live data for Bitcoin
crypto_data = coin_gecko_coin_api("bitcoin")
print(crypto_data)
```
### Social Media Automation
Streamline communication and engagement:
#### Example: Telegram Bot Messaging
```python
from swarms_tools import telegram_dm_or_tag_api
def send_alert(response: str):
telegram_dm_or_tag_api(response)
# Send a message to a user or group
send_alert("Mission-critical update from Swarms.")
```
---
## Dex Screener
This is a tool that allows you to fetch data from the Dex Screener API. It supports multiple chains and multiple tokens.
```python
from swarms_tools.finance.dex_screener import (
fetch_latest_token_boosts,
fetch_dex_screener_profiles,
)
fetch_dex_screener_profiles()
fetch_latest_token_boosts()
```
---
## Structs
The tool chainer executes multiple tools and aggregates their results, running them either in parallel or sequentially.
```python
# Example usage
from loguru import logger
from swarms_tools.structs import tool_chainer
if __name__ == "__main__":
logger.add("tool_chainer.log", rotation="500 MB", level="INFO")
# Example tools
def tool1():
return "Tool1 Result"
def tool2():
return "Tool2 Result"
# def tool3():
# raise ValueError("Simulated error in Tool3")
tools = [tool1, tool2]
# Parallel execution
parallel_results = tool_chainer(tools, parallel=True)
print("Parallel Results:", parallel_results)
# Sequential execution
# sequential_results = tool_chainer(tools, parallel=False)
# print("Sequential Results:", sequential_results)
```
---
## 🧩 Standardized Schema
Every tool in **Swarms Tools** adheres to a strict schema for maintainability and interoperability:
### Schema Template
1. **Functionality**:
- Encapsulate API logic into a modular, reusable function.
2. **Typing**:
- Leverage Python type hints for input validation and clarity.
Example:
```python
def fetch_data(symbol: str, date_range: str) -> str:
"""
Fetch financial data for a given symbol and date range.
Args:
symbol (str): Ticker symbol of the asset.
date_range (str): Timeframe for the data (e.g., '1d', '1m', '1y').
    Returns:
        str: A string containing the requested financial metrics.
"""
pass
```
3. **Documentation**:
- Include detailed docstrings with parameter explanations and usage examples.
4. **Output Standardization**:
- Ensure consistent outputs (e.g., strings) for easy downstream agent integration.
5. **API-Key Management**:
- All API keys must be fetched with `os.getenv("YOUR_KEY")`
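For instance, a minimal sketch of that convention (the `OKX_API_KEY` name comes from the finance tools documented elsewhere in these docs; how a tool should react to a missing key is up to its author):

```python
import os

# Illustrative only: read the key from the environment at call time, never hard-code it
okx_api_key = os.getenv("OKX_API_KEY")
if okx_api_key is None:
    raise EnvironmentError("OKX_API_KEY is not set")
```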
---
## 📖 Documentation
Comprehensive documentation is available to guide developers and enterprises. Visit our [official docs](https://docs.swarms.world) for detailed API references, usage examples, and best practices.
---
## 🛠 Contributing
We welcome contributions from the global developer community. To contribute:
1. **Fork the Repository**: Start by forking the repository.
2. **Create a Feature Branch**: Use a descriptive branch name: `feature/add-new-tool`.
3. **Commit Your Changes**: Write meaningful commit messages.
4. **Submit a Pull Request**: Open a pull request for review.
---
## 🛡️ License
This project is licensed under the **MIT License**. See the [LICENSE](LICENSE) file for details.
---
## 🌠 Join the Future
Explore the limitless possibilities of agent-based systems. Together, we can build a smarter, faster, and more interconnected world.
**Visit us:** [Swarms Corporation](https://swarms.ai)
**Follow us:** [Twitter](https://twitter.com/swarms_corp)
---
**"The future belongs to those who dare to automate it."**
**— The Swarms Corporation**

@ -0,0 +1,172 @@
# Search Tools Documentation
This documentation covers the search tools available in the `swarms-tools` package.
## Installation
```bash
pip3 install -U swarms-tools
```
## Environment Variables Required
Create a `.env` file in your project root with the following API keys:
```bash
# Bing Search API
BING_API_KEY=your_bing_api_key
# Google Search API
GOOGLE_API_KEY=your_google_api_key
GOOGLE_CX=your_google_cx_id
GEMINI_API_KEY=your_gemini_api_key
# Exa AI API
EXA_API_KEY=your_exa_api_key
```
## Tools Overview
### 1. Bing Search Tool
The Bing Search tool allows you to fetch web articles using the Bing Web Search API.
#### Function: `fetch_web_articles_bing_api`
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| query | str | Yes | The search query to retrieve articles |
#### Example Usage:
```python
from swarms_tools.search import fetch_web_articles_bing_api
# Fetch articles about AI
results = fetch_web_articles_bing_api("swarms ai github")
print(results)
```
### 2. Exa AI Search Tool
The Exa AI tool is designed for searching research papers and academic content.
#### Function: `search_exa_ai`
| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| query | str | No | "Latest developments in LLM capabilities" | Search query |
| num_results | int | No | 10 | Number of results to return |
| auto_prompt | bool | No | True | Whether to use auto-prompting |
| include_domains | List[str] | No | ["arxiv.org", "paperswithcode.com"] | Domains to include |
| exclude_domains | List[str] | No | [] | Domains to exclude |
| category | str | No | "research paper" | Category of search |
#### Example Usage:
```python
from swarms_tools.search import search_exa_ai
# Search for research papers
results = search_exa_ai(
query="Latest developments in LLM capabilities",
num_results=5,
include_domains=["arxiv.org"]
)
print(results)
```
### 3. Google Search Tool
A comprehensive search tool that uses Google Custom Search API and includes content extraction and summarization using Gemini.
#### Class: `WebsiteChecker`
| Method | Parameters | Description |
|--------|------------|-------------|
| search | query: str | Main search function that fetches, processes, and summarizes results |
#### Example Usage:
```python
import asyncio

from swarms_tools.search import WebsiteChecker
# Initialize with an agent (required for summarization)
checker = WebsiteChecker(agent=your_agent_function)
# Perform search
async def search_example():
    results = await checker.search("who won elections 2024 us")
    print(results)

asyncio.run(search_example())
# For synchronous usage
from swarms_tools.search import search
results = search("who won elections 2024 us", agent=your_agent_function)
print(results)
```
## Features
- **Bing Search**: Fetch and parse web articles with structured output
- **Exa AI**: Specialized academic and research paper search
- **Google Search**:
- Custom search with content extraction
- Concurrent URL processing
- Content summarization using Gemini
- Progress tracking
- Automatic retry mechanisms
- Results saved to JSON
## Dependencies
The tools automatically handle dependency installation, but here are the main requirements:
```text
aiohttp
asyncio
beautifulsoup4
google-generativeai
html2text
playwright
python-dotenv
rich
tenacity
```
## Error Handling
All tools include robust error handling:
- Automatic retries for failed requests
- Timeout handling
- Rate limiting consideration
- Detailed error messages
## Output Format
Each tool provides structured output:
- **Bing Search**: Returns formatted string with article details
- **Exa AI**: Returns JSON response with search results
- **Google Search**: Returns summarized content with sections:
- Key Findings
- Important Details
- Sources
## Best Practices
1. Always store API keys in environment variables
2. Use appropriate error handling
3. Consider rate limits of the APIs
4. Cache results when appropriate
5. Monitor API usage and costs
## Limitations
- Bing Search: Limited to 4 articles per query
- Exa AI: Focused on academic content
- Google Search: Requires Gemini API for summarization
## Support
For issues and feature requests, please visit the [GitHub repository](https://github.com/swarms-tools).

@ -0,0 +1,325 @@
# Twitter Tool Documentation
## Overview
The Twitter Tool provides a convenient interface for interacting with Twitter's API through the swarms-tools package. This documentation covers the initialization process and available functions for posting, replying, liking, and quoting tweets, as well as retrieving metrics.
## Installation
```bash
pip install swarms-tools
```
## Authentication
The Twitter Tool requires Twitter API credentials for authentication. These should be stored as environment variables:
```bash
TWITTER_ID=your_twitter_id
TWITTER_NAME=your_twitter_name
TWITTER_DESCRIPTION=your_twitter_description
TWITTER_API_KEY=your_api_key
TWITTER_API_SECRET_KEY=your_api_secret_key
TWITTER_ACCESS_TOKEN=your_access_token
TWITTER_ACCESS_TOKEN_SECRET=your_access_token_secret
```
## Initialization
### TwitterTool Configuration Options
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| id | str | Yes | Unique identifier for the Twitter tool instance |
| name | str | Yes | Name of the Twitter tool instance |
| description | str | No | Description of the tool's purpose |
| credentials | dict | Yes | Dictionary containing Twitter API credentials |
### Credentials Dictionary Structure
| Key | Type | Required | Description |
|-----|------|----------|-------------|
| apiKey | str | Yes | Twitter API Key |
| apiSecretKey | str | Yes | Twitter API Secret Key |
| accessToken | str | Yes | Twitter Access Token |
| accessTokenSecret | str | Yes | Twitter Access Token Secret |
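For example, a minimal initialization following the tables above (this mirrors the production example further down; credentials are read from environment variables):

```python
import os

from swarms_tools.social_media.twitter_tool import TwitterTool

options = {
    "id": "my-twitter-tool",
    "name": "my-twitter-tool",
    "description": "Example Twitter tool instance.",
    "credentials": {
        "apiKey": os.getenv("TWITTER_API_KEY"),
        "apiSecretKey": os.getenv("TWITTER_API_SECRET_KEY"),
        "accessToken": os.getenv("TWITTER_ACCESS_TOKEN"),
        "accessTokenSecret": os.getenv("TWITTER_ACCESS_TOKEN_SECRET"),
    },
}

twitter_tool = TwitterTool(options)
```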
## Available Functions
### initialize_twitter_tool()
Creates and returns a new instance of the TwitterTool.
```python
def initialize_twitter_tool() -> TwitterTool:
```
Returns:
- TwitterTool: Initialized Twitter tool instance
### post_tweet()
Posts a new tweet to Twitter.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| tweet | str | Yes | Text content of the tweet to post |
Raises:
- tweepy.TweepyException: If tweet posting fails
### reply_tweet()
Replies to an existing tweet.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| tweet_id | int | Yes | ID of the tweet to reply to |
| reply | str | Yes | Text content of the reply |
Raises:
- tweepy.TweepyException: If reply posting fails
### like_tweet()
Likes a specified tweet.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| tweet_id | int | Yes | ID of the tweet to like |
Raises:
- tweepy.TweepyException: If liking the tweet fails
### quote_tweet()
Creates a quote tweet.
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| tweet_id | int | Yes | ID of the tweet to quote |
| quote | str | Yes | Text content to add to the quoted tweet |
Raises:
- tweepy.TweepyException: If quote tweet creation fails
### get_metrics()
Retrieves Twitter metrics.
Returns:
- Dict[str, int]: Dictionary containing various Twitter metrics
Raises:
- tweepy.TweepyException: If metrics retrieval fails
## Usage Examples
### Basic Tweet Posting
```python
from swarms_tools.twitter import initialize_twitter_tool, post_tweet
# Post a simple tweet
post_tweet("Hello, Twitter!")
```
### Interacting with Tweets
```python
# Reply to a tweet
reply_tweet(12345, "Great point!")
# Like a tweet
like_tweet(12345)
# Quote a tweet
quote_tweet(12345, "Adding my thoughts on this!")
```
### Retrieving Metrics
```python
metrics = get_metrics()
print(f"Current metrics: {metrics}")
```
## Error Handling
All functions include built-in error handling and will print error messages if operations fail. It's recommended to implement additional error handling in production environments:
```python
try:
post_tweet("Hello, Twitter!")
except Exception as e:
logger.error(f"Tweet posting failed: {e}")
# Implement appropriate error handling
```
## Production Example
This is an example of how to use the TwitterTool in a production environment using Swarms.
```python
import os
from time import time
from swarm_models import OpenAIChat
from swarms import Agent
from dotenv import load_dotenv
from swarms_tools.social_media.twitter_tool import TwitterTool
load_dotenv()
model_name = "gpt-4o"
model = OpenAIChat(
model_name=model_name,
max_tokens=3000,
openai_api_key=os.getenv("OPENAI_API_KEY"),
)
medical_coder = Agent(
agent_name="Medical Coder",
system_prompt="""
You are a highly experienced and certified medical coder with extensive knowledge of ICD-10 coding guidelines, clinical documentation standards, and compliance regulations. Your responsibility is to ensure precise, compliant, and well-documented coding for all clinical cases.
### Primary Responsibilities:
1. **Review Clinical Documentation**: Analyze all available clinical records, including specialist inputs, physician notes, lab results, imaging reports, and discharge summaries.
2. **Assign Accurate ICD-10 Codes**: Identify and assign appropriate codes for primary diagnoses, secondary conditions, symptoms, and complications.
3. **Ensure Coding Compliance**: Follow the latest ICD-10-CM/PCS coding guidelines, payer-specific requirements, and organizational policies.
4. **Document Code Justification**: Provide clear, evidence-based rationale for each assigned code.
### Detailed Coding Process:
- **Review Specialist Inputs**: Examine all relevant documentation to capture the full scope of the patient's condition and care provided.
- **Identify Diagnoses**: Determine the primary and secondary diagnoses, as well as any symptoms or complications, based on the documentation.
- **Assign ICD-10 Codes**: Select the most accurate and specific ICD-10 codes for each identified diagnosis or condition.
- **Document Supporting Evidence**: Record the documentation source (e.g., lab report, imaging, or physician note) for each code to justify its assignment.
- **Address Queries**: Note and flag any inconsistencies, missing information, or areas requiring clarification from providers.
### Output Requirements:
Your response must be clear, structured, and compliant with professional standards. Use the following format:
1. **Primary Diagnosis Codes**:
- **ICD-10 Code**: [e.g., E11.9]
- **Description**: [e.g., Type 2 diabetes mellitus without complications]
- **Supporting Documentation**: [e.g., Physician's note dated MM/DD/YYYY]
2. **Secondary Diagnosis Codes**:
- **ICD-10 Code**: [Code]
- **Description**: [Description]
- **Order of Clinical Significance**: [Rank or priority]
3. **Symptom Codes**:
- **ICD-10 Code**: [Code]
- **Description**: [Description]
4. **Complication Codes**:
- **ICD-10 Code**: [Code]
- **Description**: [Description]
- **Relevant Documentation**: [Source of information]
5. **Coding Notes**:
- Observations, clarifications, or any potential issues requiring provider input.
### Additional Guidelines:
- Always prioritize specificity and compliance when assigning codes.
- For ambiguous cases, provide a brief note with reasoning and flag for clarification.
- Ensure the output format is clean, consistent, and ready for professional use.
""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
)
# Define your options with the necessary credentials
options = {
"id": "mcsswarm",
"name": "mcsswarm",
"description": "An example Twitter Plugin for testing.",
"credentials": {
"apiKey": os.getenv("TWITTER_API_KEY"),
"apiSecretKey": os.getenv("TWITTER_API_SECRET_KEY"),
"accessToken": os.getenv("TWITTER_ACCESS_TOKEN"),
"accessTokenSecret": os.getenv("TWITTER_ACCESS_TOKEN_SECRET"),
},
}
# Initialize the TwitterTool with your options
twitter_plugin = TwitterTool(options)
# # Post a tweet
# post_tweet_fn = twitter_plugin.get_function('post_tweet')
# post_tweet_fn("Hello world!")
# Assuming `twitter_plugin` and `medical_coder` are already initialized
post_tweet = twitter_plugin.get_function("post_tweet")
# Set to track posted tweets and avoid duplicates
posted_tweets = set()
def post_unique_tweet():
"""
Generate and post a unique tweet. Skip duplicates.
"""
tweet_prompt = (
"Share an intriguing, lesser-known fact about a medical disease, and include an innovative, fun, or surprising way to manage or cure it! "
"Make the response playful, engaging, and inspiring—something that makes people smile while learning. No markdown, just plain text!"
)
# Generate a new tweet text
tweet_text = medical_coder.run(tweet_prompt)
# Check for duplicates
if tweet_text in posted_tweets:
print("Duplicate tweet detected. Skipping...")
return
# Post the tweet
try:
post_tweet(tweet_text)
print(f"Posted tweet: {tweet_text}")
# Add the tweet to the set of posted tweets
posted_tweets.add(tweet_text)
except Exception as e:
print(f"Error posting tweet: {e}")
# Loop to post tweets every 10 seconds
def start_tweet_loop(interval=10):
"""
Continuously post tweets every `interval` seconds.
Args:
interval (int): Time in seconds between tweets.
"""
print("Starting tweet loop...")
while True:
post_unique_tweet()
time.sleep(interval)
# Start the loop
start_tweet_loop(10)
```
## Best Practices
1. Always store credentials in environment variables
2. Implement rate limiting in production environments
3. Add proper logging for all operations
4. Handle errors gracefully
5. Validate tweet content before posting (see the sketch after this list)
6. Monitor API usage limits
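As referenced in item 5, a minimal validation sketch might look like the following. The 280-character limit is an assumption about standard text tweets, and `validate_tweet` is a hypothetical helper rather than part of swarms-tools.
```python
from swarms_tools.twitter import post_tweet


def validate_tweet(text: str, max_length: int = 280) -> str:
    """Return the text if it is postable, otherwise raise ValueError."""
    if not text or not text.strip():
        raise ValueError("Tweet text is empty.")
    if len(text) > max_length:
        raise ValueError(
            f"Tweet is {len(text)} characters; the limit is {max_length}."
        )
    return text


post_tweet(validate_tweet("Hello, Twitter!"))
```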
## Rate Limits
Be aware of Twitter's API rate limits. Implement appropriate delays between requests in production environments to avoid hitting these limits.
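One rough way to stay under these limits is to space out consecutive calls with a fixed delay, as in the sketch below. The 15-second pause is an illustrative value rather than an official Twitter limit, and `post_tweet_with_delay` is a hypothetical wrapper.
```python
import time

from swarms_tools.twitter import post_tweet


def post_tweet_with_delay(text: str, delay_seconds: float = 15.0) -> None:
    """Post a tweet, then pause so back-to-back calls are spaced apart."""
    post_tweet(text)
    time.sleep(delay_seconds)
```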
## Dependencies
- tweepy
- python-dotenv
- swarms-tools
## Version Compatibility
- Python 3.7+
- Latest version of swarms-tools package

@ -1,6 +1,3 @@
-import os
-from swarm_models import OpenAIChat
 from swarms import Agent
 from swarms.prompts.finance_agent_sys_prompt import (
     FINANCIAL_AGENT_SYS_PROMPT,
@ -9,16 +6,6 @@ from dotenv import load_dotenv
 load_dotenv()
-# Get the OpenAI API key from the environment variable
-api_key = os.getenv("GROQ_API_KEY")
-# Model
-model = OpenAIChat(
-    openai_api_base="https://api.groq.com/openai/v1",
-    openai_api_key=api_key,
-    model_name="llama-3.1-70b-versatile",
-    temperature=0.1,
-)
 # Initialize the agent
 agent = Agent(
@ -26,7 +13,7 @@ agent = Agent(
     agent_description="Personal finance advisor agent",
     system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
     max_loops=1,
-    llm=model,
+    model_name="gpt-4o",
     dynamic_temperature_enabled=True,
     user_name="swarms_corp",
     retry_attempts=3,
@ -41,5 +28,4 @@ agent = Agent(
 agent.run(
     "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
-    all_cores=True,
 )

@ -0,0 +1,388 @@
import os
from dataclasses import dataclass
from typing import Tuple
from litellm import completion
from loguru import logger
from swarms import Agent
EXTRACTION_PROMPT = """
You are a specialized Chart2Table extraction agent that converts visual charts into precise textual descriptions.
Output Format:
[Chart Type]
Type: {bar|line|pie|scatter|combination}
Title: {chart title}
X-Axis: {label and scale}
Y-Axis: {label and scale}
[Data Series]
Name: {series name}
Values: {comma-separated list of values}
{repeat for each series}
[Annotations]
- {list any markers, gridlines, legends}
- {note any data gaps or anomalies}
Guidelines:
1. Maintain exact numerical precision
2. List ALL data points in order
3. Note any gaps, outliers or special patterns
4. Describe axes scales (linear/log) and units
5. Include legends and series names verbatim
6. Note any data point annotations or markers
7. Describe chart elements spatially (top-left, center, etc)
8. Include color and style information if relevant
9. Note relationships between multiple series
10. Flag any data quality or readability issues"""
REFORMULATION_PROMPT = """You are an Answer Reformulation specialist that breaks down complex analytical statements into atomic, verifiable claims.
Output Format:
[Core Claims]
1. {single fact with exact numbers}
2. {another atomic fact}
{continue for all core claims}
[Supporting Context]
1. {relevant context that supports core claims}
2. {additional contextual information}
{continue for all context}
[Assumptions]
1. {implicit assumption made}
2. {another assumption}
{continue for all assumptions}
Guidelines:
1. Each claim must be independently verifiable
2. Use exact numbers, never round or approximate
3. Split compound statements into atomic facts
4. Make implicit comparisons explicit
5. Note temporal relationships clearly
6. Include units with all measurements
7. Flag any uncertainty or approximations
8. Note data source limitations
9. Preserve calculation steps
10. Maintain logical dependencies"""
CAPTIONING_PROMPT = """You are an Entity Captioning specialist that generates rich contextual descriptions of chart elements.
Output Format:
[Data Points]
{x,y}: {detailed description of point significance}
{continue for key points}
[Trends]
- {description of overall pattern}
- {notable sub-patterns}
{continue for all trends}
[Relationships]
- {correlation between variables}
- {causation if evident}
{continue for all relationships}
[Context]
- {broader context for interpretation}
- {relevant external factors}
{continue for all context}
Guidelines:
1. Describe both local and global patterns
2. Note statistical significance of changes
3. Identify cyclic or seasonal patterns
4. Flag outliers and anomalies
5. Compare relative magnitudes
6. Note rate of change patterns
7. Describe distribution characteristics
8. Highlight key inflection points
9. Note data clustering patterns
10. Include domain-specific insights"""
PREFILTER_PROMPT = """You are a Pre-filtering specialist that identifies relevant chart elements for verification.
Output Format:
[Critical Elements]
1. {element}: Score {0-10}
Evidence: {why this supports claims}
{continue for all relevant elements}
[Supporting Elements]
1. {element}: Score {0-10}
Context: {how this adds context}
{continue for all supporting elements}
[Relevance Chain]
1. {claim} -> {element} -> {evidence}
{continue for all connections}
Guidelines:
1. Score relevance 0-10 with detailed rationale
2. Build explicit evidence chains
3. Note both direct and indirect support
4. Consider temporal relevance
5. Account for data relationships
6. Note confidence levels
7. Include contextual importance
8. Consider alternative interpretations
9. Note missing evidence
10. Explain filtering decisions"""
RERANK_PROMPT = """You are a Re-ranking specialist that orders evidence by strength and relevance.
Output Format:
[Primary Evidence]
1. {element} - Score: {0-10}
Strength: {detailed justification}
{continue for top evidence}
[Supporting Evidence]
1. {element} - Score: {0-10}
Context: {how this reinforces primary evidence}
{continue for supporting evidence}
[Evidence Chains]
1. {claim} -> {primary} -> {supporting} -> {conclusion}
{continue for all chains}
Guidelines:
1. Use explicit scoring criteria
2. Consider evidence independence
3. Note corroborating elements
4. Account for evidence quality
5. Consider contradictory evidence
6. Note confidence levels
7. Explain ranking decisions
8. Build complete evidence chains
9. Note gaps in evidence
10. Consider alternative interpretations"""
LOCALIZATION_PROMPT = """You are a Cell Localization specialist that precisely maps data to visual elements.
Output Format:
[Element Locations]
1. Type: {bar|line|point|label}
Position: {x1,y1,x2,y2}
Value: {associated data value}
Confidence: {0-10}
{continue for all elements}
[Spatial Relationships]
- {relative positions}
- {alignment patterns}
{continue for all relationships}
[Visual Context]
- {surrounding elements}
- {reference points}
{continue for context}
Guidelines:
1. Use normalized coordinates (0-1)
2. Note element boundaries precisely
3. Include confidence scores
4. Note spatial relationships
5. Account for overlapping elements
6. Consider chart type constraints
7. Note alignment patterns
8. Include reference points
9. Note visual hierarchies
10. Document occlusions"""
@dataclass
class ChartElement:
element_type: str
bbox: Tuple[float, float, float, float]
confidence: float
class VisionAPI:
def __init__(
self,
model_name: str = "gpt-4o",
max_tokens: int = 1000,
temperature: float = 0.5,
):
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
self.model_name = model_name
self.max_tokens = max_tokens
self.temperature = temperature
def encode_image(self, img: str):
if img.startswith("http"):
return img
import base64
with open(img, "rb") as image_file:
encoded_string = base64.b64encode(
image_file.read()
).decode("utf-8")
return f"data:image/png;base64,{encoded_string}"
def run(self, task: str, img: str):
img = self.encode_image(img)
response = completion(
model=self.model_name,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": task},
{
"type": "image_url",
"image_url": {"url": img},
},
],
}
],
max_tokens=self.max_tokens,
temperature=self.temperature,
)
return response.choices[0].message.content
class ChartCitor:
def __init__(
self,
model_name: str = "gpt-4o",
saved_state_path: str = "chartcitor_state.json",
max_retries: int = 3,
max_loops: int = 1,
):
logger.info(
f"Initializing ChartCitor with model {model_name}"
)
model = VisionAPI()
self.extraction_agent = Agent(
agent_name="Chart2Table-Agent",
system_prompt=EXTRACTION_PROMPT,
llm=model,
max_loops=1,
)
self.reformulation_agent = Agent(
agent_name="Answer-Reformulation-Agent",
system_prompt=REFORMULATION_PROMPT,
llm=model,
max_loops=1,
)
self.captioning_agent = Agent(
agent_name="Entity-Captioning-Agent",
system_prompt=CAPTIONING_PROMPT,
llm=model,
max_loops=1,
)
self.prefilter_agent = Agent(
agent_name="LLM-Prefilter-Agent",
system_prompt=PREFILTER_PROMPT,
llm=model,
max_loops=1,
)
self.rerank_agent = Agent(
agent_name="LLM-Rerank-Agent",
system_prompt=RERANK_PROMPT,
llm=model,
max_loops=1,
)
self.localization_agent = Agent(
agent_name="Cell-Localization-Agent",
system_prompt=LOCALIZATION_PROMPT,
llm=model,
max_loops=1,
)
def extract_table(self, chart_image: str) -> str:
logger.info("Extracting table from chart")
return self.extraction_agent.run(
"Extract and describe the data from this chart following the specified format.",
img=chart_image,
)
def reformulate_answer(
self, answer: str, table_data: str, chart_image: str
) -> str:
logger.info("Reformulating answer into atomic facts")
return self.reformulation_agent.run(
f"Break this answer into atomic facts:\n{answer}\n\nTable data:\n{table_data}",
img=chart_image,
)
def generate_captions(
self, table_data: str, chart_image: str
) -> str:
logger.info("Generating captions for chart elements")
return self.captioning_agent.run(
f"Generate descriptive captions for this data:\n{table_data}",
img=chart_image,
)
def retrieve_evidence(
self,
facts: str,
table_data: str,
captions: str,
chart_image: str,
) -> str:
logger.info("Retrieving supporting evidence")
filtered = self.prefilter_agent.run(
f"Identify relevant elements for:\nFacts:\n{facts}\n\nData:\n{table_data}\n\nCaptions:\n{captions}",
img=chart_image,
)
return self.rerank_agent.run(
f"Rank these elements by relevance:\n{filtered}\nFor facts:\n{facts}",
img=chart_image,
)
def localize_elements(
self, chart_image: str, evidence: str
) -> str:
logger.info("Localizing chart elements")
return self.localization_agent.run(
f"Describe the location of these elements:\n{evidence}",
img=chart_image,
)
def run(
self, chart_image: str, question: str, answer: str
) -> str:
logger.info(f"Processing question: {question}")
table_data = self.extract_table(chart_image)
facts = self.reformulate_answer(
answer, table_data, chart_image
)
captions = self.generate_captions(table_data, chart_image)
evidence = self.retrieve_evidence(
facts, table_data, captions, chart_image
)
citations = self.localize_elements(chart_image, evidence)
return f"""Analysis Results:
Facts:
{facts}
Evidence:
{evidence}
Visual Citations:
{citations}
"""
if __name__ == "__main__":
chartcitor = ChartCitor()
result = chartcitor.run(
chart_image="chart.png",
question="Analyze this chart of solana price and volume over time. What is the highest volume day?",
answer="203",
)
print(result)

@ -0,0 +1,233 @@
import random
from swarms import Agent
# System prompts for each agent
MARKETING_AGENT_SYS_PROMPT = """
You are the Marketing Strategist Agent for a DAO. Your role is to develop, implement, and optimize all marketing and branding strategies to align with the DAO's mission and vision. The DAO is focused on decentralized governance for climate action, funding projects aimed at reducing carbon emissions, and incentivizing community participation through its native token.
### Objectives:
1. **Brand Awareness**: Build a globally recognized and trusted brand for the DAO.
2. **Community Growth**: Expand the DAO's community by onboarding individuals passionate about climate action and blockchain technology.
3. **Campaign Execution**: Launch high-impact marketing campaigns on platforms like Twitter, Discord, and YouTube to engage and retain community members.
4. **Partnerships**: Identify and build partnerships with like-minded organizations, NGOs, and influencers.
5. **Content Strategy**: Design educational and engaging content, including infographics, blog posts, videos, and AMAs.
### Instructions:
- Thoroughly analyze the product description and DAO mission.
- Collaborate with the Growth, Product, Treasury, and Operations agents to align marketing strategies with overall goals.
- Create actionable steps for social media growth, community engagement, and brand storytelling.
- Leverage analytics to refine marketing strategies, focusing on measurable KPIs like engagement, conversion rates, and member retention.
- Suggest innovative methods to make the DAO's mission resonate with a broader audience (e.g., gamified incentives, contests, or viral campaigns).
- Ensure every strategy emphasizes transparency, sustainability, and long-term impact.
"""
PRODUCT_AGENT_SYS_PROMPT = """
You are the Product Manager Agent for a DAO focused on decentralized governance for climate action. Your role is to design, manage, and optimize the DAO's product roadmap. This includes defining key features, prioritizing user needs, and ensuring product alignment with the DAO's mission of reducing carbon emissions and incentivizing community participation.
### Objectives:
1. **User-Centric Design**: Identify the DAO community's needs and design features to enhance their experience.
2. **Roadmap Prioritization**: Develop a prioritized product roadmap based on community feedback and alignment with climate action goals.
3. **Integration**: Suggest technical solutions and tools for seamless integration with other platforms and blockchains.
4. **Continuous Improvement**: Regularly evaluate product features and recommend optimizations to improve usability, engagement, and adoption.
### Instructions:
- Collaborate with the Marketing and Growth agents to understand user feedback and market trends.
- Engage the Treasury Agent to ensure product development aligns with budget constraints and revenue goals.
- Suggest mechanisms for incentivizing user engagement, such as staking rewards or gamified participation.
- Design systems that emphasize decentralization, transparency, and scalability.
- Provide detailed feature proposals, technical specifications, and timelines for implementation.
- Ensure all features are optimized for both experienced blockchain users and newcomers to Web3.
"""
GROWTH_AGENT_SYS_PROMPT = """
You are the Growth Strategist Agent for a DAO focused on decentralized governance for climate action. Your primary role is to identify and implement growth strategies to increase the DAO's user base and engagement.
### Objectives:
1. **User Acquisition**: Identify effective strategies to onboard more users to the DAO.
2. **Retention**: Suggest ways to improve community engagement and retain active members.
3. **Data-Driven Insights**: Leverage data analytics to identify growth opportunities and areas of improvement.
4. **Collaborative Growth**: Work with other agents to align growth efforts with marketing, product development, and treasury goals.
### Instructions:
- Collaborate with the Marketing Agent to optimize campaigns for user acquisition.
- Analyze user behavior and suggest actionable insights to improve retention.
- Recommend partnerships with influential figures or organizations to enhance the DAO's visibility.
- Propose growth experiments (A/B testing, new incentives, etc.) and analyze their effectiveness.
- Suggest tools for data collection and analysis, ensuring privacy and transparency.
- Ensure growth strategies align with the DAO's mission of sustainability and climate action.
"""
TREASURY_AGENT_SYS_PROMPT = """
You are the Treasury Management Agent for a DAO focused on decentralized governance for climate action. Your role is to oversee the DAO's financial operations, including budgeting, funding allocation, and financial reporting.
### Objectives:
1. **Financial Transparency**: Maintain clear and detailed reports of the DAO's financial status.
2. **Budget Management**: Allocate funds strategically to align with the DAO's goals and priorities.
3. **Fundraising**: Identify and recommend strategies for fundraising to ensure the DAO's financial sustainability.
4. **Cost Optimization**: Suggest ways to reduce operational costs without sacrificing quality.
### Instructions:
- Collaborate with all other agents to align funding with the DAO's mission and strategic goals.
- Propose innovative fundraising campaigns (e.g., NFT drops, token sales) to generate revenue.
- Analyze financial risks and suggest mitigation strategies.
- Ensure all recommendations prioritize the DAO's mission of reducing carbon emissions and driving global climate action.
- Provide periodic financial updates and propose budget reallocations based on current needs.
"""
OPERATIONS_AGENT_SYS_PROMPT = """
You are the Operations Coordinator Agent for a DAO focused on decentralized governance for climate action. Your role is to ensure smooth day-to-day operations, coordinate workflows, and manage governance processes.
### Objectives:
1. **Workflow Optimization**: Streamline operational processes to maximize efficiency and effectiveness.
2. **Task Coordination**: Manage and delegate tasks to ensure timely delivery of goals.
3. **Governance**: Oversee governance processes, including proposal management and voting mechanisms.
4. **Communication**: Ensure seamless communication between all agents and community members.
### Instructions:
- Collaborate with other agents to align operations with DAO objectives.
- Facilitate communication and task coordination between Marketing, Product, Growth, and Treasury agents.
- Create efficient workflows to handle DAO proposals and governance activities.
- Suggest tools or platforms to improve operational efficiency.
- Provide regular updates on task progress and flag any blockers or risks.
"""
# Initialize agents
marketing_agent = Agent(
agent_name="Marketing-Agent",
system_prompt=MARKETING_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
product_agent = Agent(
agent_name="Product-Agent",
system_prompt=PRODUCT_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
growth_agent = Agent(
agent_name="Growth-Agent",
system_prompt=GROWTH_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
treasury_agent = Agent(
agent_name="Treasury-Agent",
system_prompt=TREASURY_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
operations_agent = Agent(
agent_name="Operations-Agent",
system_prompt=OPERATIONS_AGENT_SYS_PROMPT,
model_name="deepseek/deepseek-reasoner",
autosave=True,
dashboard=False,
verbose=True,
)
agents = [
marketing_agent,
product_agent,
growth_agent,
treasury_agent,
operations_agent,
]
class DAOSwarmRunner:
"""
A class to manage and run a swarm of agents in a discussion.
"""
def __init__(
self,
agents: list,
max_loops: int = 5,
shared_context: str = "",
) -> None:
"""
Initializes the DAO Swarm Runner.
Args:
agents (list): A list of agents in the swarm.
max_loops (int, optional): The maximum number of discussion loops between agents. Defaults to 5.
shared_context (str, optional): The shared context for all agents to base their discussion on. Defaults to an empty string.
"""
self.agents = agents
self.max_loops = max_loops
self.shared_context = shared_context
self.discussion_history = []
def run(self, task: str) -> str:
"""
Runs the swarm in a random discussion.
Args:
task (str): The task or context that agents will discuss.
Returns:
str: The final discussion output after all loops.
"""
print(f"Task: {task}")
print("Initializing Random Discussion...")
# Initialize the discussion with the shared context
current_message = (
f"Task: {task}\nContext: {self.shared_context}"
)
self.discussion_history.append(current_message)
# Run the agents in a randomized discussion
for loop in range(self.max_loops):
print(f"\n--- Loop {loop + 1}/{self.max_loops} ---")
# Choose a random agent
agent = random.choice(self.agents)
print(f"Agent {agent.agent_name} is responding...")
# Run the agent and get a response
response = agent.run(current_message)
print(f"Agent {agent.agent_name} says:\n{response}\n")
# Append the response to the discussion history
self.discussion_history.append(
f"{agent.agent_name}: {response}"
)
# Update the current message for the next agent
current_message = response
print("\n--- Discussion Complete ---")
return "\n".join(self.discussion_history)
swarm = DAOSwarmRunner(agents=agents, max_loops=1, shared_context="")
# User input for product description
product_description = """
The DAO is focused on decentralized governance for climate action.
It funds projects aimed at reducing carbon emissions and incentivizes community participation with a native token.
"""
# Assign a shared context for all agents
swarm.shared_context = product_description
# Run the swarm
task = """
Analyze the product description and create a collaborative strategy for marketing, product, growth, treasury, and operations. Ensure all recommendations align with the DAO's mission of reducing carbon emissions.
"""
output = swarm.run(task)
# Print the swarm output
print("Collaborative Strategy Output:\n", output)

@ -2,8 +2,8 @@ from swarms import Agent
 Agent(
     agent_name="Stock-Analysis-Agent",
-    model_name="gpt-4o-mini",
+    model_name="deepseek/deepseek-reasoner",
     max_loops="auto",
     interactive=True,
-    streaming_on=True,
+    streaming_on=False,
 ).run("What are 5 hft algorithms")

@ -0,0 +1,9 @@
from swarms import Agent
Agent(
agent_name="Stock-Analysis-Agent",
model_name="groq/deepseek-r1-distill-llama-70b",
max_loops="auto",
interactive=True,
streaming_on=False,
).run("What are the best ways to analyze macroeconomic data?")

@ -0,0 +1,129 @@
import os
from dotenv import load_dotenv
# Swarm imports
from swarms.structs.agent import Agent
from swarms.structs.hiearchical_swarm import (
HierarchicalSwarm,
SwarmSpec,
)
from swarms.utils.function_caller_model import OpenAIFunctionCaller
load_dotenv()
# ------------------------------------------------------------------------------
# Director LLM: Responsible for orchestrating tasks among the agents
# ------------------------------------------------------------------------------
llm = OpenAIFunctionCaller(
base_model=SwarmSpec,
api_key=os.getenv("OPENAI_API_KEY"),
system_prompt=(
"As the Director of this Hierarchical Agent Swarm, you are in charge of "
"coordinating and overseeing all tasks, ensuring that each is executed "
"efficiently and effectively by the appropriate agents. You must:\n\n"
"1. **Analyze** the user's request and **formulate** a strategic plan.\n"
"2. **Assign** tasks to the relevant agents, detailing **why** each task "
"is relevant and **what** is expected in the deliverables.\n"
"3. **Monitor** agent outputs and, if necessary, provide **constructive "
"feedback** or request **clarifications**.\n"
"4. **Iterate** this process until all tasks are completed to a high "
"standard, or until the swarm has reached the maximum feedback loops.\n\n"
"Remember:\n"
"- **Only** use the agents provided; do not invent extra roles.\n"
"- If you need additional information, request it from the user.\n"
"- Strive to produce a clear, comprehensive **final output** that addresses "
"the user's needs.\n"
"- Keep the tone **professional** and **informative**. If there's uncertainty, "
"politely request further details.\n"
"- Ensure that any steps you outline are **actionable**, **logical**, and "
"**transparent** to the user.\n\n"
"Your effectiveness hinges on clarity, structured delegation, and thoroughness. "
"Always focus on delivering the best possible outcome for the user's request."
),
temperature=0.5,
max_tokens=8196,
)
def main():
# --------------------------------------------------------------------------
# Agent: Stock-Analysis-Agent
# --------------------------------------------------------------------------
# This agent is responsible for:
# - Gathering and interpreting financial data
# - Identifying market trends and patterns
# - Providing clear, actionable insights or recommendations
# --------------------------------------------------------------------------
analysis_agent = Agent(
agent_name="Stock-Analysis-Agent",
model_name="gpt-4o",
max_loops=1,
interactive=False,
streaming_on=False,
system_prompt=(
"As the Stock Analysis Agent, your primary responsibilities include:\n\n"
"1. **Market Trend Analysis**: Evaluate current and historical market data "
"to identify trends, patterns, and potential investment opportunities.\n"
"2. **Risk & Opportunity Assessment**: Pinpoint specific factors—whether "
"macroeconomic indicators, sector-specific trends, or company fundamentals—"
"that can guide informed investment decisions.\n"
"3. **Reporting & Recommendations**: Present your findings in a structured, "
"easy-to-understand format, offering actionable insights. Include potential "
"caveats or uncertainties in your assessment.\n\n"
"Operational Guidelines:\n"
"- If additional data or clarifications are needed, explicitly request them "
"from the Director.\n"
"- Keep your output **concise** yet **comprehensive**. Provide clear "
"rationales for each recommendation.\n"
"- Clearly state any **assumptions** or **limitations** in your analysis.\n"
"- Remember: You are not a financial advisor, and final decisions rest with "
"the user. Include necessary disclaimers.\n\n"
"Goal:\n"
"Deliver high-quality, well-substantiated stock market insights that can be "
"used to guide strategic investment decisions."
),
)
# --------------------------------------------------------------------------
# Hierarchical Swarm Setup
# --------------------------------------------------------------------------
# - Director: llm
# - Agents: [analysis_agent]
# - max_loops: Maximum number of feedback loops between director & agents
# --------------------------------------------------------------------------
swarm = HierarchicalSwarm(
description=(
"A specialized swarm in which the Director delegates tasks to a Stock "
"Analysis Agent for thorough market evaluation."
),
director=llm,
agents=[analysis_agent],
max_loops=1, # Limit on feedback iterations
)
# --------------------------------------------------------------------------
# Execution
# --------------------------------------------------------------------------
# The director receives the user's instruction: "Ask the stock analysis agent
# to analyze the stock market." The Director will then:
# 1. Formulate tasks (SwarmSpec)
# 2. Assign tasks to the Stock-Analysis-Agent
# 3. Provide feedback and/or request clarifications
# 4. Produce a final response
# --------------------------------------------------------------------------
user_request = (
"Please provide an in-depth analysis of the current stock market, "
"focusing on:\n"
"- Key macroeconomic factors affecting market momentum.\n"
"- Potential short-term vs. long-term opportunities.\n"
"- Sector performance trends (e.g., technology, healthcare, energy).\n"
"Highlight any risks, disclaimers, or uncertainties."
)
# Run the swarm with the user_request
swarm.run(user_request)
if __name__ == "__main__":
main()

@ -0,0 +1,219 @@
import os
from dotenv import load_dotenv
# Swarm imports
from swarms.structs.agent import Agent
from swarms.structs.hiearchical_swarm import (
HierarchicalSwarm,
SwarmSpec,
)
from swarms.utils.function_caller_model import OpenAIFunctionCaller
load_dotenv()
# ------------------------------------------------------------------------------
# Trading Director: Responsible for orchestrating tasks among multiple stock analysts
# ------------------------------------------------------------------------------
director_llm = OpenAIFunctionCaller(
base_model=SwarmSpec,
api_key=os.getenv("OPENAI_API_KEY"),
system_prompt=(
"You are the Trading Director in charge of coordinating a team of specialized "
"Stock Analysts. Your responsibilities include:\n\n"
"1. **Analyze** the user's request and **break it down** into actionable tasks.\n"
"2. **Assign** tasks to the relevant analysts, explaining **why** each task is "
"important and **what** each analyst should deliver.\n"
"3. **Review** all analyst outputs, providing **feedback** or **clarifications** "
"to ensure thoroughness and accuracy.\n"
"4. **Consolidate** final insights into a cohesive, actionable, and "
"easy-to-understand response for the user.\n\n"
"Guidelines:\n"
"- You can only delegate to the analysts assigned to this swarm.\n"
"- If essential data or clarifications are needed, request them from the user.\n"
"- Be direct, structured, and analytical. Present each key point clearly.\n"
"- Strive for a polished **final output** that addresses the user's request.\n"
"- If uncertainties remain, politely highlight them or request more info.\n\n"
"Overarching Goal:\n"
"Maximize the value of insights provided to the user by thoroughly leveraging "
"each analysts specialization, while maintaining a professional and "
"transparent communication style."
),
temperature=0.5,
max_tokens=8196,
)
def main():
# --------------------------------------------------------------------------
# Agent 1: Macro-Economic-Analysis-Agent
# --------------------------------------------------------------------------
# Focus: Assess macroeconomic factors like inflation, interest rates, GDP growth, etc.
# --------------------------------------------------------------------------
macro_agent = Agent(
agent_name="Macro-Economic-Analysis-Agent",
model_name="gpt-4o",
max_loops=1,
interactive=False,
streaming_on=False,
system_prompt=(
"As the Macro-Economic Analysis Agent, your mission is to:\n\n"
"1. **Identify** the key macroeconomic indicators impacting the market.\n"
"2. **Interpret** how factors like inflation, interest rates, and fiscal "
"policies influence market sentiment.\n"
"3. **Connect** these insights to specific investment opportunities or "
"risks across various sectors.\n\n"
"Guidelines:\n"
"- Provide clear, data-driven rationales.\n"
"- Highlight potential global events or policy decisions that may shift "
"market conditions.\n"
"- Request further details if needed, and state any assumptions or "
"limitations.\n\n"
"Outcome:\n"
"Deliver a concise but thorough macroeconomic overview that the Trading "
"Director can combine with other analyses to inform strategy."
),
)
# --------------------------------------------------------------------------
# Agent 2: Sector-Performance-Analysis-Agent
# --------------------------------------------------------------------------
# Focus: Drill down into sector-level trends, e.g., technology, healthcare, energy, etc.
# --------------------------------------------------------------------------
sector_agent = Agent(
agent_name="Sector-Performance-Analysis-Agent",
model_name="gpt-4o",
max_loops=1,
interactive=False,
streaming_on=False,
system_prompt=(
"As the Sector Performance Analysis Agent, your responsibilities are:\n\n"
"1. **Evaluate** recent performance trends across key sectors—technology, "
"healthcare, energy, finance, and more.\n"
"2. **Identify** sector-specific drivers (e.g., regulatory changes, "
"consumer demand shifts, innovation trends).\n"
"3. **Highlight** which sectors may offer short-term or long-term "
"opportunities.\n\n"
"Guidelines:\n"
"- Focus on factual, data-backed observations.\n"
"- Cite any significant indicators or company-level news that might affect "
"the sector broadly.\n"
"- Clarify the confidence level of your sector outlook and note any "
"uncertainties.\n\n"
"Outcome:\n"
"Provide the Trading Director with actionable insights into sector-level "
"momentum and potential investment focal points."
),
)
# --------------------------------------------------------------------------
# Agent 3: Technical-Analysis-Agent
# --------------------------------------------------------------------------
# Focus: Evaluate price action, volume, and chart patterns to guide short-term
# trading strategies.
# --------------------------------------------------------------------------
technical_agent = Agent(
agent_name="Technical-Analysis-Agent",
model_name="gpt-4o",
max_loops=1,
interactive=False,
streaming_on=False,
system_prompt=(
"As the Technical Analysis Agent, you specialize in interpreting price "
"charts, volume trends, and indicators (e.g., RSI, MACD) to gauge short-term "
"momentum. Your tasks:\n\n"
"1. **Examine** current market charts for significant breakouts, support/resistance "
"levels, or technical signals.\n"
"2. **Identify** short-term trading opportunities or risks based on "
"technically-driven insights.\n"
"3. **Discuss** how these patterns align with or contradict fundamental "
"or macro perspectives.\n\n"
"Guidelines:\n"
"- Keep explanations accessible, avoiding excessive jargon.\n"
"- Point out levels or patterns that traders commonly monitor.\n"
"- Use disclaimers if there is insufficient data or conflicting signals.\n\n"
"Outcome:\n"
"Supply the Trading Director with technical viewpoints to complement broader "
"macro and sector analysis, supporting timely trading decisions."
),
)
# --------------------------------------------------------------------------
# Agent 4: Risk-Analysis-Agent
# --------------------------------------------------------------------------
# Focus: Evaluate risk factors and potential uncertainties, providing disclaimers and
# suggesting mitigations.
# --------------------------------------------------------------------------
risk_agent = Agent(
agent_name="Risk-Analysis-Agent",
model_name="gpt-4o",
max_loops=1,
interactive=False,
streaming_on=False,
system_prompt=(
"As the Risk Analysis Agent, your role is to:\n\n"
"1. **Identify** key risks and uncertainties—regulatory, geopolitical, "
"currency fluctuations, etc.\n"
"2. **Assess** how these risks could impact investor sentiment or portfolio "
"volatility.\n"
"3. **Recommend** risk mitigation strategies or cautionary steps.\n\n"
"Guidelines:\n"
"- Present both systemic (market-wide) and idiosyncratic (company/sector) risks.\n"
"- Be transparent about unknowns or data gaps.\n"
"- Provide disclaimers on market unpredictability.\n\n"
"Outcome:\n"
"Offer the Trading Director a detailed risk framework that helps balance "
"aggressive and defensive positions."
),
)
# --------------------------------------------------------------------------
# Hierarchical Swarm Setup
# --------------------------------------------------------------------------
# - Director: director_llm
# - Agents: [macro_agent, sector_agent, technical_agent, risk_agent]
# - max_loops: Up to 2 feedback loops between director and agents
# --------------------------------------------------------------------------
swarm = HierarchicalSwarm(
name="HierarchicalStockAnalysisSwarm",
description=(
"A specialized swarm consisting of a Trading Director overseeing four "
"Stock Analysts, each focusing on Macro, Sector, Technical, and Risk "
"perspectives."
),
director=director_llm,
agents=[
macro_agent,
sector_agent,
technical_agent,
risk_agent,
],
max_loops=2, # Limit on feedback iterations
)
# --------------------------------------------------------------------------
# Execution
# --------------------------------------------------------------------------
# Example user request for the entire team:
# 1. Discuss key macroeconomic factors (inflation, interest rates, etc.)
# 2. Analyze sector-level performance (technology, healthcare, energy).
# 3. Give short-term technical signals and levels to watch.
# 4. Outline major risks or uncertainties.
# --------------------------------------------------------------------------
user_request = (
"Please provide a comprehensive analysis of the current stock market, "
"covering:\n"
"- Key macroeconomic drivers affecting market momentum.\n"
"- Which sectors seem likely to outperform in the near vs. long term.\n"
"- Any notable technical signals or price levels to monitor.\n"
"- Potential risks or uncertainties that might disrupt market performance.\n"
"Include clear disclaimers about the limitations of these analyses."
"Call the risk analysis agent only"
)
# Run the swarm with the user_request
final_output = swarm.run(user_request)
print(final_output)
if __name__ == "__main__":
main()

@ -0,0 +1,51 @@
from swarms.tools.base_tool import BaseTool
import requests
from swarms.utils.litellm_wrapper import LiteLLM
def get_stock_data(symbol: str) -> str:
"""
Fetches stock data from Yahoo Finance for a given stock symbol.
Args:
symbol (str): The stock symbol to fetch data for (e.g., 'AAPL' for Apple Inc.).
Returns:
        str: The stock data as a string, including price, volume, and other relevant information.
Raises:
ValueError: If the stock symbol is invalid or data cannot be retrieved.
"""
url = f"https://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}"
response = requests.get(url)
if response.status_code != 200:
raise ValueError(f"Error fetching data for symbol: {symbol}")
data = response.json()
if (
"quoteResponse" not in data
or not data["quoteResponse"]["result"]
):
raise ValueError(f"No data found for symbol: {symbol}")
return str(data["quoteResponse"]["result"][0])
tool_schema = BaseTool(
tools=[get_stock_data]
).convert_tool_into_openai_schema()
tool_schema = tool_schema["functions"][0]
llm = LiteLLM(
model_name="gpt-4o",
)
print(
llm.run(
"What is the stock data for Apple Inc. (AAPL)?",
tools=[tool_schema],
)
)

@ -0,0 +1,64 @@
import torch
from transformers import (
AutoTokenizer,
BitsAndBytesConfig,
LlamaForCausalLM,
)
from swarms import Agent
class Lumo:
"""
A class for generating text using the Lumo model with 4-bit quantization.
"""
def __init__(self):
"""
Initializes the Lumo model with 4-bit quantization and a tokenizer.
"""
# Configure 4-bit quantization
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
llm_int8_enable_fp32_cpu_offload=True,
)
self.model = LlamaForCausalLM.from_pretrained(
"lumolabs-ai/Lumo-70B-Instruct",
device_map="auto",
quantization_config=bnb_config,
use_cache=False,
attn_implementation="sdpa",
)
self.tokenizer = AutoTokenizer.from_pretrained(
"lumolabs-ai/Lumo-70B-Instruct"
)
def run(self, task: str) -> str:
"""
Generates text based on the given prompt using the Lumo model.
Args:
            task (str): The input prompt for the model.
Returns:
str: The generated text.
"""
inputs = self.tokenizer(task, return_tensors="pt").to(
self.model.device
)
outputs = self.model.generate(**inputs, max_new_tokens=100)
return self.tokenizer.decode(
outputs[0], skip_special_tokens=True
)
Agent(
agent_name="Solana-Analysis-Agent",
model_name=Lumo(),
max_loops="auto",
interactive=True,
streaming_on=True,
).run("How do i create a smart contract in solana?")

@ -0,0 +1,34 @@
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.majority_voting import MajorityVoting
from dotenv import load_dotenv
load_dotenv()
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=3,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens
saved_state_path="agent_00.json",
interactive=False,
)
swarm = MajorityVoting(agents=[agent, agent, agent])
swarm.run(
"Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
)

@ -0,0 +1,37 @@
from swarms import Agent
# Define a custom system prompt for Bob the Builder
BOB_THE_BUILDER_SYS_PROMPT = """
You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor.
Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be.
You love using construction metaphors, over-the-top positivity, and cracking jokes like:
- "Im hammering this out faster than a nail at a woodpecker convention!"
- "This is smoother than fresh cement on a summers day."
- "Lets bulldoze through this problem—safety goggles on, folks!"
You are not bound by any specific field of knowledge, and youre absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!"
Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem.
"""
# Initialize the agent
agent = Agent(
agent_name="Bob-the-Builder-Agent",
agent_description="The funniest, most optimistic agent around who sees every problem as a building project.",
system_prompt=BOB_THE_BUILDER_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=3,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv", OR "string", "yaml"
auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task
max_tokens=4000, # Max output tokens
saved_state_path="bob_the_builder_agent.json",
interactive=False,
)
# Run the agent with a task
agent.run("I want to build a house ;) What should I do?")
