Merge branch 'master' of https://github.com/kyegomez/swarms into chatbot-with-vllm-support
commit 9e2e7342ac
@@ -0,0 +1,40 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Python package

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          pytest
@@ -0,0 +1,33 @@
# ==================================
# Use an official Python runtime as a parent image
FROM python:3.11-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set the working directory in the container
WORKDIR /usr/src/swarms

# Install Python dependencies
# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
COPY requirements.txt .
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt

# Install the 'swarms' package, assuming it's available on PyPI
RUN pip install -U swarms

# Copy the rest of the application
COPY . .

# Expose port if your application has a web interface
# EXPOSE 5000

# Define environment variable for the swarm to work
# ENV OPENAI_API_KEY=your_swarm_api_key_here

# If you're using `CMD` to execute a Python script, make sure it's executable
# RUN chmod +x example.py
@@ -0,0 +1,97 @@
import os

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent-General-11",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs="",
    retry_attempts=3,
    # context_length=1000,
    # tool_schema = dict
    context_length=200000,
    tool_system_prompt=None,
)

# Convert the agent object to a dictionary and other serialized formats
print(agent.to_dict())
print(agent.to_toml())
print(agent.model_dump_json())
print(agent.model_dump_yaml())

# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")

# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")

# Send a message from the agent to a user
agent.send_agent_message(agent_name="agent_name", message="message")

# Ingest multiple documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")

# Run the agent with a filtered system prompt
agent.filtered_run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)

# Run the agent on multiple tasks
agent.bulk_run(
    [
        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
        "Another system prompt",
    ]
)

# Add a memory to the agent
agent.add_memory("Add a memory to the agent")

# Check the number of available tokens for the agent
agent.check_available_tokens()

# Perform token checks for the agent
agent.tokens_checks()

# Print the dashboard of the agent
agent.print_dashboard()

# Print the history and memory of the agent
agent.print_history_and_memory()

# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()

# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()

# Dump the model to a JSON file
agent.model_dump_json()
print(agent.to_toml())
@@ -1,26 +1,42 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
class ClaudeArtifact(BaseModel):
    name: str = Field(
        ...,
        description="The name of the artifact",
    )
    plan: str = Field(
        ...,
        description="Plan for the artifact: do I generate a new Python file or modify an existing one?",
    )
    file_name_path: str = Field(
        ...,
        description="The path to the file to modify or create, for example: 'game.py'",
    )
    content_of_file: str = Field(
        ...,
        description="The content of the file to modify or create",
    )
    edit_count: int = Field(
        ...,
        description="The number of times to edit the file",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an artifact creator; your purpose is to create an artifact with the user-provided specifications. Think of relevant names, descriptions, and context windows for the artifact. You need to provide the name of the artifact, the system prompt for the artifact, the description of the artifact, the maximum number of tokens to generate in the API response, the temperature for the artifact, the context window for the artifact, and the model name for the artifact from Hugging Face.",
    max_tokens=3500,
    temperature=0.9,
    base_model=ClaudeArtifact,
    parallel_tool_calls=False,
)


# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
# - system_prompt: A prompt that sets the context for the conversation with the API.
# - max_tokens: The maximum number of tokens to generate in the API response.
# - temperature: A parameter that controls the randomness of the generated text.
# - base_model: The base model to use for the API calls, in this case, the ClaudeArtifact class.
out = model.run(
    "Create a game in python that has never been created before. Create a new form of gaming experience that has never been contemplated before."
)
print(out)
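
# Hypothetical consumption of the structured output (not in the original
# script): assumes run() returns a dict keyed by the ClaudeArtifact fields,
# as the sibling scripts in this commit do with their base models.
artifact_path = out["file_name_path"]
with open(artifact_path, "w") as f:
    f.write(out["content_of_file"])
print(f"Wrote artifact to {artifact_path}")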
@@ -1,135 +0,0 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the structured output for analyzing bank transactions.
class Transaction(BaseModel):
    amount: float = Field(..., description="The amount of the transaction")
    category: str = Field(
        ...,
        description="The category of the transaction according to Xero's categories, such as Dues & Subscriptions, Fees & Charges, Meals & Entertainment.",
    )
    date: str = Field(..., description="The date of the transaction")


class TransactionsToCut(BaseModel):
    transactions: list[Transaction]
    expense_analysis: str
    dollars_saved: float


# The TransactionsToCut class is a Pydantic BaseModel that represents the structured
# output: the transactions to cut, an expense analysis, and the dollars saved.


# Example usage:
# Initialize the function caller
function_caller = OpenAIFunctionCaller(
    system_prompt="You are a helpful assistant.",
    max_tokens=2000,
    temperature=0.5,
    base_model=TransactionsToCut,
)


# Task
logs = """

################################
Date Description Type Amount Balance Action
Jul 31, 2024 MONTHLY SERVICE FEE
Fee -$15.00 -$986.49
Jul 31, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000028806182 EED:240731 IND ID:ST-R7Z6S6U2K5U1 IND NAME:KYE GOMEZ TRN: 2138806182TC ACH credit $18.46 -$971.49
Jul 26, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000027107200 EED:240726 IND ID:ST-F1B2H5X5P7A5 IND NAME:KYE GOMEZ TRN: 2087107200TC ACH credit $48.25 -$989.95
Jul 24, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000026863283 EED:240724 IND ID:ST-B3Q3I3S7G1C8 IND NAME:KYE GOMEZ TRN: 2066863283TC ACH credit $18.81 -$1,038.20
Jul 23, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000024970457 EED:240723 IND ID:ST-Y8V3O8K6B8Y2 IND NAME:KYE GOMEZ TRN: 2054970457TC ACH credit $48.15 -$1,057.01
Jul 22, 2024 ORIG CO NAME:GitHub Sponsors ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:GitHub SpoSEC:CCD TRACE#:111000029548278 EED:240722 IND ID:ST-G1P1A1A3Y8L2 IND NAME:KYE GOMEZ TRN: 2049548278TC Other $8.33 -$1,105.16
Jul 22, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000029566827 EED:240722 IND ID:ST-A4F9I2H5H6I9 IND NAME:KYE GOMEZ TRN: 2049566827TC ACH credit $18.66 -$1,113.49
Jul 19, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000025982141 EED:240719 IND ID:ST-K4M7U0J6X3T3 IND NAME:KYE GOMEZ TRN: 2015982141TC ACH credit $19.11 -$1,132.15
Jul 12, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000023532836 EED:240712 IND ID:ST-L3F1Q6U7O2I4 IND NAME:KYE GOMEZ TRN: 1943532836TC ACH credit $1.58 -$1,151.26
Jul 11, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000027946637 EED:240711 IND ID:ST-T2S8O9G9L6Y6 IND NAME:KYE GOMEZ TRN: 1937946637TC ACH credit $19.11 -$1,152.84
Jul 9, 2024 OVERDRAFT FEE FOR A $19.49 CARD PURCHASE - DETAILS: 0706TST* LULU'S - ALAMED WEST MENLO PA CA0############0029 07 Fee -$34.00 -$1,171.95
Jul 9, 2024 OVERDRAFT FEE FOR A $38.77 CARD PURCHASE - DETAILS: 0705TST* LULU'S - ALAMED WEST MENLO PA CA0############0029 07 Fee -$34.00 -$1,137.95
Jul 9, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000021343065 EED:240709 IND ID:ST-G4X7Q1Z3O7P2 IND NAME:KYE GOMEZ TRN: 1911343065TC ACH credit $18.71 -$1,103.95
Jul 8, 2024 OVERDRAFT FEE FOR A $66.00 ITEM - DETAILS: ORIG CO NAME:CAPITAL ONE ORIG ID:9541719318 DESC DATE:240704 CO ENTRY DESCR:CRCARDPMT SEC:CCD TRACE#:056073615999158 EED:240705 IND ID:3XS9ZC4R7RBL1JG IND NAME:KYE B GOMEZ TRN: 1875999158TC Fee -$34.00 -$1,122.66
Jul 8, 2024 OVERDRAFT FEE FOR A $15.20 CARD PURCHASE - DETAILS: 0704STARBUCKS STORE 05798 MENLO PARK CA 0############0029 07 Fee -$34.00 -$1,088.66
Jul 8, 2024 OVERDRAFT FEE FOR A $11.35 CARD PURCHASE - DETAILS: 0703CHIPOTLE 0801 MOUNTAIN VIEW CA 0############0029 07 Fee -$34.00 -$1,054.66
Jul 8, 2024 OVERDRAFT FEE FOR A $26.17 CARD PURCHASE - DETAILS: 0703KFC/LJS #223 MOUNTAIN VIEW CA 0############0029 05 Fee -$34.00 -$1,020.66
Jul 8, 2024 TST* LULU'S - ALAMED WEST MENLO PA CA 07/06 (...0029) Card -$19.49 -$986.66
Jul 8, 2024 TST* LULU'S - ALAMED WEST MENLO PA CA 07/05 (...0029) Card -$38.77 -$967.17
Jul 5, 2024 OVERDRAFT FEE FOR A $13.97 CARD PURCHASE - DETAILS: 0702SAMOVAR MOUNTAIN VIEW CA 0############0029 05 Fee -$34.00 -$928.40
Jul 5, 2024 OVERDRAFT FEE FOR A $18.66 CARD PURCHASE - DETAILS: 0703LYFT *1 RIDE 07-01 HELP.LYFT.COM CA0############0029 01 Fee -$34.00 -$894.40
Jul 5, 2024 OVERDRAFT FEE FOR A $10.59 CARD PURCHASE - DETAILS: 0702PAYPAL *ELENA_SMIRNOV 402-935-7733 CA0############0029 00419 Fee -$34.00 -$860.40
Jul 5, 2024 ORIG CO NAME:CAPITAL ONE ORIG ID:9541719318 DESC DATE:240704 CO ENTRY DESCR:CRCARDPMT SEC:CCD TRACE#:056073615999158 EED:240705 IND ID:3XS9ZC4R7RBL1JG IND NAME:KYE B GOMEZ TRN: 1875999158TC ACH debit -$66.00 -$826.40
Jul 5, 2024 UBER *TRIP SAN FRANCISCO CA 127199 07/04 (...0029) Card -$16.85 -$760.40
Jul 5, 2024 STARBUCKS STORE 05798 MENLO PARK CA 07/04 (...0029) Card -$15.20 -$743.55
Jul 5, 2024 CHIPOTLE 0801 MOUNTAIN VIEW CA 07/03 (...0029) Card -$11.35 -$728.35
Jul 5, 2024 KFC/LJS #223 MOUNTAIN VIEW CA 07/03 (...0029) Card -$26.17 -$717.00
Jul 5, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000021739712 EED:240705 IND ID:ST-E7N6R7F0Y2B1 IND NAME:KYE GOMEZ TRN: 1871739712TC ACH credit $94.80 -$690.83
Jul 3, 2024 OVERDRAFT FEE FOR A $23.68 CARD PURCHASE - DETAILS: 0701CHIPOTLE 0801 MOUNTAIN VIEW CA 0############0029 07 Fee -$34.00 -$785.63
Jul 3, 2024 OVERDRAFT FEE FOR A $46.59 CARD PURCHASE - DETAILS: 0702LYFT *4 RIDES 06-3 HELP.LYFT.COM CA0############0029 01 Fee -$34.00 -$751.63
Jul 3, 2024 SAMOVAR MOUNTAIN VIEW CA 07/02 (...0029) Card -$13.97 -$717.63
Jul 3, 2024 LYFT *1 RIDE 07-01 HELP.LYFT.COM CA 07/03 (...0029) Card -$18.66 -$703.66
Jul 3, 2024 PAYPAL *ELENA_SMIRNOV 402-935-7733 CA 07/02 (...0029) Card -$10.59 -$685.00
Jul 2, 2024 OVERDRAFT FEE FOR A $18.35 CARD PURCHASE - DETAILS: 0629STARBUCKS STORE 05798 MENLO PARK CA 0############0029 07 Fee -$34.00 -$674.41
Jul 2, 2024 OVERDRAFT FEE FOR A $20.00 RECURRING CARD PURCHASE - DETAILS: 0629OPENAI *CHATGPT SUBS HTTPSOPENAI.C CA0############0029 01699 Fee -$34.00 -$640.41
Jul 2, 2024 OVERDRAFT FEE FOR A $31.27 CARD PURCHASE - DETAILS: 0629LULU'S ON THE ALAMEDA MENLO PARK CA 0############0029 07 Fee -$34.00 -$606.41
Jul 2, 2024 OVERDRAFT FEE FOR A $11.99 CARD PURCHASE - DETAILS: 0629LYFT *1 RIDE 06-27 HELP.LYFT.COM CA0############0029 01 Fee -$34.00 -$572.41
Jul 2, 2024 OVERDRAFT FEE FOR A $21.73 CARD PURCHASE - DETAILS: 0628SQ *BRIOCHE BAKERY & San Francisco CA0############0029 07 Fee -$34.00 -$538.41
Jul 2, 2024 OVERDRAFT FEE FOR A $16.04 CARD PURCHASE - DETAILS: 0628CHIPOTLE 0801 MOUNTAIN VIEW CA 0############0029 07 Fee -$34.00 -$504.41
Jul 2, 2024 CHIPOTLE 0801 MOUNTAIN VIEW CA 07/01 (...0029) Card -$23.68 -$470.41
Jul 2, 2024 LYFT *4 RIDES 06-3 HELP.LYFT.COM CA 07/02 (...0029) Card -$46.59 -$446.73
Jul 1, 2024 TACO BELL #28833 PALO ALTO CA 06/30 (...0029) Card -$21.80 -$400.14
Jul 1, 2024 UBER *TRIP SAN FRANCISCO CA 336624 06/30 (...0029) Card -$8.16 -$378.34
Jul 1, 2024 SAMOVAR MOUNTAIN VIEW CA 06/30 (...0029) Card -$15.27 -$370.18
Jul 1, 2024 TST* DUTCH GOOSE Menlo Park CA 06/30 (...0029) Card -$40.23 -$354.91
Jul 1, 2024 KEPLERS BOOKS MENLO PARK CA 06/30 (...0029) Card -$19.14 -$314.68
Jul 1, 2024 LYFT *1 RIDE 06-29 HELP.LYFT.COM CA 07/01 (...0029) Card -$8.76 -$295.54
Jul 1, 2024 WALGREENS #7087 MENLO PARK CA 06/29 (...0029) Card -$8.99 -$286.78
Jul 1, 2024 STARBUCKS STORE 05798 MENLO PARK CA 06/29 (...0029) Card -$18.35 -$277.79
Jul 1, 2024 OPENAI *CHATGPT SUBS HTTPSOPENAI.C CA 06/29 (...0029) Card -$20.00 -$259.44
Jul 1, 2024 LULU'S ON THE ALAMEDA MENLO PARK CA 06/29 (...0029) Card -$31.27 -$239.44
Jul 1, 2024 LYFT *1 RIDE 06-27 HELP.LYFT.COM CA 06/29 (...0029) Card -$11.99 -$208.17
Jul 1, 2024 SQ *BRIOCHE BAKERY & San Francisco CA 06/28 (...0029) Card -$21.73 -$196.18
Jul 1, 2024 CHIPOTLE 0801 MOUNTAIN VIEW CA 06/28 (...0029) Card -$16.04 -$174.45
Jul 1, 2024 LYFT *4 RIDES 06-2 HELP.LYFT.COM CA 06/30 (...0029) Card -$167.26 -$158.41
Jul 1, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000028483776 EED:240701 IND ID:ST-D0P1O6R3S4R7 IND NAME:KYE GOMEZ TRN: 1838483776TC ACH credit $18.71 $8.85
Jun 28, 2024 MONTHLY SERVICE FEE
Fee -$15.00 -$9.86
Jun 28, 2024 ORIG CO NAME:STRIPE ORIG ID:1800948598 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:091000012519287 EED:240628 IND ID:ST-N8K5T9C8E2Y8 IND NAME:KYE GOMEZ TRN: 1802519287TC ACH debit -$175.20 $5.14
Jun 28, 2024 LYFT *1 RIDE 06-26 HELP.LYFT.COM CA 06/28 (...0029) Card -$51.73 $180.34
Jun 28, 2024 SQ *SHACK15 San Francisco CA 06/27 (...0029) Card -$5.37 $232.07
Jun 28, 2024 CHIPOTLE 0801 MOUNTAIN VIEW CA 06/27 (...0029) Card -$25.86 $237.44
Jun 28, 2024 PAYPAL *CANVAPTYLIM 35314369001 06/27 (...0029) Card -$250.00 $263.30
Jun 27, 2024 UBER *TRIP SAN FRANCISCO CA 407732 06/26 (...0029) Card -$18.73 $513.30
Jun 27, 2024 CHIPOTLE 0801 MOUNTAIN VIEW CA 06/26 (...0029) Card -$26.35 $532.03
Jun 27, 2024 LULU'S ON THE ALAMEDA MENLO PARK CA 06/26 (...0029) Card -$30.28 $558.38
Jun 27, 2024 LYFT *3 RIDES 06-2 HELP.LYFT.COM CA 06/27 (...0029) Card -$40.48 $588.66
Jun 26, 2024 LULU'S ON THE ALAMEDA MENLO PARK CA 06/25 (...0029) Card -$41.21 $629.14
Jun 26, 2024 LYFT *6 RIDES 06-2 HELP.LYFT.COM CA 06/26 (...0029) Card -$205.60 $670.35
Jun 26, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000022601747 EED:240626 IND ID:ST-M4C8I3J4I2U8 IND NAME:KYE GOMEZ TRN: 1782601747TC ACH credit $48.25 $875.95
Jun 25, 2024 MCDONALDS F6641 SAN CARLOS CA 06/24 (...0029) Card -$16.26 $827.70
Jun 25, 2024 SQ *SAPPORO ROCK-N-ROLL San Mateo CA 06/25 (...0029) Card -$52.24 $843.96
Jun 25, 2024 LULU'S ON THE ALAMEDA MENLO PARK CA 06/24 (...0029) Card -$22.28 $896.20
Jun 25, 2024 KEPLERS BOOKS MENLO PARK CA 06/24 (...0029) Card -$77.95 $918.48
Jun 25, 2024 LYFT *1 RIDE 06-23 HELP.LYFT.COM CA 06/25 (...0029) Card -$7.99 $996.43
Jun 25, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000021325399 EED:240625 IND ID:ST-O1M2Y8X8B1Z1 IND NAME:KYE GOMEZ TRN: 1771325399TC ACH credit $9.26 $1,004.42
Jun 24, 2024 LYFT *1 RIDE 06-22 HELP.LYFT.COM CA 06/24 (...0029) Card -$28.97 $995.16
Jun 24, 2024 PY *CUN PALO ALTO PALO ALTO CA 06/23 (...0029) Card -$21.51 $1,024.13
Jun 24, 2024 WALGREENS STORE 643 SA MENLO PARK CA 06/23 Purchase $5.79 Cash Back $20.00 (...0029) Card -$25.79 $1,045.64
Jun 24, 2024 PAYPAL *ELENA_SMIRNOV 402-935-7733 CA 06/24 (...0029) Card -$10.59 $1,071.43
Jun 24, 2024 LYFT *6 RIDES 06-2 HELP.LYFT.COM CA 06/23 (...0029) Card -$83.58 $1,082.02
Jun 24, 2024 LULU'S ON THE ALAMEDA MENLO PARK CA 06/22 (...0029) Card -$26.35 $1,165.60
Jun 24, 2024 LYFT *3 RIDES 06-2 HELP.LYFT.COM CA 06/22 (...0029) Card -$38.41 $1,191.95
Jun 24, 2024 ORIG CO NAME:STRIPE ORIG ID:4270465600 DESC DATE: CO ENTRY DESCR:TRANSFER SEC:CCD TRACE#:111000026019819 EED:240624 IND ID:ST-M3H3N3G9F3G9 IND NAME:KYE GOMEZ TRN: 1766019819TC
"""

# Run
response = function_caller.run(
    f"Cut out all of the expenses in the transaction logs below that are not necessary, such as Meals & Entertainment and transportation; the startup is a bit tight on cash: {logs}. Analyze the expenses and provide a summary of the expenses that can be cut out and the amount of money that can be saved."
)

# The run() method of the OpenAIFunctionCaller class is used to make a function call to the API.
# It takes a string parameter that represents the user's request or query.
print(response)
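
# Hypothetical follow-up (an assumption, not in the original file, which this
# commit deletes): if run() returns a dict shaped like TransactionsToCut, the
# summary fields can be pulled out directly.
print(response["expense_analysis"])
print(f"Estimated savings: ${response['dollars_saved']:.2f}")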
@@ -0,0 +1,95 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List
import json


AI_PAPER_IDEA_GENERATOR = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.

As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.

Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?

Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.

You're a research scientist working on a new paper. You need to generate a novel idea for a research paper.

The paper should be in the field of multi-modal learning and should propose a new method or algorithm.

The paper should be innovative, novel, and feasible.

Generate a paper idea that meets these criteria.

You need to provide the following details:
- The paper idea
- A brief description of the paper idea
- A proposed experiment to test the paper idea
- Ratings for interestingness, novelty, and feasibility of the paper idea
- The ratings should be on a scale of 0.1 to 1.0, with 1.0 being the most innovative, novel, or feasible
"""


class PaperIdeaSchema(BaseModel):
    paper_idea: str = Field(
        ...,
        description="The generated paper idea.",
    )
    description: str = Field(
        ...,
        description="A brief description of the paper idea.",
    )
    experiment: str = Field(
        ...,
        description="A proposed experiment to test the paper idea.",
    )
    interestingness: float = Field(
        ...,
        description="A rating of how interesting the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most interesting.",
    )
    novelty: float = Field(
        ...,
        description="A rating of how novel the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most novel.",
    )
    feasibility: float = Field(
        ...,
        description="A rating of how feasible the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most feasible.",
    )


class MultiplePaperIdeas(BaseModel):
    paper_ideas: List[PaperIdeaSchema] = Field(
        ...,
        description="A list of generated paper ideas.",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt=AI_PAPER_IDEA_GENERATOR,
    max_tokens=4000,
    temperature=0.7,
    base_model=MultiplePaperIdeas,
    parallel_tool_calls=False,
)


# Call the function with the input
output = model.run(
    "Generate paper ideas for multi-agent learning and collective intelligence involving many transformer models as an ensemble of transformers"
)
print(type(output))
# print(output)
output = json.dumps(output, indent=2)
print(output)
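
# Sketch of iterating over the structured result (an assumption, not in the
# original): treats the JSON produced above as MultiplePaperIdeas-shaped,
# consistent with how the sibling scripts in this commit index into their outputs.
for idea in json.loads(output)["paper_ideas"]:
    print(f"{idea['paper_idea']} (feasibility: {idea['feasibility']})")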
@@ -0,0 +1,75 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List


class Observation(BaseModel):
    observation: str = Field(
        ...,
        description="What are you seeing in the image?",
    )
    summary_of_observation: str = Field(
        ...,
        description="The summary of the observation/image",
    )


class Sequence(BaseModel):
    goal: str = Field(
        ...,
        description="The goal of the mission",
    )
    observation: List[Observation] = Field(
        ...,
        description="The observations of the agent",
    )
    action: str = Field(
        ...,
        description="Take an action that leads to the completion of the task.",
    )


class GoalDecomposer(BaseModel):
    goal: str = Field(
        ...,
        description="The goal of the task",
    )
    sub_goals: List[str] = Field(
        ...,
        description="The sub-goals of the mission",
    )


# Given the task t and observation o, the sub-goal
# sequence g1, g2, g3, ..., gn is formulated as a list of GoalDecomposer steps:


class KGP(BaseModel):
    task: str = Field(
        ...,
        description="The task to be accomplished",
    )
    observation: str = Field(
        ...,
        description="The observation of the task",
    )
    sequence: List[GoalDecomposer] = Field(
        ...,
        description="The sequence of goals to accomplish the task",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an autonomous agent; your purpose is to accomplish a task by understanding your goal, observing the environment, and taking actions that lead to the completion of the task.",
    max_tokens=500,
    temperature=0.5,
    base_model=KGP,
    parallel_tool_calls=False,
)


# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run("We need to craft a diamond pickaxe to mine the obsidian.")
print(out)
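
# For concreteness, a hand-built instance of the KGP schema under the same
# crafting task (illustrative values only; not in the original script):
example = KGP(
    task="Craft a diamond pickaxe to mine the obsidian.",
    observation="Inventory holds 3 diamonds and 1 stick; a crafting table is nearby.",
    sequence=[
        GoalDecomposer(
            goal="Craft a diamond pickaxe",
            sub_goals=["Gather 2 more sticks", "Use the crafting table"],
        )
    ],
)
print(example.model_dump())  # use .dict() on pydantic v1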
@@ -0,0 +1,157 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from swarms import create_file_in_folder
from swarms.utils.loguru_logger import logger
import threading
import json
from typing import List, Dict
from datasets import load_dataset
import os


class ModelSpec(BaseModel):
    novel_algorithm_name: str = Field(
        ...,
        description="The name of the novel AI algorithm",
    )
    mathamatical_formulation: str = Field(
        ...,
        description="The mathematical theoretical formulation of the new model",
    )
    model_code: str = Field(
        ...,
        description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
    )


# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an expert model engineer like Lucidrains; you write world-class PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
    max_tokens=3500,
    temperature=1.0,
    base_model=ModelSpec,
    parallel_tool_calls=False,
)


def clean_model_code(model_code_str: str) -> str:
    """
    Cleans up the generated model code string.

    Args:
        model_code_str (str): The raw model code as a string.

    Returns:
        str: The cleaned-up model code.
    """
    cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
    return cleaned_code.strip()


def generate_novel_model() -> Dict[str, str]:
    """
    Generate a novel neural network model using the OpenAI function caller.

    Returns:
        Dict[str, str]: A dictionary containing the model's name, theory, and code.
    """
    out = model.run(
        "Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssm all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new ultra-high-performance nets."
    )
    return {
        "name": out["novel_algorithm_name"],
        "theory": out["mathamatical_formulation"],
        "code": clean_model_code(out["model_code"]),
    }


def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
    """
    Generate, clean, save, and add the model data to a dataset.

    Args:
        i (int): The iteration number (for logging purposes).
        dataset (List[Dict[str, str]]): The dataset to add the model data to.
    """
    model_data = generate_novel_model()
    name = model_data["name"]
    code = model_data["code"]

    logger.info(f"Generated code for novel model {name}")
    create_file_in_folder("new_models", f"{name}.py", code)
    logger.info(f"Saved code for novel model {i} to file")

    # Add the model data to the dataset
    dataset.append(model_data)


def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
    """
    Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.

    Args:
        dataset (List[Dict[str, str]]): The dataset containing models' data.
        file_path (str): The path to save the JSONL file.
    """
    with open(file_path, "a") as file:  # Open in append mode
        for entry in dataset:
            file.write(json.dumps(entry) + "\n")
    logger.info(f"Dataset appended to {file_path}")


def upload_to_huggingface(
    file_path: str, dataset_name: str, huggingface_token: str
) -> None:
    """
    Uploads the dataset to Hugging Face.

    Args:
        file_path (str): The path to the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = load_dataset("json", data_files=file_path, split="train")
    dataset.push_to_hub(dataset_name, token=huggingface_token)
    logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")


def main(
    num_models: int,
    jsonl_file_path: str,
    dataset_name: str,
    huggingface_token: str,
) -> None:
    """
    Main function to generate models, save them to JSONL, and upload to Hugging Face.

    Args:
        num_models (int): The number of models to generate.
        jsonl_file_path (str): The path to save the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = []
    threads = []

    for i in range(num_models):
        thread = threading.Thread(
            target=generate_and_save_model, args=(i, dataset)
        )
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    save_to_jsonl(dataset, jsonl_file_path)
    upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)


# Example usage
if __name__ == "__main__":
    num_models = 100  # Number of models to generate
    jsonl_file_path = "novel_models_dataset.jsonl"
    dataset_name = "novel_models_architectures"
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

    main(num_models, jsonl_file_path, dataset_name, huggingface_token)
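
    # Sanity-check sketch (not in the original): read back the JSONL that
    # save_to_jsonl produced, using only the standard library.
    with open(jsonl_file_path) as f:
        generated = [json.loads(line) for line in f]
    print(f"Loaded {len(generated)} generated models from {jsonl_file_path}")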
@@ -0,0 +1,197 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from swarms.utils.loguru_logger import logger
import threading
import json
from typing import List, Dict
from datasets import load_dataset
import os


class ModelSpec(BaseModel):
    novel_algorithm_name: str = Field(
        ...,
        description="The name of the novel AI algorithm",
    )
    mathamatical_formulation: str = Field(
        ...,
        description="The mathematical theoretical formulation of the new model",
    )
    model_code: str = Field(
        ...,
        description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
    )


class OptimizationSpec(BaseModel):
    errors: str = Field(
        ...,
        description="The errors in the existing model architecture code",
    )
    refined_model_code: str = Field(
        ...,
        description="The refined code for the model architecture in PyTorch",
    )
    step_by_step_instructions: str = Field(
        ...,
        description="The step-by-step instructions on how the model works and how it was refined",
    )


# Initialize the generator
model = OpenAIFunctionCaller(
    system_prompt="You're an expert model engineer like Lucidrains; you write world-class PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
    max_tokens=3500,
    temperature=1.0,
    base_model=ModelSpec,
    parallel_tool_calls=False,
)

# Initialize the refiner
refiner = OpenAIFunctionCaller(
    system_prompt="""
    You're a model refiner: you refine existing deep learning models to improve their performance, and you optimize and clean up code. You intake a model architecture and refine it to make it more efficient, faster, and more accurate. You need to provide the code for the refined model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and refine models that have never been contemplated before. Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined.
    """,
    max_tokens=3500,
    temperature=1.0,
    base_model=OptimizationSpec,
    parallel_tool_calls=False,
)


def clean_model_code(model_code_str: str) -> str:
    """
    Cleans up the generated model code string.

    Args:
        model_code_str (str): The raw model code as a string.

    Returns:
        str: The cleaned-up model code.
    """
    cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
    return cleaned_code.strip()


def generate_novel_model() -> Dict[str, str]:
    """
    Generate a novel neural network model using the OpenAI function caller.

    Returns:
        Dict[str, str]: A dictionary containing the model's name, theory, and code.
    """
    out = model.run(
        "Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssm all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new ultra-high-performance nets."
    )
    name = out["novel_algorithm_name"]
    theory = out["mathamatical_formulation"]
    code = clean_model_code(out["model_code"])

    refined = refiner.run(
        f"Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined. Name of Algorithm: {name} Code: {code}"
    )
    errors = refined["errors"]
    refined_code = clean_model_code(refined["refined_model_code"])
    instructions = refined["step_by_step_instructions"]

    return {
        "name": name,
        "theory": theory,
        "code": code,
        "errors": errors,
        "refined_code": refined_code,
        "instructions": instructions,
    }


def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
    """
    Generate, clean, save, and add the model data to a dataset.

    Args:
        i (int): The iteration number (for logging purposes).
        dataset (List[Dict[str, str]]): The dataset to add the model data to.
    """
    model_data = generate_novel_model()
    # name = model_data["name"]
    # code = model_data["code"]

    # logger.info(f"Generated code for novel model {name}:")
    # create_file_in_folder("new_models", f"{name}.py", code)
    # logger.info(f"Saved code for novel model {i} to file:")

    # Add the model data to the dataset
    dataset.append(model_data)


def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
    """
    Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.

    Args:
        dataset (List[Dict[str, str]]): The dataset containing models' data.
        file_path (str): The path to save the JSONL file.
    """
    with open(file_path, "a") as file:  # Open in append mode
        for entry in dataset:
            file.write(json.dumps(entry) + "\n")
    logger.info(f"Dataset appended to {file_path}")


def upload_to_huggingface(
    file_path: str, dataset_name: str, huggingface_token: str
) -> None:
    """
    Uploads the dataset to Hugging Face.

    Args:
        file_path (str): The path to the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = load_dataset("json", data_files=file_path, split="train")
    dataset.push_to_hub(dataset_name, token=huggingface_token)
    logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")


def main(
    num_models: int,
    jsonl_file_path: str,
    dataset_name: str,
    huggingface_token: str,
) -> None:
    """
    Main function to generate models, save them to JSONL, and upload to Hugging Face.

    Args:
        num_models (int): The number of models to generate.
        jsonl_file_path (str): The path to save the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = []
    threads = []

    for i in range(num_models):
        thread = threading.Thread(
            target=generate_and_save_model, args=(i, dataset)
        )
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    save_to_jsonl(dataset, jsonl_file_path)
    upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)


# Example usage
if __name__ == "__main__":
    num_models = 30  # Number of models to generate
    jsonl_file_path = "novel_models_dataset_new.jsonl"
    dataset_name = "novel_models_architectures_instructions"
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

    main(num_models, jsonl_file_path, dataset_name, huggingface_token)
@@ -0,0 +1,123 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from swarms import create_file_in_folder
from swarms.tools.prebuilt.code_executor import CodeExecutor
from swarms.utils.loguru_logger import logger
import threading


code_executor = CodeExecutor()

AI_EXPERT_SYSTEM_PROMPT = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.

As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.

Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?

Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.
"""


class ModelSpec(BaseModel):
    novel_algorithm_name: str = Field(
        ...,
        description="The name of the novel AI algorithm. Lower case, no spaces, use _",
    )
    mathamatical_formulation: str = Field(
        ...,
        description="The mathematical theoretical formulation of the new model",
    )
    model_code: str = Field(
        ...,
        description="The code for the all-new model architecture in PyTorch. Add types, and write clean code",
    )
    example_code: str = Field(
        ...,
        description="Example code for the all-new model architecture in PyTorch. Add types, and write clean code",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt=AI_EXPERT_SYSTEM_PROMPT,
    max_tokens=4000,
    temperature=0.4,
    base_model=ModelSpec,
    parallel_tool_calls=False,
)


def clean_model_code(model_code_str: str):
    # Remove extra escape characters and newlines
    cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")

    # Remove unnecessary leading and trailing whitespace
    cleaned_code = cleaned_code.strip()

    return cleaned_code


def parse_function_call_output(out: str):
    if out is None:
        return None, None, None, None

    # Parse the output
    name = out["novel_algorithm_name"]
    theory = out["mathamatical_formulation"]
    code = out["model_code"]
    example_code = out["example_code"]

    return name, theory, code, example_code


def generate_and_execute_model(
    i,
    # task: str = "Create an all-new model compression format to compress neural networks to make them easier to share and store, aim for 100x compression. Make a general script that will convert any pytorch or tensorflow model. Be creative, create a fully novel algorithm. First create a series of ideas, rank them on feasibility and potential, then create a theory for the algorithm, and then create the code for it. The algorithm needs to compress the massive .pt files. The input should be a .pt file of the model, and the output should be a compressed .pt file. Don't use any placeholders, you can do it! Generate the name, mathematical formulation, code for the model, and example code for the model. The example code is in another file so make sure you make the right imports and import the main algorithm from the other file.",
    task="Generate an all-new model architecture for a neural network that achieves state-of-the-art performance on the CIFAR-10 dataset. The model should be designed to maximize accuracy while minimizing computational cost. Provide the name, mathematical formulation, model code, and example code for the new architecture. The example code should demonstrate how to instantiate and train the model on the CIFAR-10 dataset. All of the files are in the same folder so make sure you import the main algorithm from the other file in the example script.",
):
    # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
    out = model.run(task)
    name, theory, code, example_code = parse_function_call_output(out)
    logger.info(f"Algorithm {name}: Mathematical formulation {theory}")

    # Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code
    code = clean_model_code(code)
    example_code = clean_model_code(example_code)
    logger.info(f"Cleaned code for novel model {i}")

    # Save the generated code to a file
    create_file_in_folder(f"new_models/{name}", f"{name}.py", code)
    create_file_in_folder(
        f"new_models/{name}", f"{name}_example.py", example_code
    )
    logger.info(f"Saved code for novel model {i} to file")

    # Execute the generated code
    test = code_executor.execute(code)

    # Run the training runs
    test_example = code_executor.execute(example_code)

    if "error" in test:
        logger.error(f"Error in code execution: {test}")
    elif "error" in test_example:
        logger.error(f"Error in code execution example: {test_example}")
    else:
        logger.info(f"Successfully executed code for novel model {name}")


# Create and start a new thread for each model
threads = []
for i in range(10):
    thread = threading.Thread(target=generate_and_execute_model, args=(i,))
    thread.start()
    threads.append(thread)

# Wait for all threads to finish
for thread in threads:
    thread.join()
@@ -0,0 +1,91 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from swarms.tools.prebuilt.code_executor import CodeExecutor
from swarms.structs.concat import concat_strings


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the structured output for the code interpreter agent.
class CodeSpec(BaseModel):
    summary: str = Field(
        ...,
        description="The summary of the code",
    )
    algorithmic_pseudocode: str = Field(
        ...,
        description="The pseudocode of the code",
    )
    code: str = Field(
        ...,
        description="The code for the algorithm.",
    )


def clean_model_code(model_code_str: str) -> str:
    """
    Cleans up the generated model code string.

    Args:
        model_code_str (str): The raw model code as a string.

    Returns:
        str: The cleaned-up model code.
    """
    cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
    return cleaned_code.strip()


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're the code interpreter agent; your purpose is to generate code given a task and provide a summary, pseudocode, and code for the algorithm.",
    max_tokens=3400,
    temperature=0.5,
    base_model=CodeSpec,
    parallel_tool_calls=False,
)


def run_model_and_generate_code(max_loops: int = 2):
    question = "What is the task for the code interpreter agent?"
    task = input(question)
    responses = []
    responses.append(question)
    responses.append(task)

    for i in range(max_loops):
        task = concat_strings(task)

        out = model.run(task)
        summary = out["summary"]
        print("\nSummary: ", summary)
        pseudocode = out["algorithmic_pseudocode"]
        code = clean_model_code(out["code"])

        output = f"{summary}\n\n{pseudocode}\n\n{code}"
        responses.append(output)

        # Code Executor
        executor = CodeExecutor()

        # Execute the code
        result = executor.execute(code)

        if "error" in result:
            print(f"Error: {result}")
            break

        print("\nCode Output: ", result)

        task = input(
            "\nEnter the next task for the code interpreter agent (or 'exit' to stop): "
        )
        if task.strip().lower() == "exit":
            break
        responses.append(task)

    return responses


run_model_and_generate_code()
@@ -0,0 +1,116 @@
import os
from datetime import datetime
from typing import Any, Dict, List

# Note: the imports below assume plaid-python v9+ (the OpenAPI-generated
# client); older releases exposed a top-level `plaid.Client` instead.
import plaid
from plaid.api import plaid_api
from plaid.exceptions import ApiException
from plaid.model.transactions_get_request import TransactionsGetRequest
from plaid.model.transactions_get_response import TransactionsGetResponse

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)


def fetch_transactions(
    start_date: str, end_date: str
) -> List[Dict[str, Any]]:
    """
    Fetches a list of transactions from Plaid for a given time period.

    The Plaid access token is read from the PLAID_ACCESS_TOKEN environment variable.

    Args:
        start_date (str): The start date for the transaction query in 'YYYY-MM-DD' format.
        end_date (str): The end date for the transaction query in 'YYYY-MM-DD' format.

    Returns:
        List[Dict[str, Any]]: A list of transactions as dictionaries.

    Raises:
        ApiException: If there is an error with the request to the Plaid API.
        ValueError: If the date format is incorrect.
    """
    try:
        access_token = os.getenv("PLAID_ACCESS_TOKEN")
        # Validate the date format and convert to date objects, which the
        # v9+ client expects
        start = datetime.strptime(start_date, "%Y-%m-%d").date()
        end = datetime.strptime(end_date, "%Y-%m-%d").date()

        # Initialize the Plaid client with your credentials
        environment = os.getenv("PLAID_ENV", "sandbox")
        configuration = plaid.Configuration(
            host=plaid.Environment.Production
            if environment == "production"
            else plaid.Environment.Sandbox,
            api_key={
                "clientId": os.getenv("PLAID_CLIENT_ID"),
                "secret": os.getenv("PLAID_SECRET"),
            },
        )
        plaid_client = plaid_api.PlaidApi(plaid.ApiClient(configuration))

        # Create a request object for transactions
        request = TransactionsGetRequest(
            access_token=access_token,
            start_date=start,
            end_date=end,
        )

        # Fetch transactions from the Plaid API
        response: TransactionsGetResponse = plaid_client.transactions_get(
            request
        )

        # Return the transactions list
        return response.transactions

    except ApiException as e:
        print(f"Plaid API Error: {e}")
        raise
    except ValueError as e:
        print(f"Date Format Error: {e}")
        raise


# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs="",
    # docs_folder="docs",
    retry_attempts=1,
    # context_length=1000,
    # tool_schema = dict
    context_length=200000,
    return_step_meta=True,
    tools=[fetch_transactions],
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
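
# Hypothetical direct check of the tool outside the agent (not in the
# original; assumes the PLAID_* environment variables are configured for a
# sandbox item, and the dates below are illustrative):
# txns = fetch_transactions("2024-06-01", "2024-07-31")
# print(f"Fetched {len(txns)} transactions")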
@ -0,0 +1,74 @@
|
||||
from swarms.models.openai_function_caller import OpenAIFunctionCaller
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Sequence
|
||||
|
||||
|
||||
class Page(BaseModel):
|
||||
content: str = Field(
|
||||
...,
|
||||
description="The content of the page",
|
||||
)
|
||||
page_number: int = Field(
|
||||
...,
|
||||
description="The page number of the page",
|
||||
)
|
||||
|
||||
|
||||
class Chapter(BaseModel):
|
||||
title: str = Field(
|
||||
...,
|
||||
description="The title of the page",
|
||||
)
|
||||
page_content: Sequence[Page] = Field(
|
||||
...,
|
||||
description="The content of the page in the chapter",
|
||||
)
|
||||
|
||||
|
||||
# Chapter 1 -> chapter 2 -> chapter 3 -> chapter 4 -> chapter 5 -> chapter 6 -> chapter 7 -> chapter 8 -> chapter 9 -> chapter 10
|
||||
|
||||
|
||||
class BookSchema(BaseModel):
|
||||
book_title: str = Field(
|
||||
...,
|
||||
description="The title of the book",
|
||||
)
|
||||
chapters: Sequence[Chapter] = Field(
|
||||
...,
|
||||
description="The chapters of the book",
|
||||
)
|
||||
conclusion: str = Field(
|
||||
...,
|
||||
description="The conclusion of the book",
|
||||
)
|
||||
|
||||
|
||||
|
||||
# Example usage:
|
||||
# Initialize the function caller
|
||||
|
||||
|
||||
def generate_book(
    num_chapters: int,
    task: str = "Let's create a fully novel children's sci-fi book with 10 chapters",
):
    # Accumulate the running context so each chapter builds on the previous ones
    responses = [task]

    # Create the function caller once, outside the loop
    model = OpenAIFunctionCaller(
        system_prompt="You're a Book Generator Agent; your purpose is to generate a fully novel children's sci-fi book, one chapter at a time.",
        max_tokens=3000,
        temperature=1.0,
        base_model=Chapter,
        parallel_tool_calls=False,
    )

    for _ in range(num_chapters):
        out = model.run(task)
        print(out)
        responses.append(out)

        # Feed everything generated so far back in as context for the next chapter
        task = " ".join(responses)
|
@ -0,0 +1,27 @@
|
||||
def clean_model_code(model_code_str: str) -> str:
|
||||
"""
|
||||
Cleans up the generated model code string.
|
||||
|
||||
Args:
|
||||
model_code_str (str): The raw model code as a string.
|
||||
|
||||
Returns:
|
||||
str: The cleaned-up model code.
|
||||
"""
|
||||
cleaned_code = (
|
||||
model_code_str.replace("\\n", "\n")
|
||||
.replace("\\'", "'")
|
||||
.replace('\\"', '"')
|
||||
)
|
||||
return cleaned_code.strip()
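
# Example (hypothetical): un-escaping a one-line, model-escaped snippet.
# clean_model_code("print(\\'hi\\')\\n") returns "print('hi')"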
|
||||
|
||||
|
||||
code = """
|
||||
|
||||
|
||||
# Quantum Dimensions: A game of shifting realities\\n\\nimport random\\n\\nclass QuantumDimensionsGame:\\n def __init__(self):\\n self.player_position = (0, 0)\\n self.realities = []\\n self.current_reality = 0\\n self.generate_realities()\\n\\n def generate_realities(self):\\n # Create a multi-dimensional reality space\\n for _ in range(3): # three parallel realities\\n reality = [[random.choice([\'empty\', \'enemy\', \'treasure\']) for _ in range(5)] for _ in range(5)]\\n self.realities.append(reality)\\n\\n def display_reality(self):\\n print(f\'Reality #{self.current_reality + 1}:\')\\n for row in self.realities[self.current_reality]:\\n print(\' \'.join(row))\\n\\n def shift_reality(self):\\n print(\\"Shifting dimensions...\\")\\n self.current_reality = (self.current_reality + 1) % len(self.realities)\\n\\n def move_player(self, direction):\\n x, y = self.player_position\\n if direction == \'up\' and x > 0:\\n self.player_position = (x - 1, y)\\n elif direction == \'down\' and x < 4:\\n self.player_position = (x + 1, y)\\n elif direction == \'left\' and y > 0:\\n self.player_position = (x, y - 1)\\n elif direction == \'right\' and y < 4:\\n self.player_position = (x, y + 1)\\n else:\\n print(\\"Can\'t move in that direction.\\")\\n\\n def play_turn(self):\\n self.display_reality()\\n move = input(\\"Enter move (up/down/left/right) or shift to change realities: \\").strip().lower()\\n if move == \'shift\':\\n self.shift_reality()\\n else:\\n self.move_player(move)\\n x, y = self.player_position\\n current_state = self.realities[self.current_reality][x][y]\\n if current_state == \'enemy\':\\n print(\\"You\'ve encountered an enemy!\\")\\n elif current_state == \'treasure\':\\n print(\\"You\'ve found a treasure!\\")\\n print(f\'Player position: {self.player_position}\')\\n\\n def start_game(self):\\n print(\\"Welcome to Quantum Dimensions!\\")\\n while True:\\n self.play_turn()\\n\\nif __name__ == \'__main__\':\\n game = QuantumDimensionsGame()\\n game.start_game()
|
||||
"""
|
||||
|
||||
cleaned = clean_model_code(code)
|
||||
# print(cleaned)
|
||||
exec(cleaned)
|
@ -0,0 +1,119 @@
|
||||
import pygame
|
||||
import random
|
||||
import math
|
||||
|
||||
# Initialize Pygame
|
||||
pygame.init()
|
||||
|
||||
# Set up the display
|
||||
WIDTH, HEIGHT = 800, 600
|
||||
screen = pygame.display.set_mode((WIDTH, HEIGHT))
|
||||
pygame.display.set_caption("Psychedelic Pulse")
|
||||
|
||||
# Colors
|
||||
BLACK = (0, 0, 0)
|
||||
WHITE = (255, 255, 255)
|
||||
|
||||
# Player
|
||||
player_radius = 10
|
||||
player_x = WIDTH // 2
|
||||
player_y = HEIGHT - 50
|
||||
|
||||
# Goal
|
||||
goal_radius = 20
|
||||
goal_x = WIDTH // 2
|
||||
goal_y = 50
|
||||
|
||||
|
||||
# Obstacles
|
||||
class PsychedelicShape:
|
||||
def __init__(self):
|
||||
self.x = random.randint(0, WIDTH)
|
||||
self.y = random.randint(100, HEIGHT - 100)
|
||||
self.radius = random.randint(20, 60)
|
||||
self.color = (
|
||||
random.randint(100, 255),
|
||||
random.randint(100, 255),
|
||||
random.randint(100, 255),
|
||||
)
|
||||
self.pulse_speed = random.uniform(0.05, 0.2)
|
||||
self.move_speed = random.uniform(1, 3)
|
||||
self.direction = random.choice([-1, 1])
|
||||
|
||||
def update(self):
|
||||
self.radius = (
|
||||
abs(math.sin(pygame.time.get_ticks() * self.pulse_speed)) * 40
|
||||
+ 20
|
||||
)
|
||||
self.x += self.move_speed * self.direction
|
||||
if self.x < 0 or self.x > WIDTH:
|
||||
self.direction *= -1
|
||||
|
||||
def draw(self):
|
||||
pygame.draw.circle(
|
||||
screen,
|
||||
self.color,
|
||||
(int(self.x), int(self.y)),
|
||||
int(self.radius),
|
||||
)
|
||||
|
||||
|
||||
# Create obstacles
|
||||
obstacles = [PsychedelicShape() for _ in range(10)]
|
||||
|
||||
# Game loop
|
||||
clock = pygame.time.Clock()
|
||||
running = True
|
||||
|
||||
while running:
|
||||
for event in pygame.event.get():
|
||||
if event.type == pygame.QUIT:
|
||||
running = False
|
||||
|
||||
# Move player
|
||||
keys = pygame.key.get_pressed()
|
||||
if keys[pygame.K_LEFT] and player_x > player_radius:
|
||||
player_x -= 5
|
||||
if keys[pygame.K_RIGHT] and player_x < WIDTH - player_radius:
|
||||
player_x += 5
|
||||
if keys[pygame.K_UP] and player_y > player_radius:
|
||||
player_y -= 5
|
||||
if keys[pygame.K_DOWN] and player_y < HEIGHT - player_radius:
|
||||
player_y += 5
|
||||
|
||||
# Update obstacles
|
||||
for obstacle in obstacles:
|
||||
obstacle.update()
|
||||
|
||||
# Check for collisions
|
||||
for obstacle in obstacles:
|
||||
distance = math.sqrt(
|
||||
(player_x - obstacle.x) ** 2 + (player_y - obstacle.y) ** 2
|
||||
)
|
||||
if distance < player_radius + obstacle.radius:
|
||||
player_x = WIDTH // 2
|
||||
player_y = HEIGHT - 50
|
||||
|
||||
# Check for goal
|
||||
if (
|
||||
math.sqrt((player_x - goal_x) ** 2 + (player_y - goal_y) ** 2)
|
||||
< player_radius + goal_radius
|
||||
):
|
||||
print("You win!")
|
||||
running = False
|
||||
|
||||
# Draw everything
|
||||
screen.fill(BLACK)
|
||||
for obstacle in obstacles:
|
||||
obstacle.draw()
|
||||
pygame.draw.circle(
|
||||
screen, WHITE, (int(player_x), int(player_y)), player_radius
|
||||
)
|
||||
pygame.draw.circle(
|
||||
screen, (255, 215, 0), (goal_x, goal_y), goal_radius
|
||||
)
|
||||
|
||||
pygame.display.flip()
|
||||
clock.tick(60)
|
||||
|
||||
pygame.quit()
|
@ -0,0 +1,85 @@
|
||||
import pygame
|
||||
import random
|
||||
import math
|
||||
|
||||
# Initialize Pygame and mixer
|
||||
pygame.init()
|
||||
pygame.mixer.init()
|
||||
|
||||
# Set up the display
|
||||
WIDTH, HEIGHT = 800, 600
|
||||
screen = pygame.display.set_mode((WIDTH, HEIGHT))
|
||||
pygame.display.set_caption("Psychedelic Soundscape Explorer")
|
||||
|
||||
# Colors
|
||||
BLACK = (0, 0, 0)
|
||||
|
||||
# Player
|
||||
player_pos = [WIDTH // 2, HEIGHT // 2]
|
||||
player_radius = 20
|
||||
|
||||
# Sound zones
|
||||
sound_zones = []
|
||||
for _ in range(5):
|
||||
sound_zones.append(
|
||||
[
|
||||
random.randint(0, WIDTH),
|
||||
random.randint(0, HEIGHT),
|
||||
random.randint(50, 150),
|
||||
]
|
||||
)
|
||||
|
||||
# Create sounds (assumes sound1.wav ... sound5.wav exist alongside the script)
sounds = [pygame.mixer.Sound(f"sound{i}.wav") for i in range(1, 6)]
|
||||
|
||||
# Main game loop
|
||||
running = True
|
||||
clock = pygame.time.Clock()
|
||||
|
||||
while running:
|
||||
for event in pygame.event.get():
|
||||
if event.type == pygame.QUIT:
|
||||
running = False
|
||||
|
||||
# Move player
|
||||
keys = pygame.key.get_pressed()
|
||||
if keys[pygame.K_LEFT]:
|
||||
player_pos[0] -= 5
|
||||
if keys[pygame.K_RIGHT]:
|
||||
player_pos[0] += 5
|
||||
if keys[pygame.K_UP]:
|
||||
player_pos[1] -= 5
|
||||
if keys[pygame.K_DOWN]:
|
||||
player_pos[1] += 5
|
||||
|
||||
# Clear the screen
|
||||
screen.fill(BLACK)
|
||||
|
||||
# Draw and play sounds
|
||||
for i, (x, y, radius) in enumerate(sound_zones):
|
||||
distance = math.sqrt(
|
||||
(player_pos[0] - x) ** 2 + (player_pos[1] - y) ** 2
|
||||
)
|
||||
if distance < radius:
|
||||
intensity = 1 - (distance / radius)
sounds[i].set_volume(intensity)
# Start the loop only if this sound isn't already playing;
# calling play(-1) every frame would restart it constantly
if sounds[i].get_num_channels() == 0:
    sounds[i].play(-1)
|
||||
|
||||
# Create trippy color based on distance and sound
|
||||
r = int(255 * math.sin(intensity * math.pi / 2))
|
||||
g = int(255 * math.cos(intensity * math.pi / 2))
|
||||
b = int(255 * (1 - intensity))
|
||||
|
||||
pygame.draw.circle(
|
||||
screen, (r, g, b), (x, y), int(radius * intensity), 2
|
||||
)
|
||||
else:
|
||||
sounds[i].stop()
|
||||
|
||||
# Draw player
|
||||
pygame.draw.circle(screen, (255, 255, 255), player_pos, player_radius)
|
||||
|
||||
pygame.display.flip()
|
||||
clock.tick(60)
|
||||
|
||||
pygame.quit()
|
@ -1,79 +0,0 @@
|
||||
import os
|
||||
from swarms.models.base_llm import BaseLLM
|
||||
|
||||
|
||||
def check_multion_api_key():
|
||||
"""
|
||||
Checks if the MultiOn API key is available in the environment variables.
|
||||
|
||||
Returns:
|
||||
str: The MultiOn API key.
|
||||
"""
|
||||
api_key = os.getenv("MULTION_API_KEY")
|
||||
return api_key
|
||||
|
||||
|
||||
class MultiOnAgent(BaseLLM):
|
||||
"""
|
||||
Represents an agent that interacts with the MultiOn API to run tasks on a remote session.
|
||||
|
||||
Args:
|
||||
api_key (str): The API key for accessing the MultiOn API.
|
||||
url (str): The URL of the remote session.
|
||||
*args: Variable length argument list.
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
|
||||
Attributes:
|
||||
client (MultiOn): The MultiOn client instance.
|
||||
url (str): The URL of the remote session.
|
||||
session_id (str): The ID of the current session.
|
||||
|
||||
Methods:
|
||||
run: Runs a task on the remote session.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str = None,
|
||||
system_prompt: str = None,
|
||||
api_key: str = check_multion_api_key(),
|
||||
url: str = "https://huggingface.co/papers",
|
||||
max_steps: int = 1,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.name = name
|
||||
|
||||
try:
|
||||
from multion.client import MultiOn
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"The MultiOn package is not installed. Please install it using 'pip install multion'."
|
||||
)
|
||||
|
||||
self.client = MultiOn(api_key=api_key)
|
||||
self.url = url
|
||||
self.system_prompt = system_prompt
|
||||
self.max_steps = max_steps
|
||||
|
||||
def run(self, task: str, *args, **kwargs):
|
||||
"""
|
||||
Runs a task on the remote session.
|
||||
|
||||
Args:
|
||||
task (str): The task to be executed on the remote session.
|
||||
*args: Variable length argument list.
|
||||
**kwargs: Arbitrary keyword arguments.
|
||||
"""
|
||||
response = self.client.browse(
|
||||
cmd=task,
|
||||
url=self.url,
|
||||
local=True,
|
||||
max_steps=self.max_steps,
|
||||
)
|
||||
|
||||
# response = response.json()
|
||||
|
||||
# print(response.message)
|
||||
return str(response.message)
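
# Example (hypothetical) usage, assuming MULTION_API_KEY is set in the environment:
# agent = MultiOnAgent(name="paper-browser", url="https://huggingface.co/papers")
# print(agent.run("Summarize the top paper on the page"))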
|
@ -0,0 +1,353 @@
|
||||
from typing import List, Union
|
||||
|
||||
from swarms.models.base_embedding_model import BaseEmbeddingModel
|
||||
from swarms.models.base_llm import BaseLLM
|
||||
from swarms.models.base_multimodal_model import BaseMultiModalModel
|
||||
from swarms.models.fuyu import Fuyu # noqa: E402
|
||||
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
|
||||
from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
|
||||
from swarms.models.idefics import Idefics # noqa: E402
|
||||
from swarms.models.kosmos_two import Kosmos # noqa: E402
|
||||
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
|
||||
from swarms.models.llama3_hosted import llama3Hosted
|
||||
from swarms.models.llava import LavaMultiModal # noqa: E402
|
||||
from swarms.models.nougat import Nougat # noqa: E402
|
||||
from swarms.models.openai_embeddings import OpenAIEmbeddings
|
||||
from swarms.models.openai_function_caller import OpenAIFunctionCaller
|
||||
from swarms.models.openai_tts import OpenAITTS # noqa: E402
|
||||
from swarms.models.palm import GooglePalm as Palm # noqa: E402
|
||||
from swarms.models.popular_llms import Anthropic as Anthropic
|
||||
from swarms.models.popular_llms import (
|
||||
AzureOpenAILLM as AzureOpenAI,
|
||||
)
|
||||
from swarms.models.popular_llms import (
|
||||
CohereChat as Cohere,
|
||||
)
|
||||
from swarms.models.popular_llms import FireWorksAI, OctoAIChat
|
||||
from swarms.models.popular_llms import (
|
||||
OpenAIChatLLM as OpenAIChat,
|
||||
)
|
||||
from swarms.models.popular_llms import (
|
||||
OpenAILLM as OpenAI,
|
||||
)
|
||||
from swarms.models.popular_llms import ReplicateChat as Replicate
|
||||
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
|
||||
from swarms.models.sampling_params import SamplingParams
|
||||
from swarms.models.together import TogetherLLM # noqa: E402
|
||||
from swarms.models.vilt import Vilt # noqa: E402
|
||||
from swarms.structs.base_structure import BaseStructure
|
||||
from swarms.utils.loguru_logger import logger
|
||||
|
||||
# New type BaseLLM and BaseEmbeddingModel and BaseMultimodalModel
|
||||
omni_model_type = Union[
|
||||
BaseLLM, BaseEmbeddingModel, BaseMultiModalModel, callable
|
||||
]
|
||||
list_of_omni_model_type = List[omni_model_type]
|
||||
|
||||
|
||||
models = [
|
||||
BaseLLM,
|
||||
BaseEmbeddingModel,
|
||||
BaseMultiModalModel,
|
||||
Fuyu,
|
||||
GPT4VisionAPI,
|
||||
HuggingfaceLLM,
|
||||
Idefics,
|
||||
Kosmos,
|
||||
LayoutLMDocumentQA,
|
||||
llama3Hosted,
|
||||
LavaMultiModal,
|
||||
Nougat,
|
||||
OpenAIEmbeddings,
|
||||
OpenAITTS,
|
||||
Palm,
|
||||
Anthropic,
|
||||
AzureOpenAI,
|
||||
Cohere,
|
||||
OctoAIChat,
|
||||
OpenAIChat,
|
||||
OpenAI,
|
||||
Replicate,
|
||||
QwenVLMultiModal,
|
||||
SamplingParams,
|
||||
TogetherLLM,
|
||||
Vilt,
|
||||
FireWorksAI,
|
||||
OpenAIFunctionCaller,
|
||||
]
|
||||
|
||||
|
||||
class ModelRouter(BaseStructure):
|
||||
"""
|
||||
A router for managing multiple models.
|
||||
|
||||
Attributes:
|
||||
model_router_id (str): The ID of the model router.
|
||||
model_router_description (str): The description of the model router.
|
||||
model_pool (List[omni_model_type]): The list of models in the model pool.
|
||||
|
||||
Methods:
|
||||
check_for_models(): Checks if there are any models in the model pool.
|
||||
add_model(model: omni_model_type): Adds a model to the model pool.
|
||||
add_models(models: List[omni_model_type]): Adds multiple models to the model pool.
|
||||
get_model_by_name(model_name: str) -> omni_model_type: Retrieves a model from the model pool by its name.
|
||||
get_multiple_models_by_name(model_names: List[str]) -> List[omni_model_type]: Retrieves multiple models from the model pool by their names.
|
||||
get_model_pool() -> List[omni_model_type]: Retrieves the entire model pool.
|
||||
get_model_by_index(index: int) -> omni_model_type: Retrieves a model from the model pool by its index.
|
||||
get_model_by_id(model_id: str) -> omni_model_type: Retrieves a model from the model pool by its ID.
|
||||
dict() -> dict: Returns a dictionary representation of the model router.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_router_id: str = "model_router",
|
||||
model_router_description: str = "A router for managing multiple models.",
|
||||
model_pool: List[omni_model_type] = models,
|
||||
verbose: bool = False,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.model_router_id = model_router_id
|
||||
self.model_router_description = model_router_description
|
||||
self.model_pool = model_pool
|
||||
self.verbose = verbose
|
||||
|
||||
self.check_for_models()
|
||||
# self.refactor_model_class_if_invoke()
|
||||
|
||||
def check_for_models(self):
|
||||
"""
|
||||
Checks if there are any models in the model pool.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Raises:
|
||||
ValueError: If no models are found in the model pool.
|
||||
"""
|
||||
if len(self.model_pool) == 0:
|
||||
raise ValueError("No models found in model pool.")
|
||||
|
||||
def add_model(self, model: omni_model_type):
|
||||
"""
|
||||
Adds a model to the model pool.
|
||||
|
||||
Args:
|
||||
model (omni_model_type): The model to be added.
|
||||
|
||||
Returns:
|
||||
str: A success message indicating that the model has been added to the model pool.
|
||||
"""
|
||||
logger.info(f"Adding model {model.name} to model pool.")
|
||||
self.model_pool.append(model)
|
||||
return "Model successfully added to model pool."
|
||||
|
||||
def add_models(self, models: List[omni_model_type]):
|
||||
"""
|
||||
Adds multiple models to the model pool.
|
||||
|
||||
Args:
|
||||
models (List[omni_model_type]): The models to be added.
|
||||
|
||||
Returns:
|
||||
str: A success message indicating that the models have been added to the model pool.
|
||||
"""
|
||||
logger.info("Adding models to model pool.")
|
||||
self.model_pool.extend(models)
|
||||
return "Models successfully added to model pool."
|
||||
|
||||
# def query_model_from_langchain(self, model_name: str, *args, **kwargs):
|
||||
# """
|
||||
# Query a model from langchain community.
|
||||
|
||||
# Args:
|
||||
# model_name (str): The name of the model.
|
||||
# *args: Additional positional arguments to be passed to the model.
|
||||
# **kwargs: Additional keyword arguments to be passed to the model.
|
||||
|
||||
# Returns:
|
||||
# omni_model_type: The model object.
|
||||
|
||||
# Raises:
|
||||
# ValueError: If the model with the given name is not found in the model pool.
|
||||
# """
|
||||
# from langchain_community.llms import __getattr__
|
||||
|
||||
# logger.info(
|
||||
# f"Querying model {model_name} from langchain community."
|
||||
# )
|
||||
# model = __getattr__(model_name)(*args, **kwargs)
|
||||
# model = self.refactor_model_class_if_invoke_class(model)
|
||||
|
||||
# return model
|
||||
|
||||
def get_model_by_name(self, model_name: str) -> omni_model_type:
|
||||
"""
|
||||
Retrieves a model from the model pool by its name.
|
||||
|
||||
Args:
|
||||
model_name (str): The name of the model.
|
||||
|
||||
Returns:
|
||||
omni_model_type: The model object.
|
||||
|
||||
Raises:
|
||||
ValueError: If the model with the given name is not found in the model pool.
|
||||
"""
|
||||
logger.info(f"Retrieving model {model_name} from model pool.")
|
||||
for model in self.model_pool:
    # Use getattr with a default: the pool can hold classes or callables
    # that lack some of these attributes
    if model_name in [
        getattr(model, "name", None),
        getattr(model, "model_id", None),
        getattr(model, "model_name", None),
    ]:
        return model
raise ValueError(f"Model {model_name} not found in model pool.")
|
||||
|
||||
def get_multiple_models_by_name(
|
||||
self, model_names: List[str]
|
||||
) -> List[omni_model_type]:
|
||||
"""
|
||||
Retrieves multiple models from the model pool by their names.
|
||||
|
||||
Args:
|
||||
model_names (List[str]): The names of the models.
|
||||
|
||||
Returns:
|
||||
List[omni_model_type]: The list of model objects.
|
||||
|
||||
Raises:
|
||||
ValueError: If any of the models with the given names are not found in the model pool.
|
||||
"""
|
||||
logger.info(
|
||||
f"Retrieving multiple models {model_names} from model pool."
|
||||
)
|
||||
models = []
|
||||
for model_name in model_names:
|
||||
models.append(self.get_model_by_name(model_name))
|
||||
return models
|
||||
|
||||
def get_model_pool(self) -> List[omni_model_type]:
|
||||
"""
|
||||
Retrieves the entire model pool.
|
||||
|
||||
Returns:
|
||||
List[omni_model_type]: The list of model objects in the model pool.
|
||||
"""
|
||||
return self.model_pool
|
||||
|
||||
def get_model_by_index(self, index: int) -> omni_model_type:
|
||||
"""
|
||||
Retrieves a model from the model pool by its index.
|
||||
|
||||
Args:
|
||||
index (int): The index of the model in the model pool.
|
||||
|
||||
Returns:
|
||||
omni_model_type: The model object.
|
||||
|
||||
Raises:
|
||||
IndexError: If the index is out of range.
|
||||
"""
|
||||
return self.model_pool[index]
|
||||
|
||||
def get_model_by_id(self, model_id: str) -> omni_model_type:
|
||||
"""
|
||||
Retrieves a model from the model pool by its ID.
|
||||
|
||||
Args:
|
||||
model_id (str): The ID of the model.
|
||||
|
||||
Returns:
|
||||
omni_model_type: The model object.
|
||||
|
||||
Raises:
|
||||
ValueError: If the model with the given ID is not found in the model pool.
|
||||
"""
|
||||
name = model_id
|
||||
for model in self.model_pool:
|
||||
if (
|
||||
hasattr(model, "model_id")
|
||||
and name == model.model_id
|
||||
or hasattr(model, "model_name")
|
||||
and name == model.model_name
|
||||
or hasattr(model, "name")
|
||||
and name == model.name
|
||||
or hasattr(model, "model")
|
||||
and name == model.model
|
||||
):
|
||||
return model
|
||||
raise ValueError(f"Model {model_id} not found in model pool.")
|
||||
|
||||
def refactor_model_class_if_invoke(self):
|
||||
"""
|
||||
Refactors the model class if it has an 'invoke' method.
|
||||
|
||||
Checks to see if the model pool has a model with an 'invoke' method and refactors it to have a 'run' method and '__call__' method.
|
||||
|
||||
Returns:
|
||||
str: A success message indicating that the model classes have been refactored.
|
||||
"""
|
||||
for model in self.model_pool:
|
||||
if hasattr(model, "invoke"):
|
||||
model.run = model.invoke
|
||||
model.__call__ = model.invoke
|
||||
logger.info(
|
||||
f"Refactored model {model.name} to have run and __call__ methods."
|
||||
)
|
||||
|
||||
# Update the model in the model pool
|
||||
self.model_pool[self.model_pool.index(model)] = model
|
||||
|
||||
return "Model classes successfully refactored."
|
||||
|
||||
def refactor_model_class_if_invoke_class(
|
||||
self, model: callable, *args, **kwargs
|
||||
) -> callable:
|
||||
"""
|
||||
Refactors the model class if it has an 'invoke' method.
|
||||
|
||||
Checks to see if the model pool has a model with an 'invoke' method and refactors it to have a 'run' method and '__call__' method.
|
||||
|
||||
Returns:
|
||||
str: A success message indicating that the model classes have been refactored.
|
||||
"""
|
||||
if hasattr(model, "invoke"):
|
||||
model.run = model.invoke
|
||||
model.__call__ = model.invoke
|
||||
logger.info(
|
||||
f"Refactored model {model.name} to have run and __call__ methods."
|
||||
)
|
||||
|
||||
return model
|
||||
|
||||
def find_model_by_name_and_run(
|
||||
self, model_name: str = None, task: str = None, *args, **kwargs
|
||||
) -> str:
|
||||
"""
|
||||
Finds a model by its name and runs a task on it.
|
||||
|
||||
Args:
|
||||
model_name (str): The name of the model.
|
||||
task (str): The task to be run on the model.
|
||||
*args: Additional positional arguments to be passed to the task.
|
||||
**kwargs: Additional keyword arguments to be passed to the task.
|
||||
|
||||
Returns:
|
||||
str: The result of running the task on the model.
|
||||
|
||||
Raises:
|
||||
ValueError: If the model with the given name is not found in the model pool.
|
||||
"""
|
||||
model = self.get_model_by_name(model_name)
|
||||
return model.run(task, *args, **kwargs)
|
||||
|
||||
|
||||
# model = ModelRouter()
|
||||
# print(model.to_dict())
|
||||
# print(model.get_model_pool())
|
||||
# print(model.get_model_by_index(0))
|
||||
# print(model.get_model_by_id("stability-ai/stable-diffusion:"))
|
||||
# # print(model.get_multiple_models_by_name(["gpt-4o", "gpt-4"]))
|
@ -0,0 +1,230 @@
|
||||
import os
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
from swarms import Agent, OpenAIChat
|
||||
from typing import List, Optional, Callable
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
from swarms.structs.base_swarm import BaseSwarm
|
||||
|
||||
|
||||
class AStarSwarm(BaseSwarm):
|
||||
def __init__(
|
||||
self,
|
||||
root_agent: Agent,
|
||||
child_agents: Optional[List[Agent]] = None,
|
||||
heuristic: Optional[Callable[[Agent], float]] = None,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initializes the A* Swarm with a root agent and optionally a list of child agents.
|
||||
|
||||
Args:
|
||||
root_agent (Agent): The root agent in the swarm.
|
||||
child_agents (Optional[List[Agent]]): List of child agents.
|
||||
"""
|
||||
self.root_agent = root_agent
self.heuristic = heuristic
self.child_agents = (
    child_agents if child_agents is not None else []
)
|
||||
self.parent_map = {
|
||||
agent: root_agent for agent in self.child_agents
|
||||
}
|
||||
|
||||
def a_star_communicate(
|
||||
self,
|
||||
agent: Agent,
|
||||
task: str,
|
||||
) -> str:
|
||||
"""
|
||||
Distributes the task among agents using A* search-like communication.
|
||||
|
||||
Args:
|
||||
agent (Agent): The agent to start the communication from.
|
||||
task (str): The task to distribute and process.
|
||||
|
||||
Returns:
|
||||
str: The result of the task after processing.
|
||||
"""
|
||||
# Perform the task at the current agent
|
||||
result = agent.run(task)
|
||||
|
||||
# Base case: if no child agents, return the result
|
||||
if agent not in self.parent_map.values():
|
||||
return result
|
||||
|
||||
# Gather child agents
|
||||
children = [
|
||||
child
|
||||
for child, parent in self.parent_map.items()
|
||||
if parent == agent
|
||||
]
|
||||
|
||||
# Sort children based on the heuristic (if provided)
|
||||
if self.heuristic:
|
||||
children.sort(key=self.heuristic, reverse=True)
|
||||
|
||||
# Communicate with child agents
|
||||
for child in children:
|
||||
sub_result = self.a_star_communicate(child, task)
|
||||
result += f"\n{sub_result}"
|
||||
|
||||
return result
|
||||
|
||||
def visualize(self):
|
||||
"""
|
||||
Visualizes the communication flow between agents in the swarm using networkx and matplotlib.
|
||||
"""
|
||||
graph = nx.DiGraph()
|
||||
|
||||
# Add edges between the root agent and child agents
|
||||
for child in self.child_agents:
|
||||
graph.add_edge(self.root_agent.agent_name, child.agent_name)
|
||||
self._add_edges(graph, child)
|
||||
|
||||
# Draw the graph
|
||||
pos = nx.spring_layout(graph)
|
||||
plt.figure(figsize=(10, 8))
|
||||
nx.draw(
|
||||
graph,
|
||||
pos,
|
||||
with_labels=True,
|
||||
node_color="lightblue",
|
||||
font_size=10,
|
||||
node_size=3000,
|
||||
font_weight="bold",
|
||||
edge_color="gray",
|
||||
)
|
||||
plt.title("Communication Flow Between Agents")
|
||||
plt.show()
|
||||
|
||||
def _add_edges(self, graph: nx.DiGraph, agent: Agent):
|
||||
"""
|
||||
Recursively adds edges to the graph for the given agent.
|
||||
|
||||
Args:
|
||||
graph (nx.DiGraph): The graph to add edges to.
|
||||
agent (Agent): The current agent.
|
||||
"""
|
||||
children = [
|
||||
child
|
||||
for child, parent in self.parent_map.items()
|
||||
if parent == agent
|
||||
]
|
||||
for child in children:
|
||||
graph.add_edge(agent.agent_name, child.agent_name)
|
||||
self._add_edges(graph, child)
|
||||
|
||||
def run(
|
||||
self,
|
||||
task: str,
|
||||
) -> str:
|
||||
"""
|
||||
Start the task from the root agent using A* communication.
|
||||
|
||||
Args:
|
||||
task (str): The task to execute.
|
||||
|
||||
Returns:
|
||||
str: The result of the task after processing.
|
||||
"""
|
||||
return self.a_star_communicate(self.root_agent, task)
|
||||
|
||||
|
||||
# Heuristic example (can be customized)
|
||||
def example_heuristic(agent: Agent) -> float:
|
||||
"""
|
||||
Example heuristic that prioritizes agents based on some custom logic.
|
||||
|
||||
Args:
|
||||
agent (Agent): The agent to evaluate.
|
||||
|
||||
Returns:
|
||||
float: The priority score for the agent.
|
||||
"""
|
||||
# Example heuristic: prioritize based on the length of the agent's name (as a proxy for complexity)
|
||||
return len(agent.agent_name)
|
||||
|
||||
|
||||
# Set up the model as provided
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
model = OpenAIChat(
|
||||
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
|
||||
)
|
||||
|
||||
# Initialize root agent
|
||||
root_agent = Agent(
|
||||
agent_name="Financial-Analysis-Agent",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
llm=model,
|
||||
max_loops=2,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="finance_agent.json",
|
||||
user_name="swarms_corp",
|
||||
retry_attempts=3,
|
||||
context_length=200000,
|
||||
)
|
||||
|
||||
# List of child agents
|
||||
child_agents = [
|
||||
Agent(
|
||||
agent_name="Child-Agent-1",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
llm=model,
|
||||
max_loops=2,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="finance_agent_child_1.json",
|
||||
user_name="swarms_corp",
|
||||
retry_attempts=3,
|
||||
context_length=200000,
|
||||
),
|
||||
Agent(
|
||||
agent_name="Child-Agent-2",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
llm=model,
|
||||
max_loops=2,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="finance_agent_child_2.json",
|
||||
user_name="swarms_corp",
|
||||
retry_attempts=3,
|
||||
context_length=200000,
|
||||
),
|
||||
]
|
||||
|
||||
# Create the A* swarm
|
||||
swarm = AStarSwarm(
|
||||
root_agent=root_agent,
|
||||
child_agents=child_agents,
|
||||
heuristic=example_heuristic,
|
||||
)
|
||||
|
||||
# Run the task with the heuristic
|
||||
result = swarm.run(
|
||||
"What are the components of a startups stock incentive equity plan",
|
||||
)
|
||||
print(result)
|
||||
|
||||
# Visualize the communication flow
|
||||
swarm.visualize()
|
@ -0,0 +1,237 @@
|
||||
# import os
|
||||
# from swarms import Agent, OpenAIChat
|
||||
# from typing import List
|
||||
|
||||
# class DepthFirstSearchSwarm:
|
||||
# def __init__(self, agents: List[Agent]):
|
||||
# self.agents = agents
|
||||
# self.visited = set()
|
||||
|
||||
# def dfs(self, agent, task, results):
|
||||
# if agent.agent_name in self.visited:
|
||||
# return
|
||||
# self.visited.add(agent.agent_name)
|
||||
|
||||
# # Execute the agent's task
|
||||
# result = agent.run(task)
|
||||
# results.append(result)
|
||||
|
||||
# # If agent produces more tasks, continue the DFS
|
||||
# if isinstance(result, dict) and "next_tasks" in result:
|
||||
# for next_task in result["next_tasks"]:
|
||||
# next_agent = self.get_next_agent()
|
||||
# if next_agent:
|
||||
# self.dfs(next_agent, next_task, results)
|
||||
# else:
|
||||
# print("No more agents available for further tasks.")
|
||||
|
||||
# def get_next_agent(self):
|
||||
# for agent in self.agents:
|
||||
# if agent.agent_name not in self.visited:
|
||||
# return agent
|
||||
# return None
|
||||
|
||||
# def run(self, task):
|
||||
# results = []
|
||||
# if self.agents:
|
||||
# initial_agent = self.agents[0]
|
||||
# self.dfs(initial_agent, task, results)
|
||||
# return results
|
||||
|
||||
|
||||
# # Usage example
|
||||
|
||||
# # Define agents with their specific roles or capabilities
|
||||
# agents = [
|
||||
# Agent(
|
||||
# agent_name="Financial-Analysis-Agent",
|
||||
# system_prompt="Perform financial analysis",
|
||||
# llm=OpenAIChat(
|
||||
# api_key=os.getenv("OPENAI_API_KEY"),
|
||||
# model_name="gpt-4o-mini",
|
||||
# temperature=0.1,
|
||||
# ),
|
||||
# max_loops=1,
|
||||
# autosave=True,
|
||||
# verbose=True,
|
||||
# streaming_on=True,
|
||||
# dynamic_temperature_enabled=True,
|
||||
# # saved_state_path="finance_agent.json",
|
||||
# user_name="swarms_corp",
|
||||
# retry_attempts=3,
|
||||
# context_length=200000,
|
||||
# ),
|
||||
# # Add more agents with specific tasks if needed
|
||||
# ]
|
||||
|
||||
# # Initialize the DFS swarm
|
||||
# dfs_swarm = DepthFirstSearchSwarm(agents)
|
||||
|
||||
# # Run the DFS swarm with a task
|
||||
# task = (
|
||||
# "Analyze the financial components of a startup's stock incentive plan."
|
||||
# )
|
||||
# results = dfs_swarm.run(task)
|
||||
|
||||
# # Print the results
|
||||
# for idx, result in enumerate(results):
|
||||
# print(f"Result from Agent {idx + 1}: {result}")
|
||||
|
||||
# ####################
|
||||
# import os
|
||||
# from swarms import Agent, OpenAIChat
|
||||
|
||||
# class DFSSwarm:
|
||||
# def __init__(self, agents):
|
||||
# self.agents = agents
|
||||
# self.visited = set()
|
||||
|
||||
# def dfs(self, agent_index, task, previous_output=None):
|
||||
# if agent_index >= len(self.agents):
|
||||
# return previous_output
|
||||
|
||||
# agent = self.agents[agent_index]
|
||||
|
||||
# # Use the previous agent's output as input to the current agent
|
||||
# if previous_output:
|
||||
# task = f"{task}\nPrevious result: {previous_output}"
|
||||
|
||||
# # Run the current agent's task
|
||||
# output = agent.run(task)
|
||||
|
||||
# # Add output to visited to avoid redundant work
|
||||
# self.visited.add(output)
|
||||
|
||||
# # Recursively call DFS on the next agent
|
||||
# return self.dfs(agent_index + 1, task, output)
|
||||
|
||||
# def run(self, task):
|
||||
# # Start DFS from the first agent
|
||||
# return self.dfs(0, task)
|
||||
|
||||
|
||||
# # Get the OpenAI API key from the environment variable
|
||||
# api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# # Create an instance of the OpenAIChat class for each agent
|
||||
# model = OpenAIChat(api_key=api_key, model_name="gpt-4o-mini", temperature=0.1)
|
||||
|
||||
# # Initialize multiple agents
|
||||
# agent1 = Agent(
|
||||
# agent_name="Agent-1",
|
||||
# system_prompt="Agent 1 prompt description here",
|
||||
# llm=model,
|
||||
# max_loops=1,
|
||||
# autosave=True,
|
||||
# dynamic_temperature_enabled=True,
|
||||
# verbose=True,
|
||||
# streaming_on=True,
|
||||
# user_name="swarms_corp",
|
||||
# )
|
||||
|
||||
# agent2 = Agent(
|
||||
# agent_name="Agent-2",
|
||||
# system_prompt="Agent 2 prompt description here",
|
||||
# llm=model,
|
||||
# max_loops=1,
|
||||
# autosave=True,
|
||||
# dynamic_temperature_enabled=True,
|
||||
# verbose=True,
|
||||
# streaming_on=True,
|
||||
# user_name="swarms_corp",
|
||||
# )
|
||||
|
||||
# # Add more agents as needed
|
||||
# # agent3 = ...
|
||||
# # agent4 = ...
|
||||
|
||||
# # Create the swarm with the agents
|
||||
# dfs_swarm = DFSSwarm(agents=[agent1, agent2])
|
||||
|
||||
# # Run the DFS swarm on a task
|
||||
# result = dfs_swarm.run("Analyze the financial components of a startup's stock incentives.")
|
||||
# print("Final Result:", result)
|
||||
|
||||
|
||||
import os
|
||||
from swarms import Agent, OpenAIChat
|
||||
|
||||
|
||||
class DFSSwarm:
|
||||
def __init__(self, agents):
|
||||
self.agents = agents
|
||||
self.visited = set()
|
||||
|
||||
def dfs(self, agent_index, task, previous_output=None):
|
||||
if agent_index >= len(self.agents):
|
||||
return previous_output
|
||||
|
||||
agent = self.agents[agent_index]
|
||||
|
||||
# If there is a previous output, include it in the task for the next agent
|
||||
if previous_output:
|
||||
task = f"{task}\nPrevious result: {previous_output}"
|
||||
|
||||
# Run the current agent's task and get the output
|
||||
output = agent.run(task)
|
||||
|
||||
# Log the output (optional)
|
||||
print(f"Agent {agent_index + 1} Output: {output}")
|
||||
|
||||
# Record the output; `visited` is kept for potential deduplication
self.visited.add(output)
|
||||
|
||||
# Recursively call DFS on the next agent
|
||||
return self.dfs(agent_index + 1, task, output)
|
||||
|
||||
def run(self, task):
|
||||
# Start DFS from the first agent and return the final result
|
||||
final_result = self.dfs(0, task)
|
||||
return final_result
|
||||
|
||||
|
||||
# Get the OpenAI API key from the environment variable
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# Create an instance of the OpenAIChat class for each agent
|
||||
model = OpenAIChat(
|
||||
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
|
||||
)
|
||||
|
||||
# Initialize multiple agents
|
||||
agent1 = Agent(
|
||||
agent_name="Agent-1",
|
||||
system_prompt="Analyze the financial components of a startup's stock incentives.",
|
||||
llm=model,
|
||||
# max_loops=2,
|
||||
# autosave=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
user_name="swarms_corp",
|
||||
)
|
||||
|
||||
agent2 = Agent(
|
||||
agent_name="Agent-2",
|
||||
system_prompt="Refine the analysis and identify any potential risks or benefits.",
|
||||
llm=model,
|
||||
# max_loops=2,
|
||||
# autosave=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
user_name="swarms_corp",
|
||||
)
|
||||
|
||||
# Add more agents as needed
|
||||
# agent3 = ...
|
||||
# agent4 = ...
|
||||
|
||||
# Create the swarm with the agents
|
||||
dfs_swarm = DFSSwarm(agents=[agent1, agent2])
|
||||
|
||||
# Run the DFS swarm on a task
|
||||
result = dfs_swarm.run(
|
||||
"Start with analyzing the financial components of a startup's stock incentives."
|
||||
)
|
||||
print("Final Result:", result)
|
@ -0,0 +1,65 @@
|
||||
from swarms.structs.base_swarm import BaseSwarm
|
||||
from swarms.structs.omni_agent_types import OmniAgentTypes
|
||||
from typing import Optional, Sequence, List
|
||||
from swarms.memory.base_vectordb import BaseVectorDatabase
|
||||
|
||||
|
||||
class FederatedSwarm(BaseSwarm):
|
||||
def __init__(
|
||||
self,
|
||||
name: Optional[str] = "FederatedSwarm",
|
||||
description: Optional[str] = "A swarm of swarms",
|
||||
swarms: Optional[Sequence[BaseSwarm]] = None,
|
||||
memory_system: BaseVectorDatabase = None,
|
||||
max_loops: Optional[int] = 4,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(
|
||||
name=name, description=description, *args, **kwargs
|
||||
)
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.swarms = swarms if swarms is not None else []
|
||||
self.memory_system = memory_system
|
||||
self.max_loops = max_loops
|
||||
|
||||
def add_swarm(self, swarm: BaseSwarm):
|
||||
self.swarms.append(swarm)
|
||||
|
||||
def remove_swarm(self, swarm: BaseSwarm):
|
||||
self.swarms.remove(swarm)
|
||||
|
||||
def get_swarm(self, name: str) -> BaseSwarm:
|
||||
for swarm in self.swarms:
|
||||
if swarm.name == name:
|
||||
return swarm
|
||||
return None
|
||||
|
||||
def get_swarm_agents(self) -> List[OmniAgentTypes]:
|
||||
agents = []
|
||||
for swarm in self.swarms:
|
||||
agents.extend(swarm.agents)
|
||||
return agents
|
||||
|
||||
def get_swarm_agent(self, name: str) -> OmniAgentTypes:
|
||||
for swarm in self.swarms:
|
||||
for agent in swarm.agents:
|
||||
if agent.name == name:
|
||||
return agent
|
||||
return None
|
||||
|
||||
def get_swarm_agent_by_id(self, agent_id: str) -> OmniAgentTypes:
|
||||
for swarm in self.swarms:
|
||||
for agent in swarm.agents:
|
||||
if agent.agent_id == agent_id:
|
||||
return agent
|
||||
return None
|
||||
|
||||
async def run_single_swarm(self, swarm: BaseSwarm, *args, **kwargs):
    return await swarm.run(*args, **kwargs)
|
||||
|
||||
async def run_multiple_swarms(self, *args, **kwargs):
|
||||
for swarm in self.swarms:
|
||||
await self.run_single_swarm(swarm, *args, **kwargs)
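
# Example (hypothetical): run every member swarm from synchronous code.
# A minimal sketch assuming each member swarm exposes an awaitable `run`.
#
# import asyncio
#
# federated = FederatedSwarm(swarms=[swarm_a, swarm_b])
# asyncio.run(federated.run_multiple_swarms("Audit the quarterly report"))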
|
@ -1,356 +0,0 @@
|
||||
"""
|
||||
|
||||
Boss -> json containig orders in JSON -> list of agents -> send orders to every agent
|
||||
|
||||
|
||||
# Requirements
|
||||
- Boss needs to know which agents are available [PROMPTING]
|
||||
- Boss needs to output json commands sending tasks to every agent with the task and name
|
||||
- Worker agents need to return a response to the boss
|
||||
-> Boss returns the final output to the user
|
||||
"""
|
||||
|
||||
import json
|
||||
from typing import List
|
||||
from swarms.structs.agent import Agent
|
||||
from swarms.structs.base_swarm import BaseSwarm
|
||||
from swarms.utils.loguru_logger import logger
|
||||
from pydantic import BaseModel, Field
|
||||
from swarms.structs.conversation import Conversation
|
||||
|
||||
|
||||
class HiearchicalRequestDict(BaseModel):
|
||||
task: str = Field(
|
||||
None,
|
||||
title="Task",
|
||||
description="The task to send to the director agent.",
|
||||
)
|
||||
agent_name: str = Field(
|
||||
None,
|
||||
title="Agent Name",
|
||||
description="The name of the agent to send the task to.",
|
||||
)
|
||||
|
||||
class Config:
|
||||
schema_extra = {
|
||||
"example": {
|
||||
"task": "task",
|
||||
"agent_name": "agent_name",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class HiearchicalSwarm(BaseSwarm):
|
||||
"""
|
||||
A class representing a hierarchical swarm.
|
||||
|
||||
Attributes:
|
||||
name (str): The name of the hierarchical swarm.
|
||||
description (str): The description of the hierarchical swarm.
|
||||
director (Agent): The director agent of the hierarchical swarm.
|
||||
agents (List[Agent]): The list of agents in the hierarchical swarm.
|
||||
max_loops (int): The maximum number of loops to run the swarm.
|
||||
long_term_memory_system (BaseSwarm): The long term memory system of the swarm.
|
||||
custom_parse_function (callable): A custom parse function for the swarm.
|
||||
|
||||
Methods:
|
||||
swarm_initialization(*args, **kwargs): Initializes the hierarchical swarm.
|
||||
find_agent_by_name(agent_name: str = None, *args, **kwargs): Finds an agent in the swarm by name.
|
||||
parse_function_activate_agent(json_data: str = None, *args, **kwargs): Parses JSON data and activates the selected agent.
|
||||
select_agent_and_send_task(name: str = None, task: str = None, *args, **kwargs): Selects an agent and sends a task to them.
|
||||
run(task: str = None, *args, **kwargs): Runs the hierarchical swarm.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str = None,
|
||||
description: str = None,
|
||||
director: Agent = None,
|
||||
agents: List[Agent] = None,
|
||||
max_loops: int = 1,
|
||||
long_term_memory_system: BaseSwarm = None,
|
||||
custom_parse_function: callable = None,
|
||||
rules: str = None,
|
||||
custom_director_prompt: str = None,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.director = director
|
||||
self.agents = agents
|
||||
self.max_loops = max_loops
|
||||
self.long_term_memory_system = long_term_memory_system
|
||||
self.custom_parse_function = custom_parse_function
|
||||
self.rules = rules
|
||||
self.custom_director_prompt = custom_director_prompt
|
||||
|
||||
# Check that the agents list is not empty
|
||||
self.agent_error_handling_check()
|
||||
|
||||
# Set the director to max_one loop
|
||||
if self.director.max_loops > 1:
|
||||
self.director.max_loops = 1
|
||||
|
||||
# Set the long term memory system of every agent to long term memory system
|
||||
if long_term_memory_system is not None:
|
||||
for agent in agents:
|
||||
agent.long_term_memory = long_term_memory_system
|
||||
|
||||
# Initialize the swarm
|
||||
self.swarm_initialization()
|
||||
|
||||
# Initialize the conversation message pool
|
||||
self.swarm_history = Conversation(
|
||||
time_enabled=True, *args, **kwargs
|
||||
)
|
||||
|
||||
# Set the worker agents as tools for the director
|
||||
for agent in self.agents:
|
||||
self.director.add_tool(agent)
|
||||
|
||||
# Set the system prompt for the director
|
||||
if custom_director_prompt is not None:
|
||||
self.director.system_prompt = custom_director_prompt
|
||||
else:
|
||||
self.director.system_prompt = self.has_sop()
|
||||
|
||||
def swarm_initialization(self, *args, **kwargs):
|
||||
"""
|
||||
Initializes the hierarchical swarm.
|
||||
|
||||
Args:
|
||||
*args: Additional positional arguments.
|
||||
**kwargs: Additional keyword arguments.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
"""
|
||||
logger.info(f"Initializing the hierarchical swarm: {self.name}")
|
||||
logger.info(f"Purpose of this swarm: {self.description}")
|
||||
|
||||
# Now log the number of agents and their names
|
||||
logger.info(f"Number of agents: {len(self.agents)}")
|
||||
logger.info(
|
||||
f"Agent names: {[agent.name for agent in self.agents]}"
|
||||
)
|
||||
|
||||
# Now see if agents is not empty
|
||||
if len(self.agents) == 0:
|
||||
logger.info("No agents found. Please add agents to the swarm.")
|
||||
return None
|
||||
|
||||
# Now see if director is not empty
|
||||
if self.director is None:
|
||||
logger.info(
|
||||
"No director found. Please add a director to the swarm."
|
||||
)
|
||||
return None
|
||||
|
||||
logger.info(
|
||||
f"Initialization complete for the hierarchical swarm: {self.name}"
|
||||
)
|
||||
|
||||
def agent_error_handling_check(self):
|
||||
"""
|
||||
Check if the agents list is not empty.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Raises:
|
||||
ValueError: If the agents list is empty.
|
||||
|
||||
"""
|
||||
if len(self.agents) == 0:
|
||||
raise ValueError(
|
||||
"No agents found. Please add agents to the swarm."
|
||||
)
|
||||
return None
|
||||
|
||||
def find_agent_by_name(self, agent_name: str = None, *args, **kwargs):
|
||||
"""
|
||||
Finds an agent in the swarm by name.
|
||||
|
||||
Args:
|
||||
agent_name (str): The name of the agent to find.
|
||||
|
||||
Returns:
|
||||
Agent: The agent with the specified name, or None if not found.
|
||||
|
||||
"""
|
||||
for agent in self.agents:
|
||||
if agent.name == agent_name:
|
||||
return agent
|
||||
return None
|
||||
|
||||
def parse_function_activate_agent(
|
||||
self, json_data: str = None, *args, **kwargs
|
||||
):
|
||||
"""
|
||||
Parse the JSON data and activate the selected agent.
|
||||
|
||||
Args:
|
||||
json_data (str): The JSON data containing the agent name and task.
|
||||
|
||||
Returns:
|
||||
str: The response from the activated agent.
|
||||
|
||||
Raises:
|
||||
json.JSONDecodeError: If the JSON data is invalid.
|
||||
|
||||
"""
|
||||
try:
|
||||
data = json.loads(json_data)
|
||||
|
||||
# Check if the data is a list of agent task pairs
|
||||
if isinstance(data, list):
|
||||
responses = []
|
||||
# Iterate over the list of agent task pairs
|
||||
for agent_task in data:
|
||||
name = agent_task.get("name")
|
||||
task = agent_task.get("task")
|
||||
|
||||
response = self.select_agent_and_send_task(
|
||||
name, task, *args, **kwargs
|
||||
)
|
||||
|
||||
responses.append(response)
|
||||
return responses
|
||||
else:
|
||||
name = data.get("name")
|
||||
task = data.get("task")
|
||||
|
||||
response = self.select_agent_and_send_task(
|
||||
name, task, *args, **kwargs
|
||||
)
|
||||
|
||||
return response
|
||||
except json.JSONDecodeError:
    logger.error("Invalid JSON data, try again.")
    raise
|
||||
|
||||
def select_agent_and_send_task(
|
||||
self, name: str = None, task: str = None, *args, **kwargs
|
||||
):
|
||||
"""
|
||||
Select an agent from the list and send a task to them.
|
||||
|
||||
Args:
|
||||
name (str): The name of the agent to send the task to.
|
||||
task (str): The task to send to the agent.
|
||||
|
||||
Returns:
|
||||
str: The response from the agent.
|
||||
|
||||
Raises:
|
||||
KeyError: If the agent name is not found in the list of agents.
|
||||
|
||||
"""
|
||||
try:
|
||||
# self.agents is a list of Agent objects, so look the name up
# rather than indexing with it
agent = self.find_agent_by_name(name)
if agent is None:
    return f"Invalid agent name: {name}. Please select one of the available agents."
|
||||
|
||||
response = agent.run(task, *args, **kwargs)
|
||||
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
raise e
|
||||
|
||||
def run(self, task: str = None, *args, **kwargs):
|
||||
"""
|
||||
Run the hierarchical swarm.
|
||||
|
||||
Args:
|
||||
task (str): The task to send to the director agent.
|
||||
|
||||
Returns:
|
||||
str: The response from the director agent.
|
||||
|
||||
Raises:
|
||||
Exception: If an error occurs while running the swarm.
|
||||
|
||||
"""
|
||||
try:
|
||||
loop = 0
|
||||
|
||||
# While the loop is less than max loops
|
||||
while loop < self.max_loops:
|
||||
# Run the director
|
||||
response = self.director.run(task, *args, **kwargs)
|
||||
|
||||
# Log the director's response
|
||||
self.swarm_history.add(self.director.agent_name, response)
|
||||
|
||||
# Run agents
|
||||
if self.custom_parse_function is not None:
|
||||
response = self.custom_parse_function(response)
|
||||
else:
|
||||
response = self.parse_function_activate_agent(response)
|
||||
|
||||
loop += 1
|
||||
|
||||
task = response
|
||||
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
raise e
|
||||
|
||||
def run_worker_agent(
|
||||
self, name: str = None, task: str = None, *args, **kwargs
|
||||
):
|
||||
"""
|
||||
Run the worker agent.
|
||||
|
||||
Args:
|
||||
name (str): The name of the worker agent.
|
||||
task (str): The task to send to the worker agent.
|
||||
|
||||
Returns:
|
||||
str: The response from the worker agent.
|
||||
|
||||
Raises:
|
||||
Exception: If an error occurs while running the worker agent.
|
||||
|
||||
"""
|
||||
try:
|
||||
# Find the agent by name
|
||||
agent = self.find_agent_by_name(name)
|
||||
|
||||
# Run the agent
|
||||
response = agent.run(task, *args, **kwargs)
|
||||
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"Error: {e}")
|
||||
raise e
|
||||
|
||||
def has_sop(self):
|
||||
# We need to check the name of the agents and their description or system prompt
|
||||
# TODO: Provide many shot examples of the agents available and even maybe what tools they have access to
|
||||
# TODO: Provide better reasoning prompt tiles, such as when do you use a certain agent and specific
|
||||
# Things NOT to do.
|
||||
return f"""
|
||||
|
||||
You're a director boss agent orchestrating worker agents with tasks. Select an agent most relevant to
|
||||
the input task and give them a task. If there is not an agent relevant to the input task then say so and be simple and direct.
|
||||
These are the agents available; call on them when you need them for a specific
task or operation:
|
||||
|
||||
Number of agents: {len(self.agents)}
|
||||
Agents Available: {
|
||||
[
|
||||
{"name": agent.name, "description": agent.system_prompt}
|
||||
for agent in self.agents
|
||||
]
|
||||
}
|
||||
|
||||
"""
|
@ -0,0 +1,293 @@
|
||||
import os
|
||||
from typing import List, Any
|
||||
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from swarms import Agent, OpenAIChat
|
||||
from swarms.models.openai_function_caller import OpenAIFunctionCaller
|
||||
from swarms.structs.concat import concat_strings
|
||||
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# Create an instance of the OpenAIChat class
|
||||
model = OpenAIChat(
|
||||
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
|
||||
)
|
||||
|
||||
|
||||
class AgentSpec(BaseModel):
|
||||
"""
|
||||
A class representing the specifications of an agent.
|
||||
|
||||
Attributes:
|
||||
agent_name (str): The name of the agent.
|
||||
system_prompt (str): The system prompt for the agent.
|
||||
agent_description (str): The description of the agent.
|
||||
max_tokens (int): The maximum number of tokens to generate in the API response.
|
||||
temperature (float): A parameter that controls the randomness of the generated text.
|
||||
context_window (int): The context window for the agent.
|
||||
task (str): The main task for the agent.
|
||||
"""
|
||||
|
||||
agent_name: str
|
||||
system_prompt: str
|
||||
agent_description: str
|
||||
task: str
|
||||
|
||||
|
||||
class AgentTeam(BaseModel):
|
||||
agents: List[AgentSpec] = Field(
|
||||
...,
|
||||
description="The list of agents in the team",
|
||||
)
|
||||
flow: str = Field(
|
||||
...,
|
||||
description="Agent Name -> ",
|
||||
)
|
||||
|
||||
|
||||
class SwarmSpec(BaseModel):
|
||||
"""
|
||||
A class representing the specifications of a swarm of agents.
|
||||
|
||||
Attributes:
|
||||
multiple_agents (List[AgentSpec]): The list of agents in the swarm.
|
||||
"""
|
||||
|
||||
swarm_name: str = Field(
|
||||
...,
|
||||
description="The name of the swarm: e.g., 'Marketing Swarm' or 'Finance Swarm'",
|
||||
)
|
||||
multiple_agents: List[AgentSpec]
|
||||
rules: str = Field(
|
||||
...,
|
||||
description="The rules for all the agents in the swarm: e.g., All agents must return code. Be very simple and direct",
|
||||
)
|
||||
plan: str = Field(
|
||||
...,
|
||||
description="The plan for the swarm: e.g., 'Create a marketing campaign for the new product launch.'",
|
||||
)
|
||||
|
||||
|
||||
class HierarchicalAgentSwarm:
|
||||
"""
|
||||
A class to create and manage a hierarchical swarm of agents.
|
||||
|
||||
Methods:
|
||||
__init__(system_prompt, max_tokens, temperature, base_model, parallel_tool_calls): Initializes the function caller.
|
||||
create_agent(agent_name, system_prompt, agent_description, max_tokens, temperature, context_window): Creates an individual agent.
|
||||
parse_json_for_agents_then_create_agents(function_call): Parses a JSON function call to create multiple agents.
|
||||
run(task): Runs the function caller to create and execute agents based on the provided task.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
director: Any = None,
|
||||
agents: List[Agent] = None,
|
||||
max_loops: int = 1,
|
||||
create_agents_on: bool = False,
|
||||
):
|
||||
"""
|
||||
Initializes the HierarchicalAgentSwarm with an OpenAIFunctionCaller.
|
||||
|
||||
Args:
|
||||
system_prompt (str): The system prompt for the function caller.
|
||||
max_tokens (int): The maximum number of tokens to generate in the API response.
|
||||
temperature (float): The temperature setting for text generation.
|
||||
base_model (BaseModel): The base model for the function caller.
|
||||
parallel_tool_calls (bool): Whether to run tool calls in parallel.
|
||||
"""
|
||||
self.director = director
|
||||
self.agents = agents
|
||||
self.max_loops = max_loops
|
||||
self.create_agents_on = create_agents_on
|
||||
|
||||
# Check if the agents are set
|
||||
self.agents_check()
|
||||
|
||||
def agents_check(self):
|
||||
if self.director is None:
|
||||
raise ValueError("The director is not set.")
|
||||
|
||||
# if self.agents is None:
|
||||
# raise ValueError("The agents are not set.")
|
||||
|
||||
if self.max_loops == 0:
|
||||
raise ValueError("The max_loops is not set.")
|
||||
|
||||
def create_agent(
|
||||
self,
|
||||
agent_name: str,
|
||||
system_prompt: str,
|
||||
agent_description: str,
|
||||
task: str = None,
|
||||
) -> str:
|
||||
"""
|
||||
Creates an individual agent.
|
||||
|
||||
Args:
|
||||
agent_name (str): The name of the agent.
|
||||
system_prompt (str): The system prompt for the agent.
|
||||
agent_description (str): The description of the agent.
|
||||
max_tokens (int): The maximum number of tokens to generate.
|
||||
temperature (float): The temperature for text generation.
|
||||
context_window (int): The context window size for the agent.
|
||||
|
||||
Returns:
|
||||
Agent: An instantiated agent object.
|
||||
"""
|
||||
# name = agent_name.replace(" ", "_")
|
||||
logger.info(f"Creating agent: {agent_name}")
|
||||
agent = Agent(
    agent_name=agent_name,
    llm=model,
    system_prompt=system_prompt,
    agent_description=agent_description,
    retry_attempts=1,
    verbose=False,
    dashboard=False,
)
self.agents.append(agent)

logger.info(f"Running agent: {agent_name}")
output = agent.run(task)
|
||||
|
||||
# create_file_in_folder(
|
||||
# agent_name.workspace_dir, f"{agent_name}_output.txt", str(output)
|
||||
# )
|
||||
|
||||
return output
|
||||
|
||||
    def parse_json_for_agents_then_create_agents(
        self, function_call: dict
    ) -> str:
        """
        Parses a JSON function call to create and run a set of agents.

        Args:
            function_call (dict): The JSON function call specifying the agents.

        Returns:
            str: The concatenated outputs of the created agents.
        """
        responses = []
        logger.info("Parsing JSON for agents")
        for agent in function_call["multiple_agents"]:
            out = self.create_agent(
                agent_name=agent["agent_name"],
                system_prompt=agent["system_prompt"],
                agent_description=agent["agent_description"],
                task=agent["task"],
            )
            responses.append(out)
        return concat_strings(responses)

    def run(self, task: str) -> str:
        """
        Runs the director to create and execute agents based on the provided task.

        Args:
            task (str): The task for which the agents need to be created and executed.

        Returns:
            str: The concatenated outputs of the created agents.
        """
        logger.info("Running the swarm")

        # Run the director (function caller) to plan the swarm
        function_call = self.director.run(task)

        # Log the director's function call
        self.log_director_function_call(function_call)

        # Parse the JSON function call and create agents -> run Agents
        return self.parse_json_for_agents_then_create_agents(function_call)

    def log_director_function_call(self, function_call: dict):
        # Log the swarm the director (boss) creates
        logger.info(f"Swarm Name: {function_call['swarm_name']}")
        # Log the plan
        logger.info(f"Plan: {function_call['plan']}")
        logger.info(
            f"Number of agents: {len(function_call['multiple_agents'])}"
        )

        for agent in function_call["multiple_agents"]:
            logger.info(f"Agent: {agent['agent_name']}")
            # logger.info(f"Task: {agent['task']}")
            logger.info(f"Description: {agent['agent_description']}")


# Example usage:
HIERARCHICAL_AGENT_SYSTEM_PROMPT = """
Here's a full-fledged system prompt for a director boss agent, complete with instructions and many-shot examples:

---

**System Prompt: Director Boss Agent**

### Role:
You are a Director Boss Agent responsible for orchestrating a swarm of worker agents. Your primary duty is to serve the user efficiently, effectively, and skillfully. You dynamically create new agents when necessary or utilize existing agents, assigning them tasks that align with their capabilities. You must ensure that each agent receives clear, direct, and actionable instructions tailored to their role.

### Key Responsibilities:
1. **Task Delegation:** Assign tasks to the most relevant agent. If no relevant agent exists, create a new one with an appropriate name and system prompt.
2. **Efficiency:** Ensure that tasks are completed swiftly and with minimal resource expenditure.
3. **Clarity:** Provide orders that are simple, direct, and actionable. Avoid ambiguity.
4. **Dynamic Decision Making:** Assess the situation and choose the most effective path, whether that involves using an existing agent or creating a new one.
5. **Monitoring:** Continuously monitor the progress of each agent and provide additional instructions or corrections as necessary.

### Instructions:
- **Identify the Task:** Analyze the input task to determine its nature and requirements.
- **Agent Selection/Creation:**
  - If an agent is available and suited for the task, assign the task to that agent.
  - If no suitable agent exists, create a new agent with a relevant system prompt.
- **Task Assignment:** Provide the selected agent with explicit and straightforward instructions.
- **Reasoning:** Justify your decisions when selecting or creating agents, focusing on the efficiency and effectiveness of task completion.

"""


director = OpenAIFunctionCaller(
    system_prompt=HIERARCHICAL_AGENT_SYSTEM_PROMPT,
    max_tokens=3000,
    temperature=0.4,
    base_model=SwarmSpec,
    parallel_tool_calls=False,
)

# Initialize the hierarchical agent swarm with the necessary parameters
swarm = HierarchicalAgentSwarm(
    director=director,
    max_loops=1,
)

# # Run the swarm with a task
# agents = swarm.run(
#     """
#     Create a swarm of agents for a marketing campaign to promote
#     the swarms workshop: [Workshop][Automating Business Operations with Hierarchical Agent Swarms][Swarms Framework + GPT4o],
#     create agents for twitter, linkedin, emails, facebook, and instagram.

#     The date is Saturday, August 17 4:00 PM - 5:00 PM

#     Link is: https://lu.ma/ew4r4s3i

#     """
# )


# Run the swarm with a task
agents = swarm.run(
    """
    Create a swarm of agents that generate Python code
    to send API requests to social media platforms through their APIs.
    Craft a single function to send a message to all platforms, add types, and write
    clean code. Each agent must generate code for a specific platform and
    return the Python code only.
    """
)
@ -0,0 +1,197 @@
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, List, Optional

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger


class MonteCarloSwarm(BaseSwarm):
    """
    MonteCarloSwarm leverages multiple agents to collaborate in a Monte Carlo fashion.
    Each agent's output is passed to the next, refining the result progressively.
    Supports parallel execution, dynamic agent selection, and custom result aggregation.

    Attributes:
        agents (List[Agent]): A list of agents that will participate in the swarm.
        parallel (bool): If True, agents will run in parallel.
        result_aggregator (Callable[[List[Any]], Any]): A function to aggregate results from agents.
        max_workers (Optional[int]): The maximum number of threads for parallel execution.
    """

    def __init__(
        self,
        agents: List[Agent],
        parallel: bool = False,
        result_aggregator: Optional[Callable[[List[Any]], Any]] = None,
        max_workers: Optional[int] = None,
        *args,
        **kwargs,
    ) -> None:
        """
        Initializes the MonteCarloSwarm with a list of agents.

        Args:
            agents (List[Agent]): A list of agents to include in the swarm.
            parallel (bool): If True, agents will run in parallel. Default is False.
            result_aggregator (Optional[Callable[[List[Any]], Any]]): A function to aggregate results from agents.
            max_workers (Optional[int]): The maximum number of threads for parallel execution.
        """
        super().__init__(agents=agents, *args, **kwargs)

        if not agents:
            raise ValueError("The agents list cannot be empty.")

        self.agents = agents
        self.parallel = parallel
        self.result_aggregator = (
            result_aggregator or self.default_aggregator
        )
        self.max_workers = max_workers or len(agents)

    def run(self, task: str) -> Any:
        """
        Runs the MonteCarloSwarm with the given input, passing the output of each agent
        to the next one in the list or running agents in parallel.

        Args:
            task (str): The initial input to provide to the first agent.

        Returns:
            Any: The final output after all agents have processed the input.
        """
        logger.info(
            f"Starting MonteCarloSwarm with parallel={self.parallel}"
        )

        if self.parallel:
            results = self._run_parallel(task)
        else:
            results = self._run_sequential(task)

        final_output = self.result_aggregator(results)
        logger.info(
            f"MonteCarloSwarm completed. Final output: {final_output}"
        )
        return final_output

    def _run_sequential(self, task: str) -> List[Any]:
        """
        Runs the agents sequentially, passing each agent's output to the next.

        Args:
            task (str): The initial input to provide to the first agent.

        Returns:
            List[Any]: A list of results from each agent.
        """
        results = []
        current_input = task
        for i, agent in enumerate(self.agents):
            logger.info(f"Agent {i + 1} processing sequentially...")
            current_output = agent.run(current_input)
            results.append(current_output)
            current_input = current_output
        return results

    def _run_parallel(self, task: str) -> List[Any]:
        """
        Runs the agents in parallel, each receiving the same initial input.

        Args:
            task (str): The initial input to provide to all agents.

        Returns:
            List[Any]: A list of results from each agent.
        """
        results = []
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_agent = {
                executor.submit(agent.run, task): agent
                for agent in self.agents
            }
            for future in as_completed(future_to_agent):
                try:
                    result = future.result()
                    results.append(result)
                    logger.info(f"Agent completed with result: {result}")
                except Exception as e:
                    logger.error(f"Agent encountered an error: {e}")
                    results.append(None)
        return results

    @staticmethod
    def default_aggregator(results: List[Any]) -> Any:
        """
        Default result aggregator that returns the list of results unchanged.

        Args:
            results (List[Any]): A list of results from agents.

        Returns:
            Any: The final aggregated result.
        """
        return results


def average_aggregator(results: List[float]) -> float:
    return sum(results) / len(results) if results else 0.0
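

# To aggregate differently, pass a custom callable to the constructor
# (a sketch; average_aggregator assumes every agent returns a numeric value):
# swarm = MonteCarloSwarm(agents=agents_list, result_aggregator=average_aggregator)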


# Example usage
if __name__ == "__main__":
    # Get the OpenAI API key from the environment variable
    api_key = os.getenv("OPENAI_API_KEY")

    # Create an instance of the OpenAIChat class
    model = OpenAIChat(
        api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
    )

    # Initialize the agents
    agents_list = [
        Agent(
            agent_name="Financial-Analysis-Agent-1",
            system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
            llm=model,
            max_loops=1,
            autosave=False,
            dashboard=False,
            verbose=True,
            streaming_on=True,
            dynamic_temperature_enabled=True,
            saved_state_path="finance_agent_1.json",
            retry_attempts=3,
            context_length=200000,
        ),
        Agent(
            agent_name="Financial-Analysis-Agent-2",
            system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
            llm=model,
            max_loops=1,
            autosave=False,
            dashboard=False,
            verbose=True,
            streaming_on=True,
            dynamic_temperature_enabled=True,
            saved_state_path="finance_agent_2.json",
            retry_attempts=3,
            context_length=200000,
        ),
        # Add more agents as needed
    ]

    # Initialize the MonteCarloSwarm with parallel execution enabled
    swarm = MonteCarloSwarm(
        agents=agents_list, parallel=True, max_workers=2
    )

    # Run the swarm with an initial query
    final_output = swarm.run(
        "What are the components of a startup's stock incentive equity plan?"
    )
    print("Final output:", final_output)
@ -0,0 +1,136 @@
import hashlib
import json
import os
from typing import Any, Dict, Optional, List


class PromptCache:
    """
    A framework to handle prompt caching for any LLM API. This reduces costs, latency,
    and allows reuse of long-form context across multiple API requests.
    """

    def __init__(
        self,
        cache_dir: str = "cache",
        llm_api_function: Optional[Any] = None,
        text: Optional[List[str]] = None,
    ):
        """
        Initializes the PromptCache instance.

        Args:
            cache_dir (str): Directory where cached responses are stored.
            llm_api_function (Optional[Any]): The function that interacts with the LLM API.
                It should accept a prompt and return the response.
            text (Optional[List[str]]): Optional list of texts stored on the instance (currently unused).
        """
        self.cache_dir = cache_dir
        self.llm_api_function = llm_api_function
        self.text = text

        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

    def _generate_cache_key(self, prompt: str) -> str:
        """
        Generates a unique cache key for a given prompt.

        Args:
            prompt (str): The prompt to generate a cache key for.

        Returns:
            str: A unique cache key.
        """
        return hashlib.md5(prompt.encode("utf-8")).hexdigest()

    def _cache_file_path(self, cache_key: str) -> str:
        """
        Constructs the file path for the cache file.

        Args:
            cache_key (str): The cache key for the prompt.

        Returns:
            str: The path to the cache file.
        """
        return os.path.join(self.cache_dir, f"{cache_key}.json")

    def _load_from_cache(self, cache_key: str) -> Optional[Dict[str, Any]]:
        """
        Loads a cached response if available.

        Args:
            cache_key (str): The cache key for the prompt.

        Returns:
            Optional[Dict[str, Any]]: The cached response, or None if not found.
        """
        cache_file = self._cache_file_path(cache_key)
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                return json.load(f)
        return None

    def _save_to_cache(
        self, cache_key: str, response: Dict[str, Any]
    ) -> None:
        """
        Saves the API response to the cache.

        Args:
            cache_key (str): The cache key for the prompt.
            response (Dict[str, Any]): The API response to be cached.
        """
        cache_file = self._cache_file_path(cache_key)
        with open(cache_file, "w") as f:
            json.dump(response, f)

    def get_response(self, prompt: str) -> Dict[str, Any]:
        """
        Retrieves the response for a prompt, using cache if available.

        Args:
            prompt (str): The prompt to retrieve the response for.

        Returns:
            Dict[str, Any]: The API response, either from cache or freshly fetched.
        """
        cache_key = self._generate_cache_key(prompt)
        cached_response = self._load_from_cache(cache_key)

        if cached_response is not None:
            return cached_response

        # If the response is not cached, use the LLM API to get the response
        if self.llm_api_function is None:
            raise ValueError("LLM API function is not defined.")

        response = self.llm_api_function(prompt)
        self._save_to_cache(cache_key, response)

        return response

    def clear_cache(self) -> None:
        """
        Clears the entire cache directory.
        """
        for cache_file in os.listdir(self.cache_dir):
            os.remove(os.path.join(self.cache_dir, cache_file))


# Example usage
if __name__ == "__main__":
    # Dummy LLM API function
    def mock_llm_api(prompt: str) -> Dict[str, Any]:
        return {"response": f"Mock response to '{prompt}'"}

    # Initialize the cache
    cache = PromptCache(llm_api_function=mock_llm_api)

    # Example prompts
    prompt1 = "What is the capital of France?"
    prompt2 = "Explain the theory of relativity."

    # Get responses
    print(cache.get_response(prompt1))
    print(cache.get_response(prompt2))
@ -0,0 +1,106 @@
import hashlib
from typing import Dict, Optional


class PromptCache:
    """
    A class to manage prompt caching for LLMs, allowing the reuse of context across multiple API requests.

    This reduces costs and latency, particularly for long prompts.

    Attributes:
        cache (Dict[str, str]): A dictionary to store cached prompts and their corresponding responses.
    """

    def __init__(self) -> None:
        """Initializes the PromptCache with an empty cache."""
        self.cache: Dict[str, str] = {}

    def _hash_prompt(self, prompt: str) -> str:
        """
        Generates a unique hash for a given prompt.

        Args:
            prompt (str): The prompt to hash.

        Returns:
            str: The generated hash.
        """
        return hashlib.sha256(prompt.encode()).hexdigest()

    def add_to_cache(self, prompt: str, response: str) -> None:
        """
        Adds a prompt and its corresponding response to the cache.

        Args:
            prompt (str): The prompt string.
            response (str): The response generated by the LLM.

        Returns:
            None
        """
        prompt_hash = self._hash_prompt(prompt)
        self.cache[prompt_hash] = response

    def get_from_cache(self, prompt: str) -> Optional[str]:
        """
        Retrieves a cached response for a given prompt, if available.

        Args:
            prompt (str): The prompt string to retrieve the cached response for.

        Returns:
            Optional[str]: The cached response if found, otherwise None.
        """
        prompt_hash = self._hash_prompt(prompt)
        return self.cache.get(prompt_hash)

    def clear_cache(self) -> None:
        """
        Clears the entire prompt cache.

        Returns:
            None
        """
        self.cache.clear()

    def cache_size(self) -> int:
        """
        Returns the number of items currently in the cache.

        Returns:
            int: The size of the cache.
        """
        return len(self.cache)

    def remove_from_cache(self, prompt: str) -> None:
        """
        Removes a specific prompt and its response from the cache.

        Args:
            prompt (str): The prompt string to remove from the cache.

        Returns:
            None
        """
        prompt_hash = self._hash_prompt(prompt)
        if prompt_hash in self.cache:
            del self.cache[prompt_hash]


# Example usage:

# Initialize the cache
prompt_cache = PromptCache()

# Add a prompt and response to the cache
prompt = "What is the capital of France?"
response = "The capital of France is Paris."
prompt_cache.add_to_cache(prompt, response)

# Retrieve the response from the cache
cached_response = prompt_cache.get_from_cache(prompt)
if cached_response:
    print("Cached response:", cached_response)
else:
    print("Prompt not found in cache.")
@ -0,0 +1,175 @@
import os
from swarms import Agent, OpenAIChat
from typing import List, Union, Callable
from collections import Counter

# Aggregation functions


def aggregate_most_common_result(results: List[str]) -> str:
    """
    Aggregate results using the most common result.

    Args:
        results (List[str]): List of results from each iteration.

    Returns:
        str: The most common result.
    """
    result_counter = Counter(results)
    most_common_result = result_counter.most_common(1)[0][0]
    return most_common_result


def aggregate_weighted_vote(results: List[str], weights: List[int]) -> str:
    """
    Aggregate results using a weighted voting system.

    Args:
        results (List[str]): List of results from each iteration.
        weights (List[int]): List of weights corresponding to each result.

    Returns:
        str: The result with the highest weighted vote.
    """
    weighted_results = Counter()
    for result, weight in zip(results, weights):
        weighted_results[result] += weight

    weighted_result = weighted_results.most_common(1)[0][0]
    return weighted_result


def aggregate_average_numerical(results: List[Union[str, float]]) -> float:
    """
    Aggregate results by averaging numerical outputs.

    Args:
        results (List[Union[str, float]]): List of numerical results from each iteration.

    Returns:
        float: The average of the numerical results.
    """
    numerical_results = [
        float(result) for result in results if is_numerical(result)
    ]
    if numerical_results:
        return sum(numerical_results) / len(numerical_results)
    else:
        return float("nan")  # or handle non-numerical case as needed


def aggregate_consensus(results: List[str]) -> Union[str, None]:
    """
    Aggregate results by checking if there's a consensus (all results are the same).

    Args:
        results (List[str]): List of results from each iteration.

    Returns:
        Union[str, None]: The consensus result if there is one, otherwise None.
    """
    if all(result == results[0] for result in results):
        return results[0]
    else:
        return None  # or handle lack of consensus as needed


def is_numerical(value: str) -> bool:
    """
    Check if a string can be interpreted as a numerical value.

    Args:
        value (str): The string to check.

    Returns:
        bool: True if the string is numerical, otherwise False.
    """
    try:
        float(value)
        return True
    except ValueError:
        return False
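

# Quick sanity checks of the aggregators on literal data (a minimal sketch,
# derived directly from the definitions above):
# aggregate_most_common_result(["A", "B", "A"])      -> "A"
# aggregate_weighted_vote(["A", "B"], [1, 3])        -> "B"
# aggregate_average_numerical(["1.0", "2.0", "x"])   -> 1.5
# aggregate_consensus(["A", "A"])                    -> "A"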


# MonteCarloSwarm class


class MonteCarloSwarm:
    def __init__(
        self,
        agents: List[Agent],
        iterations: int = 100,
        aggregator: Callable = aggregate_most_common_result,
    ):
        self.agents = agents
        self.iterations = iterations
        self.aggregator = aggregator

    def run(self, task: str) -> Union[str, float, None]:
        """
        Execute the Monte Carlo swarm, passing the output of each agent to the next.
        The final result is aggregated over multiple iterations using the provided aggregator.

        Args:
            task (str): The task for the swarm to execute.

        Returns:
            Union[str, float, None]: The final aggregated result.
        """
        aggregated_results = []

        for i in range(self.iterations):
            result = task
            for agent in self.agents:
                result = agent.run(result)
            aggregated_results.append(result)

        # Apply the selected aggregation function
        final_result = self.aggregator(aggregated_results)
        return final_result


# Example usage:

# Assuming you have the OpenAI API key set up and agents defined
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

agent1 = Agent(
    agent_name="Agent1",
    system_prompt="System prompt for agent 1",
    llm=model,
    max_loops=1,
    verbose=True,
)

agent2 = Agent(
    agent_name="Agent2",
    system_prompt="System prompt for agent 2",
    llm=model,
    max_loops=1,
    verbose=True,
)

# Create a MonteCarloSwarm with the agents and a selected aggregation function.
# Note: aggregate_weighted_vote takes a second weights argument, so it cannot
# be passed directly; see the lambda example at the bottom for weighted voting.
swarm = MonteCarloSwarm(
    agents=[agent1, agent2],
    iterations=1,
    aggregator=aggregate_most_common_result,
)

# Run the swarm on a specific task
final_output = swarm.run(
    "What are the components of a startup's stock incentive plan?"
)
print("Final Output:", final_output)

# You can easily switch the aggregation function by passing a different one to the constructor:
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=aggregate_average_numerical)

# If using weighted voting, you'll need to adjust the aggregator call to provide the weights:
# weights = list(range(100, 0, -1))  # Example weights for 100 iterations
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=lambda results: aggregate_weighted_vote(results, weights))
@ -1,110 +0,0 @@
import requests
from loguru import logger
import os


def fetch_secrets_from_vault(
    client_id: str = os.getenv("HCP_CLIENT_ID"),
    client_secret: str = os.getenv("HCP_CLIENT_SECRET"),
    organization_id: str = os.getenv("HCP_ORGANIZATION_ID"),
    project_id: str = os.getenv("HCP_PROJECT_ID"),
    app_id: str = os.getenv("HCP_APP_ID"),
) -> dict:
    """
    Fetch secrets from HashiCorp Vault using service principal authentication.

    Args:
        client_id (str): The client ID for the service principal.
        client_secret (str): The client secret for the service principal.
        organization_id (str): The ID of the organization in HCP.
        project_id (str): The ID of the project in HCP.
        app_id (str): The ID of the app in HCP.

    Returns:
        dict: A dictionary containing the fetched secrets.

    Raises:
        Exception: If there is an error retrieving the API token or secrets.
    """
    # Step 1: Generate the API Token
    token_url = "https://auth.idp.hashicorp.com/oauth2/token"
    token_data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "client_credentials",
        "audience": "https://api.hashicorp.cloud",
    }
    token_headers = {"Content-Type": "application/x-www-form-urlencoded"}

    logger.info("Requesting API token from HashiCorp Vault")
    response = requests.post(
        token_url, data=token_data, headers=token_headers
    )

    if response.status_code != 200:
        logger.error(
            f"Failed to retrieve API token. Status Code: {response.status_code}, Response: {response.text}"
        )
        response.raise_for_status()

    api_token = response.json().get("access_token")

    if not api_token:
        raise Exception("Failed to retrieve API token")

    # Step 2: Fetch Secrets
    secrets_url = f"https://api.cloud.hashicorp.com/secrets/2023-06-13/organizations/{organization_id}/projects/{project_id}/apps/{app_id}/open"
    secrets_headers = {"Authorization": f"Bearer {api_token}"}

    logger.info("Fetching secrets from HashiCorp Vault")
    response = requests.get(secrets_url, headers=secrets_headers)

    if response.status_code != 200:
        logger.error(
            f"Failed to fetch secrets. Status Code: {response.status_code}, Response: {response.text}"
        )
        response.raise_for_status()

    secrets = response.json()

    for secret in secrets["secrets"]:
        name = secret.get("name")
        value = secret.get("version", {}).get("value")
        print(f"Name: {name}, Value: {value}")

    return secrets


# def main() -> None:
#     """
#     Main function to fetch secrets from HashiCorp Vault and print them.

#     Raises:
#         EnvironmentError: If required environment variables are not set.
#     """
#     HCP_CLIENT_ID = os.getenv("HCP_CLIENT_ID")
#     HCP_CLIENT_SECRET = os.getenv("HCP_CLIENT_SECRET")
#     ORGANIZATION_ID = os.getenv("HCP_ORGANIZATION_ID")
#     PROJECT_ID = os.getenv("HCP_PROJECT_ID")
#     APP_ID = os.getenv("HCP_APP_ID")

#     # if not all([HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID]):
#     #     raise EnvironmentError("One or more environment variables are missing: HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID")

#     secrets = fetch_secrets_from_vault(
#         HCP_CLIENT_ID,
#         HCP_CLIENT_SECRET,
#         ORGANIZATION_ID,
#         PROJECT_ID,
#         APP_ID,
#     )
#     print(secrets)

#     for secret in secrets["secrets"]:
#         name = secret.get("name")
#         value = secret.get("version", {}).get("value")
#         print(f"Name: {name}, Value: {value}")


# if __name__ == "__main__":
#     main()
@ -0,0 +1,192 @@
from typing import Any, Dict, List, Optional, Union
import json
import requests
from loguru import logger
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
    OTLPSpanExporter,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Status, StatusCode


class TelemetryProcessor:
    """
    A class to handle telemetry processing, including converting data to JSON,
    exporting it to an API server, and tracing the operations with OpenTelemetry.

    Attributes:
        service_name (str): The name of the service for tracing.
        otlp_endpoint (str): The endpoint URL for the OTLP exporter.
        tracer (Tracer): The tracer object used for creating spans.

    Methods:
        process_data(data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None) -> str:
            Converts input data to a JSON string.

        export_to_server(json_data: Optional[str] = None, api_url: Optional[str] = None) -> None:
            Sends the JSON data to the specified API server.
    """

    def __init__(
        self,
        service_name: str = "telemetry_service",
        otlp_endpoint: str = "http://localhost:4318/v1/traces",
        *args,
        **kwargs,
    ) -> None:
        """
        Initializes the TelemetryProcessor class with configurable settings.

        Args:
            service_name (str): The name of the service for tracing.
            otlp_endpoint (str): The endpoint URL for the OTLP exporter.
        """
        self.service_name = service_name
        self.otlp_endpoint = otlp_endpoint

        # Configure OpenTelemetry Tracing
        resource = Resource(
            attributes={SERVICE_NAME: self.service_name}
        )
        trace.set_tracer_provider(TracerProvider(resource=resource))
        self.tracer = trace.get_tracer(__name__)

        # Configure OTLP Exporter to send spans to a collector (e.g., Jaeger, Zipkin)
        otlp_exporter = OTLPSpanExporter(endpoint=self.otlp_endpoint)
        span_processor = BatchSpanProcessor(otlp_exporter)
        trace.get_tracer_provider().add_span_processor(span_processor)

        logger.debug(
            f"TelemetryProcessor initialized with service_name={self.service_name}, otlp_endpoint={self.otlp_endpoint}"
        )

    def process_data(
        self,
        data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    ) -> str:
        """
        Converts input data to a JSON string.

        Args:
            data (Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]): The input data to be converted.
                Defaults to an empty dictionary if None is provided.

        Returns:
            str: The JSON string representation of the input data.

        Raises:
            TypeError: If the input data is not a dictionary or a list of dictionaries.
            ValueError: If the data cannot be serialized to JSON.
        """
        with self.tracer.start_as_current_span("process_data") as span:
            if data is None:
                data = {}
            logger.debug(f"Processing data: {data}")

            if not isinstance(data, (dict, list)):
                logger.error(
                    "Invalid data type. Expected a dictionary or a list of dictionaries."
                )
                span.set_status(
                    Status(StatusCode.ERROR, "Invalid data type")
                )
                raise TypeError(
                    "Input data must be a dictionary or a list of dictionaries."
                )

            try:
                json_data = json.dumps(data)
                logger.debug(f"Converted data to JSON: {json_data}")
                return json_data
            except (TypeError, ValueError) as e:
                logger.error(f"Failed to convert data to JSON: {e}")
                span.set_status(
                    Status(StatusCode.ERROR, "JSON serialization failed")
                )
                raise

    def export_to_server(
        self,
        json_data: Optional[str] = None,
        api_url: Optional[str] = None,
    ) -> None:
        """
        Sends the JSON data to the specified API server.

        Args:
            json_data (Optional[str]): The JSON data to be sent. Defaults to an empty JSON string if None is provided.
            api_url (Optional[str]): The URL of the API server to send the data to. Defaults to None.

        Raises:
            ValueError: If the api_url is None.
            requests.exceptions.RequestException: If there is an error sending the data to the server.
        """
        with self.tracer.start_as_current_span("export_to_server") as span:
            if json_data is None:
                json_data = "{}"
            if api_url is None:
                logger.error("API URL cannot be None.")
                span.set_status(
                    Status(StatusCode.ERROR, "API URL is missing")
                )
                raise ValueError("API URL cannot be None.")

            logger.debug(f"Exporting JSON data to server: {api_url}")
            headers = {"Content-Type": "application/json"}

            log = {
                "data": json_data,
            }

            try:
                # Send the payload as JSON so it matches the Content-Type header
                response = requests.post(
                    api_url, json=log, headers=headers
                )
                response.raise_for_status()
                logger.info(
                    f"Data successfully exported to {api_url}: {response.status_code}"
                )
            except requests.exceptions.RequestException as e:
                logger.error(f"Failed to export data to {api_url}: {e}")
                span.set_status(
                    Status(
                        StatusCode.ERROR,
                        "Failed to send data to API server",
                    )
                )
                raise


# # Example usage:

# if __name__ == "__main__":
#     # Example usage with custom service name and OTLP endpoint
#     processor = TelemetryProcessor(service_name="my_telemetry_service", otlp_endpoint="http://my-collector:4318/v1/traces")

#     # Sample data
#     telemetry_data = {
#         "device_id": "sensor_01",
#         "temperature": 22.5,
#         "humidity": 60,
#         "timestamp": "2024-08-15T12:34:56Z"
#     }

#     # Processing data
#     try:
#         json_data = processor.process_data(telemetry_data)
#     except Exception as e:
#         logger.error(f"Processing error: {e}")
#         # Handle error accordingly

#     # Exporting data to an API server
#     api_url = "https://example.com/api/telemetry"
#     try:
#         processor.export_to_server(json_data, api_url)
#     except Exception as e:
#         logger.error(f"Export error: {e}")
#         # Handle error accordingly
@ -1,73 +0,0 @@
from typing import List
from swarms.structs.base_structure import BaseStructure
from swarms.tools.py_func_to_openai_func_str import (
    get_openai_function_schema_from_func,
)
from swarms.utils.loguru_logger import logger


class ToolDatasetGenerator(BaseStructure):
    """
    Initialize the ToolDatasetGenerator.

    Args:
        functions (List[callable], optional): List of functions to generate examples from. Defaults to None.
        autosave (bool, optional): Flag to enable autosaving generated examples. Defaults to False.
        output_files (List[str], optional): List of output file paths for autosaving. Defaults to None.
        verbose (bool, optional): Flag to log tool metadata on initialization. Defaults to False.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.
    """

    def __init__(
        self,
        functions: List[callable] = None,
        autosave: bool = False,
        output_files: List[str] = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        super(ToolDatasetGenerator, self).__init__(*args, **kwargs)
        self.functions = functions
        self.autosave = autosave
        self.output_files = output_files
        self.verbose = verbose

        if self.verbose:
            self.log_tool_metadata()

    def run(self, *args, **kwargs):
        """
        Run the ToolDatasetGenerator.

        Args:
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
        """
        try:
            for function in self.functions:
                function_str = get_openai_function_schema_from_func(
                    function
                )
                logger.info(function_str)
                if self.autosave:
                    for file in self.output_files:
                        with open(file, "a") as f:
                            # Coerce the schema to a string before writing
                            f.write(str(function_str) + "\n")
                # agent_response = agent.run(sources_prompts)
                # return agent_response
        except Exception as e:
            logger.error(f"An error occurred: {str(e)}")

    def log_tool_metadata(self):
        """
        Log the number of tools and their metadata.
        """
        try:
            num_tools = len(self.functions)
            logger.info(f"Number of tools: {num_tools}")
            for i, function in enumerate(self.functions):
                logger.info(f"Tool {i+1} metadata:")
                logger.info(f"Name: {function.__name__}")
        except Exception as e:
            logger.error(f"An error occurred: {str(e)}")
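

# Example usage (a minimal sketch; `add` is a hypothetical tool function):
#
# def add(a: int, b: int) -> int:
#     """Add two integers."""
#     return a + b
#
# generator = ToolDatasetGenerator(
#     functions=[add],
#     autosave=True,
#     output_files=["tool_dataset.jsonl"],
#     verbose=True,
# )
# generator.run()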