[FEATS][File processing] [hackathon for agents]

pull/411/head
Kye 10 months ago
parent cddeb3c3af
commit 60b4101c14

@ -3,12 +3,16 @@ from swarms import Agent, OpenAIChat
## Initialize the workflow
agent = Agent(
llm=OpenAIChat(),
max_loops=1,
max_loops="auto",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
)
# Run the workflow on a task
agent("Find a chick fil a equivalent in hayes valley")
agent(
"Generate a transcript for a youtube video on what swarms are!"
" Output a <DONE> token when done."
)

@ -59,7 +59,7 @@ nav:
- Overview: "index.md"
- Contributing: "contributing.md"
- Limitations of Individual Agents: "limits_of_individual_agents.md"
- Swarms:
- Swarms Framework:
- Overview: "swarms/index.md"
- swarms.agents:
- Agents:

@ -0,0 +1,40 @@
# Import the necessary libraries.
import asyncio
import websockets
# Create a list of public group chats.
public_group_chats = []
# Create a function to handle incoming websocket connections.
async def handle_websocket(websocket, path):
# Get the username of the user.
username = await websocket.recv()
print(f"New connection from {username}")
# Add the user to the list of public group chats.
public_group_chats.append(websocket)
try:
# Wait for the user to send a message.
while True:
message = await websocket.recv()
print(f"{username}: {message}")
# Broadcast the message to all other users in the public group chats.
for other_websocket in public_group_chats:
if other_websocket != websocket:
await other_websocket.send(
f"{username}: {message}"
)
finally:
# Remove the user from the list of public group chats.
public_group_chats.remove(websocket)
print(f"{username} has disconnected")
# Create and run the websocket server. websockets.serve() returns an
# awaitable/async context manager rather than a coroutine, so it has to be
# started inside a coroutine instead of being passed straight to asyncio.run().
async def main():
    async with websockets.serve(handle_websocket, "localhost", 8000):
        await asyncio.Future()  # run until cancelled

asyncio.run(main())
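A minimal client sketch for exercising this server, assuming it is already running on localhost:8000. The first message sent is treated as the username and later messages are broadcast to every other connected client; the function name and username below are illustrative.

import asyncio
import websockets

async def chat_client(username):
    # Connect to the public group chat server started above.
    async with websockets.connect("ws://localhost:8000") as websocket:
        # The server reads the username as the first message.
        await websocket.send(username)
        await websocket.send("Hello from the example client!")
        # Print anything broadcast by the other participants.
        async for incoming in websocket:
            print(incoming)

asyncio.run(chat_client("example_user"))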

@ -0,0 +1,48 @@
import openai
from decouple import config
# Load the OpenAI API key from the environment variable
openai.api_key = config("OPENAI_API_KEY")
# Define the prompt for the LLM
prompt = """
I want to create an LLM that can help me get in-game gold from people in World of Warcraft. The LLM should be able to:
* Generate persuasive messages to send to players asking for gold
* Detect when a player is likely to give gold
* Respond to common objections from players
Here is an example of a conversation between the LLM and a player:
**LLM**: Hi there! I'm an AI assistant who can help you get in-game gold. Would you be interested in learning more?
**Player**: Sure, why not.
**LLM**: Great! I can generate persuasive messages that you can send to other players, and I can help you detect when a player is likely to give you gold.
**Player**: That sounds great! Can you give me an example of a message that I could send?
**LLM**: Sure, here is an example message:
"Hi [player name],
I'm a big fan of your character and your playing style. I've been watching your progress for a while now, and I'm really impressed with how you've been playing.
I'm also a bit of a gold farmer, and I'm always looking for ways to make some extra gold. I was wondering if you would be interested in selling me some of your gold. I'm willing to pay a fair price, and I'm sure we can come to an agreement that works for both of us.
Please let me know if you're interested. Thanks for your time!"
**Player**: That's a great message! I'll definitely give it a try.
**LLM**: I'm glad to hear that. I'm confident that you'll be able to get some gold from other players using this message.
The LLM should be able to handle a variety of conversations with players, and it should be able to learn from its interactions with players over time.
Please write the code for this LLM in Python.
"""
# Send the prompt to the LLM. Without max_tokens the completion defaults to
# 16 tokens, which would truncate the generated code almost immediately.
response = openai.Completion.create(
    engine="text-davinci-003", prompt=prompt, max_tokens=1024
)
# Get the code from the LLM's response
code = response["choices"][0]["text"]
# Print the code
print(code)

@ -0,0 +1,39 @@
import tkinter as tk
# Create the main window
root = tk.Tk()
root.title("Chat Visualization")
# Create the text area for the chat
chat_area = tk.Text(root, height=20, width=60)
chat_area.pack()
# Create the input field for the user message
input_field = tk.Entry(root)
input_field.pack()
# Create the send button
send_button = tk.Button(root, text="Send")
send_button.pack()
# Define the function to send the message
def send_message():
# Get the message from the input field
message = input_field.get()
# Clear the input field
input_field.delete(0, tk.END)
# Add the message to the chat area
chat_area.insert(tk.END, message + "\n")
# Scroll to the bottom of the chat area
chat_area.see(tk.END)
# Bind the send button to the send_message function
send_button.config(command=send_message)
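# Optional, illustrative additions (a sketch, not part of the script above):
# let the Enter key trigger the same handler as the Send button, and provide a
# helper for displaying messages that arrive from elsewhere. The helper name
# append_incoming_message is hypothetical.
input_field.bind("<Return>", lambda event: send_message())

def append_incoming_message(text):
    # Append a received message and keep the chat scrolled to the newest line.
    chat_area.insert(tk.END, text + "\n")
    chat_area.see(tk.END)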
# Start the main loop
root.mainloop()

@ -0,0 +1,58 @@
import os
import random
# Create a list of character names
character_names = ["Alice", "Bob", "Charlie", "Dave", "Eve"]
# Create a dictionary of character voices
character_voices = {
"Alice": "Alice.wav",
"Bob": "Bob.wav",
"Charlie": "Charlie.wav",
"Dave": "Dave.wav",
"Eve": "Eve.wav",
}
# Get the user's input
conversation_topic = input(
"What would you like the characters to talk about? "
)
# Create a function to generate a random conversation
def generate_conversation(characters, topic):
# Choose two distinct characters to talk (calling random.choice twice
# could pick the same character for both sides of the conversation)
character1, character2 = random.sample(characters, 2)
# Generate the conversation
conversation = [
(
f"{character1}: Hello, {character2}. I'd like to talk"
f" about {topic}."
),
(
f"{character2}: Sure, {character1}. What do you want to"
" know?"
),
(
f"{character1}: I'm just curious about your thoughts on"
" the matter."
),
f"{character2}: Well, I think it's a very interesting topic.",
f"{character1}: I agree. I'm glad we're talking about this.",
]
# Return the conversation
return conversation
# Generate the conversation
conversation = generate_conversation(
character_names, conversation_topic
)
# Play the conversation. Note that afplay is macOS-only, and each character
# maps to a single pre-recorded voice clip rather than speech for the line.
for line in conversation:
    print(line)
    os.system(f"afplay {character_voices[line.split(':')[0]]}")

@ -0,0 +1,36 @@
import discord
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Discord Bot Setup (discord.py 2.x requires explicit intents, and the
# message-content intent is needed to read the "!generate" commands)
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
# AI Model Setup
tokenizer = AutoTokenizer.from_pretrained(
"facebook/blenderbot-400M-distill"
)
model = AutoModelForSeq2SeqLM.from_pretrained(
"facebook/blenderbot-400M-distill"
)
@client.event
async def on_ready():
print(f"Logged in as {client.user.name}")
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith("!generate"):
input = message.content[len("!generate") :]
inputs = tokenizer(input, return_tensors="pt")
outputs = model.generate(**inputs)
generated_text = tokenizer.batch_decode(
outputs, skip_special_tokens=True
)
await message.channel.send(generated_text[0])
client.run("YOUR_BOT_TOKEN")

@ -0,0 +1,114 @@
# OpenMind.bot streamlines social interactions between personalized bots that represent users, media, and influencers. It reduces misunderstandings by keeping conversations context-aware, and it follows each exchange with a summary or an audio recap so communication stays efficient.
import json
import datetime
import pytz
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route("/api/v1/conversations", methods=["POST"])
def create_conversation():
# Create a new conversation
conversation = {
"user_id": request.json["user_id"],
"bot_id": request.json["bot_id"],
"messages": [],
}
# Save the conversation to the database
with open("conversations.json", "w") as f:
json.dump(conversation, f)
return jsonify(conversation)
@app.route("/api/v1/conversations/<conversation_id>", methods=["GET"])
def get_conversation(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
conversation = json.load(f)
# Return the conversation
return jsonify(conversation)
@app.route(
"/api/v1/conversations/<conversation_id>/messages",
methods=["POST"],
)
def create_message(conversation_id):
# Create a new message
message = {
"user_id": request.json["user_id"],
"bot_id": request.json["bot_id"],
"text": request.json["text"],
"timestamp": datetime.datetime.now(pytz.utc).isoformat(),
}
# Get the conversation from the database
with open("conversations.json", "r") as f:
conversation = json.load(f)
# Add the message to the conversation
conversation["messages"].append(message)
# Save the conversation to the database
with open("conversations.json", "w") as f:
json.dump(conversation, f)
return jsonify(message)
@app.route(
"/api/v1/conversations/<conversation_id>/messages",
methods=["GET"],
)
def get_messages(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
conversation = json.load(f)
# Return the messages
return jsonify(conversation["messages"])
@app.route(
"/api/v1/conversations/<conversation_id>/summary", methods=["GET"]
)
def get_summary(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
conversation = json.load(f)
# Create a summary of the conversation
summary = ""
for message in conversation["messages"]:
summary += message["text"] + "\n"
# Return the summary
return jsonify(summary)
@app.route(
"/api/v1/conversations/<conversation_id>/audio_recap",
methods=["GET"],
)
def get_audio_recap(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
conversation = json.load(f)
# Create an audio recap of the conversation
audio_recap = ""
for message in conversation["messages"]:
audio_recap += message["text"] + "\n"
# Return the audio recap
return jsonify(audio_recap)
if __name__ == "__main__":
app.run()
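A short client sketch against this API, assuming the Flask app is running on its default http://127.0.0.1:5000; the "demo" conversation id and the user/bot ids are placeholders (the endpoints above all read and write the same conversations.json file regardless of id).

import requests

BASE = "http://127.0.0.1:5000/api/v1"

# Create a conversation between a user's bot and another bot.
conversation = requests.post(
    f"{BASE}/conversations",
    json={"user_id": "user-1", "bot_id": "bot-1"},
).json()

# Post a message into it, then fetch the messages and the text summary.
requests.post(
    f"{BASE}/conversations/demo/messages",
    json={"user_id": "user-1", "bot_id": "bot-1", "text": "Hello there!"},
)
print(requests.get(f"{BASE}/conversations/demo/messages").json())
print(requests.get(f"{BASE}/conversations/demo/summary").json())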

@ -0,0 +1,41 @@
import requests
from bs4 import BeautifulSoup
def arxiv_search(query):
"""
Performs a keyword search on arxiv.org for the given query using the public arXiv API.
Args:
query: The query to search for.
Returns:
A list of search results.
"""
# Make a request to arxiv.org
response = requests.get(
"http://export.arxiv.org/api/query",
params={"search_query": query, "start": 0, "max_results": 10},
)
# Parse the response
soup = BeautifulSoup(response.content, "html.parser")
# Extract the search results
results = []
for result in soup.find_all("entry"):
results.append(
{
"title": result.find("title").text,
"author": result.find("author").text,
"abstract": result.find("summary").text,
"link": result.find("link")["href"],
}
)
return results
search = arxiv_search("quantum computing")
print(search)
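The start and max_results parameters used above can also be advanced to page through larger result sets; a brief sketch (the helper name is illustrative):

def arxiv_search_paged(query, pages=3, page_size=10):
    # Fetch several pages of titles by advancing the start offset.
    all_results = []
    for page in range(pages):
        response = requests.get(
            "http://export.arxiv.org/api/query",
            params={
                "search_query": query,
                "start": page * page_size,
                "max_results": page_size,
            },
        )
        soup = BeautifulSoup(response.content, "html.parser")
        all_results.extend(
            entry.find("title").text for entry in soup.find_all("entry")
        )
    return all_results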

@ -0,0 +1,178 @@
import concurrent.futures
import csv
import os
from swarms import Gemini, Agent, SwarmNetwork, ConcurrentWorkflow
from swarms.memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import logger
# Load ENV
load_dotenv()
gemini = Gemini(
model_name="gemini-pro",
gemini_api_key=os.getenv("GEMINI_API_KEY"),
)
# SwarmNetwork
swarm_network = SwarmNetwork(
logging_enabled=True,
)
# ConcurrentWorkflow
workflow = ConcurrentWorkflow(
task_pool=None,
max_workers=10,
)
# memory
memory = ChromaDB(output_dir="swarm_hackathon")
def execute_concurrently(callable_functions, max_workers=5):
"""
Executes callable functions concurrently using multithreading.
Parameters:
- callable_functions: A list of tuples, each containing the callable function and its arguments.
For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
- max_workers: The maximum number of threads to use.
Returns:
- results: A list of results returned by the callable functions. If an error occurs in any function,
the exception object will be placed at the corresponding index in the list.
"""
results = [None] * len(callable_functions)
def worker(fn, args, kwargs, index):
try:
result = fn(*args, **kwargs)
results[index] = result
except Exception as e:
results[index] = e
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
# Wait for all threads to complete
concurrent.futures.wait(futures)
return results
# # For each row in the dataframe, create an agent and add it to the swarm network
# for index, row in df.iterrows():
# agent_name = row["Project Name"] + "agent"
# system_prompt = row["Lightning Proposal"]
# agent = Agent(
# llm=gemini,
# max_loops="auto",
# stopping_token="<DONE>",
# system_prompt=system_prompt,
# agent_name=agent_name,
# long_term_memory=ChromaDB(output_dir="swarm_hackathon"),
# )
# swarm_network.add_agent(agent)
# out = swarm_network.list_agents()
# Adjusting the function to extract specific column values
def extract_and_create_agents(
csv_file_path: str, target_columns: list
):
"""
Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
creates an Agent for each, and adds it to the swarm network.
Parameters:
- csv_file_path: The path to the CSV file.
- target_columns: A list of column names to extract values from.
"""
agents = []
with open(csv_file_path, mode="r", encoding="utf-8") as file:
reader = csv.DictReader(file)
for row in reader:
project_name = row[target_columns[0]]
lightning_proposal = row[target_columns[1]]
# Example of creating and adding an agent based on the project name and lightning proposal
agent_name = f"{project_name} agent"
print(agent_name) # For demonstration
# Create the agent
logger.info("Creating agent...")
agent = Agent(
llm=gemini,
max_loops=1,
stopping_token="<DONE?>",
sop=None,
system_prompt=(
"Transform an app idea into a very simple python"
" app in markdown. Return all the python code in"
" a single markdown file."
),
long_term_memory=memory,
)
# Log the agent
logger.info(
f"Agent created: {agent_name} with long term memory"
)
agents.append(agent)
# Create the code for each project
output = agent.run(
(
f"Create the code for the {lightning_proposal} in"
" python and wrap it in markdown and return it"
),
None,
)
print(output)
# Parse the output
output = extract_code_from_markdown(output)
# Create the file
output = create_file(output, f"{project_name}.py")
# Log the project created
logger.info(
f"Project {project_name} created: {output} at file"
f" path {project_name}.py"
)
print(output)
return agents
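# For illustration only: the function above assumes a CSV shaped roughly like
# the hypothetical example below (the real hackathon file and its exact
# headers may differ; note the trailing space in "Lightning Proposal "):
#
#   Project Name,Lightning Proposal 
#   SwarmChat,An app that lets several agents chat in one shared room
#   PaperDigest,An app that summarizes new arXiv papers every morning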
# Specific columns to extract
target_columns = ["Project Name", "Lightning Proposal "]
# Use the adjusted function
specific_column_values = extract_and_create_agents(
"text.csv", target_columns
)
# Display the extracted column values
print(specific_column_values)
# Concurrently execute the function
output = execute_concurrently(
[
(extract_and_create_agents, ("text.csv", target_columns), {}),
],
max_workers=5,
)
print(output)
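Because execute_concurrently stores an exception object at the index of any task that fails instead of raising, a small check like this sketch makes failures explicit rather than leaving them buried in the printed list:

for index, result in enumerate(output):
    if isinstance(result, Exception):
        print(f"Task {index} failed: {result}")
    else:
        print(f"Task {index} succeeded and created {len(result)} agents")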

@ -1,46 +0,0 @@
import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import Agent, OpenAIChat, SwarmNetwork
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
temperature=0.5,
openai_api_key=api_key,
)
## Initialize the workflow
agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
agents=[agent, agent2, agent3],
)
# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)
# Run the workflow on a task
out = swarmnet.run_single_agent(
agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
# Run all the agents in the swarm network on a task
out = swarmnet.run_many_agents(
"Generate a 10,000 word blog on health and wellness."
)
print(out)

@ -1,11 +1,14 @@
import pandas as pd
from swarms import dataframe_to_text
# # Example usage:
df = pd.DataFrame({
'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9],
})
df = pd.DataFrame(
{
"A": [1, 2, 3],
"B": [4, 5, 6],
"C": [7, 8, 9],
}
)
print(dataframe_to_text(df))

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "4.2.1"
version = "4.2.6"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -0,0 +1,73 @@
# Import the OpenAIChat model and the Agent struct
import os
from swarms import (
Agent,
OpenAIChat,
SwarmNetwork,
Anthropic,
TogetherLLM,
)
from swarms.memory import ChromaDB
from dotenv import load_dotenv
# load the environment variables
load_dotenv()
# Initialize the ChromaDB
memory = ChromaDB()
# Initialize the language model
llm = OpenAIChat(
temperature=0.5,
)
# Initialize the Anthropic
anthropic = Anthropic(max_tokens=3000)
# TogetherLLM
together_llm = TogetherLLM(
together_api_key=os.getenv("TOGETHER_API_KEY"), max_tokens=3000
)
## Initialize the workflow
agent = Agent(
llm=anthropic,
max_loops=1,
agent_name="Social Media Manager",
long_term_memory=memory,
)
agent2 = Agent(
llm=llm,
max_loops=1,
agent_name=" Product Manager",
long_term_memory=memory,
)
agent3 = Agent(
llm=together_llm,
max_loops=1,
agent_name="SEO Manager",
long_term_memory=memory,
)
# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
agents=[agent, agent2, agent3], logging_enabled=True
)
# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)
# Run the workflow on a task
out = swarmnet.run_single_agent(
agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
# # Run all the agents in the swarm network on a task
# out = swarmnet.run_many_agents(
# f"Summarize the blog and create a social media post: {out}"
# )
# print(out)

@ -186,6 +186,8 @@ class ChromaDB:
Returns:
- list: A list of paths to each file in the directory and its subdirectories.
"""
added_to_db = False
image_extensions = [
".jpg",
".jpeg",
@ -204,4 +206,5 @@ class ChromaDB:
if images:
added_to_db = self.add(img_urls=[images])
print(f"{len(images)} images added to Database ")
return added_to_db

@ -383,8 +383,8 @@ class Anthropic(LLM, _AnthropicCommon):
def raise_warning(cls, values: Dict) -> Dict:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. Please use `from"
" langchain.chat_models import ChatAnthropic` instead"
"There may be an updated version of"
f" {cls.__name__} available."
)
return values

@ -4,6 +4,7 @@ import asyncio
import functools
import logging
import sys
from importlib.metadata import version
from typing import (
AbstractSet,
Any,
@ -28,6 +29,7 @@ from langchain.utils import (
get_pydantic_field_names,
)
from langchain.utils.utils import build_extra_kwargs
from packaging.version import parse
from tenacity import (
RetryCallState,
before_sleep_log,
@ -40,12 +42,6 @@ from tenacity import (
logger = logging.getLogger(__name__)
from importlib.metadata import version
from packaging.version import parse
logger = logging.getLogger(__name__)
@functools.lru_cache
def _log_error_once(msg: str) -> None:
@ -275,7 +271,7 @@ class BaseOpenAI(BaseLLM):
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: str | None = None
openai_api_key: str | None = None # | None = None
openai_api_base: str | None = None
openai_organization: str | None = None
# to support explicit proxy for OpenAI
@ -284,7 +280,7 @@ class BaseOpenAI(BaseLLM):
"""Batch size to use when passing multiple documents to generate."""
request_timeout: float | tuple[float, float] | None = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: dict[str, float] | None = Field(default_factory=dict)
logit_bias: dict[str, float] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""

@ -28,10 +28,6 @@ from swarms.utils.data_to_text import data_to_text
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.token_count_tiktoken import limit_tokens_from_string
from swarms.utils.video_to_frames import (
save_frames_as_images,
video_to_frames,
)
# Utils
@ -557,7 +553,6 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
video: Optional[str] = None,
*args,
**kwargs,
):
@ -576,12 +571,6 @@ class Agent:
"""
try:
if video:
video_to_frames(video)
frames = save_frames_as_images(video)
for frame in frames:
img = frame
# Activate Autonomous agent message
self.activate_autonomous_agent()
@ -594,6 +583,8 @@ class Agent:
loop_count = 0
response = None
# While the max_loops is auto or the loop count is less than the max_loops
while (
self.max_loops == "auto"
@ -671,10 +662,6 @@ class Agent:
):
break
# if self.parse_done_token:
# if parse_done_token(response):
# break
if self.stopping_func is not None:
if self.stopping_func(response) is True:
break

@ -68,7 +68,7 @@ class SwarmNetwork(BaseStructure):
def __init__(
self,
agents: List[Agent],
agents: List[Agent] = None,
idle_threshold: float = 0.2,
busy_threshold: float = 0.7,
api_enabled: Optional[bool] = False,
@ -84,6 +84,7 @@ class SwarmNetwork(BaseStructure):
self.lock = threading.Lock()
self.api_enabled = api_enabled
self.logging_enabled = logging_enabled
self.agent_pool = []
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
@ -91,6 +92,11 @@ class SwarmNetwork(BaseStructure):
if api_enabled:
self.api = FastAPI()
# For each agent in the pool, run it on it's own thread
if agents is not None:
for agent in agents:
self.agent_pool.append(agent)
def add_task(self, task):
"""Add task to the task queue
@ -163,11 +169,10 @@ class SwarmNetwork(BaseStructure):
"""
self.logger.info(f"Running task {task} on agent {agent_id}")
try:
for agent in self.agents:
for agent in self.agent_pool:
if agent.id == agent_id:
return agent.run(task, *args, **kwargs)
# self.logger.info(f"No agent found with ID {agent_id}")
raise ValueError(f"No agent found with ID {agent_id}")
out = agent.run(task, *args, **kwargs)
return out
except Exception as error:
self.logger.error(f"Error running task on agent: {error}")
raise error
@ -187,33 +192,27 @@ class SwarmNetwork(BaseStructure):
try:
return [
agent.run(task, *args, **kwargs)
for agent in self.agents
for agent in self.agent_pool
]
except Exception as error:
logger.error(f"Error running task on agents: {error}")
raise error
def list_agents(self):
"""List all agents
Returns:
List: _description_
"""
"""List all agents."""
self.logger.info("[Listing all active agents]")
num_agents = len(self.agents)
self.logger.info(f"[Number of active agents: {num_agents}]")
try:
for agent in self.agents:
return self.logger.info(
# Assuming self.agent_pool is a list of agent objects
for agent in self.agent_pool:
self.logger.info(
f"[Agent] [ID: {agent.id}] [Name:"
f" {agent.agent_name}] [Description:"
f" {agent.agent_description}] [Status] [Running]"
f" {agent.agent_description}] [Status: Running]"
)
except Exception as error:
logger.error(f"Error listing agents: {error}")
raise error
self.logger.error(f"Error listing agents: {error}")
raise
def get_agent(self, agent_id):
"""Get agent by id
@ -227,7 +226,7 @@ class SwarmNetwork(BaseStructure):
self.logger.info(f"Getting agent {agent_id}")
try:
for agent in self.agents:
for agent in self.agent_pool:
if agent.id == agent_id:
return agent
raise ValueError(f"No agent found with ID {agent_id}")
@ -235,7 +234,7 @@ class SwarmNetwork(BaseStructure):
self.logger.error(f"Error getting agent: {error}")
raise error
def add_agent(self, agent):
def add_agent(self, agent: Agent):
"""Add agent to the agent pool
Args:
@ -243,7 +242,7 @@ class SwarmNetwork(BaseStructure):
"""
self.logger.info(f"Adding agent {agent} to pool")
try:
self.agents.append(agent)
self.agent_pool.append(agent)
except Exception as error:
print(f"Error adding agent to pool: {error}")
raise error
@ -256,9 +255,9 @@ class SwarmNetwork(BaseStructure):
"""
self.logger.info(f"Removing agent {agent_id} from pool")
try:
for agent in self.agents:
for agent in self.agent_pool:
if agent.id == agent_id:
self.agents.remove(agent)
self.agent_pool.remove(agent)
return
raise ValueError(f"No agent found with ID {agent_id}")
except Exception as error:
@ -291,7 +290,7 @@ class SwarmNetwork(BaseStructure):
self.logger.info(f"Scaling up agent pool by {num_agents}")
try:
for _ in range(num_agents):
self.agents.append(Agent())
self.agent_pool.append(Agent())
except Exception as error:
print(f"Error scaling up agent pool: {error}")
raise error
@ -303,7 +302,7 @@ class SwarmNetwork(BaseStructure):
num_agents (int, optional): _description_. Defaults to 1.
"""
for _ in range(num_agents):
self.agents.pop()
self.agent_pool.pop()
# - Create APIs for each agent in the pool (optional) with fastapi
def create_apis_for_agents(self):
@ -313,7 +312,7 @@ class SwarmNetwork(BaseStructure):
_type_: _description_
"""
self.apis = []
for agent in self.agents:
for agent in self.agent_pool:
self.api.get(f"/{agent.id}")
def run_agent(task: str, *args, **kwargs):
@ -326,7 +325,7 @@ class SwarmNetwork(BaseStructure):
# Observe all agents in the pool
self.logger.info("Starting the SwarmNetwork")
for agent in self.agents:
for agent in self.agent_pool:
self.logger.info(f"Starting agent {agent.id}")
self.logger.info(
f"[Agent][{agent.id}] [Status] [Running] [Awaiting"

@ -1,3 +1,6 @@
import logging
import warnings
from swarms.telemetry.auto_upgrade_swarms import auto_update
from swarms.utils.disable_logging import disable_logging
@ -5,4 +8,6 @@ from swarms.utils.disable_logging import disable_logging
def bootup():
"""Bootup swarms"""
disable_logging()
logging.disable(logging.CRITICAL)
warnings.filterwarnings("ignore", category=DeprecationWarning)
auto_update()

@ -1,5 +1,9 @@
from swarms.utils.class_args_wrapper import print_class_parameters
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.csv_and_pandas import (
csv_to_dataframe,
dataframe_to_strings,
)
from swarms.utils.data_to_text import (
csv_to_text,
data_to_text,
@ -12,12 +16,19 @@ from swarms.utils.download_weights_from_url import (
download_weights_from_url,
)
from swarms.utils.exponential_backoff import ExponentialBackoffMixin
from swarms.utils.file_processing import (
load_json,
parse_tagged_output,
sanitize_file_path,
zip_workspace,
)
from swarms.utils.find_img_path import find_image_path
from swarms.utils.json_output_parser import JsonOutputParser
from swarms.utils.llm_metrics_decorator import metrics_decorator
from swarms.utils.load_model_torch import load_model_torch
from swarms.utils.markdown_message import display_markdown_message
from swarms.utils.math_eval import math_eval
from swarms.utils.pandas_to_str import dataframe_to_text
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.prep_torch_model_inference import (
@ -43,11 +54,8 @@ from swarms.utils.video_to_frames import (
save_frames_as_images,
video_to_frames,
)
########
from swarms.utils.yaml_output_parser import YamlOutputParser
from swarms.utils.pandas_to_str import dataframe_to_text
from swarms.utils.concurrent_utils import execute_concurrently
__all__ = [
"SubprocessCodeInterpreter",
@ -85,4 +93,11 @@ __all__ = [
"video_to_frames",
"save_frames_as_images",
"dataframe_to_text",
"zip_workspace",
"sanitize_file_path",
"parse_tagged_output",
"load_json",
"csv_to_dataframe",
"dataframe_to_strings",
"execute_concurrently",
]

@ -0,0 +1,38 @@
import concurrent.futures
def execute_concurrently(callable_functions, max_workers=5):
"""
Executes callable functions concurrently using multithreading.
Parameters:
- callable_functions: A list of tuples, each containing the callable function and its arguments.
For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
- max_workers: The maximum number of threads to use.
Returns:
- results: A list of results returned by the callable functions. If an error occurs in any function,
the exception object will be placed at the corresponding index in the list.
"""
results = [None] * len(callable_functions)
def worker(fn, args, kwargs, index):
try:
result = fn(*args, **kwargs)
results[index] = result
except Exception as e:
results[index] = e
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
# Wait for all threads to complete
concurrent.futures.wait(futures)
return results
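A brief usage sketch for this helper, following the tuple format described in the docstring; the add and greet functions are placeholders:

if __name__ == "__main__":

    def add(a, b):
        return a + b

    def greet(name, punctuation="!"):
        return f"Hello, {name}{punctuation}"

    results = execute_concurrently(
        [
            (add, (2, 3), {}),
            (greet, ("swarms",), {"punctuation": "!!"}),
        ],
        max_workers=2,
    )
    print(results)  # expected: [5, 'Hello, swarms!!']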

@ -0,0 +1,34 @@
import pandas as pd
# CSV to dataframe
def csv_to_dataframe(file_path):
"""
Read a CSV file and return a pandas DataFrame.
Parameters:
file_path (str): The path to the CSV file.
Returns:
pandas.DataFrame: The DataFrame containing the data from the CSV file.
"""
df = pd.read_csv(file_path)
return df
# Dataframe to strings
def dataframe_to_strings(df):
"""
Converts a pandas DataFrame to a list of string representations of each row.
Args:
df (pandas.DataFrame): The DataFrame to convert.
Returns:
list: A list of string representations of each row in the DataFrame.
"""
row_strings = []
for index, row in df.iterrows():
row_string = row.to_string()
row_strings.append(row_string)
return row_strings
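A quick usage sketch chaining the two helpers; the file name is a placeholder:

if __name__ == "__main__":
    # Load a CSV and print one string representation per row.
    df = csv_to_dataframe("projects.csv")
    for row_string in dataframe_to_strings(df):
        print(row_string)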

@ -32,7 +32,7 @@ def disable_logging():
"packaging",
]:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.ERROR)
logger.setLevel(logging.CRITICAL)
# Remove all existing handlers
logging.getLogger().handlers = []

@ -0,0 +1,92 @@
import logging
import tempfile
import shutil
import os
import re
from swarms.utils.parse_code import extract_code_from_markdown
import json
def zip_workspace(workspace_path: str, output_filename: str):
"""
Zips the specified workspace directory and returns the path to the zipped file.
Ensure the output_filename does not have .zip extension as it's added by make_archive.
"""
temp_dir = tempfile.mkdtemp()
# Remove .zip if present in output_filename to avoid duplication
base_output_path = os.path.join(
temp_dir, output_filename.replace(".zip", "")
)
zip_path = shutil.make_archive(
base_output_path, "zip", workspace_path
)
return zip_path # make_archive already appends .zip
def sanitize_file_path(file_path: str):
"""
Cleans and sanitizes the file path to be valid for Windows.
"""
sanitized_path = file_path.replace("`", "").strip()
# Replace any invalid characters here with an underscore or remove them
sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
return sanitized_path
def parse_tagged_output(output, workspace_path: str):
"""
Parses tagged output and saves files to the workspace directory.
Adds logging for each step of the process.
"""
pattern = r"<!--START_FILE_PATH-->(.*?)<!--END_FILE_PATH-->(.*?)<!--START_CONTENT-->(.*?)<!--END_CONTENT-->"
files = re.findall(pattern, output, re.DOTALL)
if not files:
logging.error("No files found in the output to parse.")
return
for file_path, _, content in files:
sanitized_path = sanitize_file_path(file_path)
content = extract_code_from_markdown(
content
) # Remove code block markers
full_path = os.path.join(workspace_path, sanitized_path)
try:
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, "w") as file:
file.write(content.strip())
logging.info(f"File saved: {full_path}")
except Exception as e:
logging.error(
f"Failed to save file {sanitized_path}: {e}"
)
def load_json(json_string: str):
"""
Loads a JSON string and returns the corresponding Python object.
Args:
json_string (str): The JSON string to be loaded.
Returns:
object: The Python object representing the JSON data.
"""
json_data = json.loads(json_string)
return json_data
# Create a file with the given content at the given file path
def create_file(
content: str,
file_path: str,
):
"""
Creates a file with the specified content at the specified file path.
Args:
content (str): The content to be written to the file.
file_path (str): The path to the file to be created.
"""
with open(file_path, "w") as file:
file.write(content)
return file_path
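A short sketch of how these helpers fit together, using a hypothetical tagged string in the format the regex above expects and a throwaway workspace directory:

if __name__ == "__main__":
    tagged_output = (
        "<!--START_FILE_PATH-->app/main.py<!--END_FILE_PATH-->"
        "<!--START_CONTENT-->```python\nprint('hello')\n```<!--END_CONTENT-->"
    )
    workspace = "example_workspace"
    os.makedirs(workspace, exist_ok=True)
    # Write the tagged file into the workspace, then zip the whole directory.
    parse_tagged_output(tagged_output, workspace)
    archive = zip_workspace(workspace, "example_workspace_archive")
    print(f"Workspace zipped to {archive}")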

@ -14,7 +14,7 @@ def dataframe_to_text(
Returns:
str: The string representation of the DataFrame.
Example:
>>> df = pd.DataFrame({
... 'A': [1, 2, 3],
