parent cddeb3c3af
commit 60b4101c14
@ -0,0 +1,40 @@
# Import the necessary libraries.
import asyncio
import websockets

# Create a list of public group chats.
public_group_chats = []


# Create a function to handle incoming websocket connections.
async def handle_websocket(websocket, path):
    # Get the username of the user.
    username = await websocket.recv()
    print(f"New connection from {username}")

    # Add the user to the list of public group chats.
    public_group_chats.append(websocket)

    try:
        # Wait for the user to send a message.
        while True:
            message = await websocket.recv()
            print(f"{username}: {message}")

            # Broadcast the message to all other users in the public group chats.
            for other_websocket in public_group_chats:
                if other_websocket != websocket:
                    await other_websocket.send(
                        f"{username}: {message}"
                    )
    finally:
        # Remove the user from the list of public group chats.
        public_group_chats.remove(websocket)
        print(f"{username} has disconnected")


# Create and run the websocket server. websockets.serve() returns an awaitable
# server object, not a coroutine, so it must be awaited inside a running event
# loop rather than passed directly to asyncio.run().
async def main():
    async with websockets.serve(handle_websocket, "localhost", 8000):
        await asyncio.Future()  # Run forever.


asyncio.run(main())
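A minimal companion client sketch for exercising the server above. This is an assumption on my part rather than part of the commit; it presumes the server is running on localhost:8000 and that the first message sent is treated as the username.

import asyncio
import websockets


async def chat(username):
    # Connect to the group chat server started above.
    async with websockets.connect("ws://localhost:8000") as websocket:
        # The server treats the first message as the username.
        await websocket.send(username)
        await websocket.send("Hello, everyone!")
        # Print whatever the server broadcasts from other users.
        while True:
            print(await websocket.recv())


asyncio.run(chat("alice"))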
@ -0,0 +1,48 @@
import openai
from decouple import config

# Load the OpenAI API key from the environment variable
openai.api_key = config("OPENAI_API_KEY")

# Define the prompt for the LLM
prompt = """
I want to create an LLM that can help me get in-game gold from people in World of Warcraft. The LLM should be able to:

* Generate persuasive messages to send to players asking for gold
* Detect when a player is likely to give gold
* Respond to common objections from players

Here is an example of a conversation between the LLM and a player:

**LLM**: Hi there! I'm an AI assistant who can help you get in-game gold. Would you be interested in learning more?
**Player**: Sure, why not.
**LLM**: Great! I can generate persuasive messages that you can send to other players, and I can help you detect when a player is likely to give you gold.
**Player**: That sounds great! Can you give me an example of a message that I could send?
**LLM**: Sure, here is an example message:

"Hi [player name],

I'm a big fan of your character and your playing style. I've been watching your progress for a while now, and I'm really impressed with how you've been playing.

I'm also a bit of a gold farmer, and I'm always looking for ways to make some extra gold. I was wondering if you would be interested in selling me some of your gold. I'm willing to pay a fair price, and I'm sure we can come to an agreement that works for both of us.

Please let me know if you're interested. Thanks for your time!"

**Player**: That's a great message! I'll definitely give it a try.
**LLM**: I'm glad to hear that. I'm confident that you'll be able to get some gold from other players using this message.

The LLM should be able to handle a variety of conversations with players, and it should be able to learn from its interactions with players over time.

Please write the code for this LLM in Python.
"""

# Send the prompt to the LLM
response = openai.Completion.create(
    engine="text-davinci-003", prompt=prompt
)

# Get the code from the LLM's response
code = response["choices"][0]["text"]

# Print the code
print(code)
@ -0,0 +1,39 @@
import tkinter as tk

# Create the main window
root = tk.Tk()
root.title("Chat Visualization")

# Create the text area for the chat
chat_area = tk.Text(root, height=20, width=60)
chat_area.pack()

# Create the input field for the user message
input_field = tk.Entry(root)
input_field.pack()

# Create the send button
send_button = tk.Button(root, text="Send")
send_button.pack()


# Define the function to send the message
def send_message():
    # Get the message from the input field
    message = input_field.get()

    # Clear the input field
    input_field.delete(0, tk.END)

    # Add the message to the chat area
    chat_area.insert(tk.END, message + "\n")

    # Scroll to the bottom of the chat area
    chat_area.see(tk.END)


# Bind the send button to the send_message function
send_button.config(command=send_message)

# Start the main loop
root.mainloop()
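An optional line, not in the committed file, that could be added just before root.mainloop() so pressing Enter in the input field sends the message as well:

# Optional: let the Enter key trigger send_message too (place before root.mainloop()).
input_field.bind("<Return>", lambda event: send_message())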
@ -0,0 +1,58 @@
import os
import random

# Create a list of character names
character_names = ["Alice", "Bob", "Charlie", "Dave", "Eve"]

# Create a dictionary of character voices
character_voices = {
    "Alice": "Alice.wav",
    "Bob": "Bob.wav",
    "Charlie": "Charlie.wav",
    "Dave": "Dave.wav",
    "Eve": "Eve.wav",
}

# Get the user's input
conversation_topic = input(
    "What would you like the characters to talk about? "
)


# Create a function to generate a random conversation
def generate_conversation(characters, topic):
    # Choose two random characters to talk
    character1 = random.choice(characters)
    character2 = random.choice(characters)

    # Generate the conversation
    conversation = [
        (
            f"{character1}: Hello, {character2}. I'd like to talk"
            f" about {topic}."
        ),
        (
            f"{character2}: Sure, {character1}. What do you want to"
            " know?"
        ),
        (
            f"{character1}: I'm just curious about your thoughts on"
            " the matter."
        ),
        f"{character2}: Well, I think it's a very interesting topic.",
        f"{character1}: I agree. I'm glad we're talking about this.",
    ]

    # Return the conversation
    return conversation


# Generate the conversation
conversation = generate_conversation(
    character_names, conversation_topic
)

# Play the conversation
for line in conversation:
    print(line)
    os.system(f"afplay {character_voices[line.split(':')[0]]}")
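Note that afplay is macOS-only and the script assumes the .wav files listed in character_voices sit next to it. A hedged, cross-platform variant of the playback loop, assuming the optional playsound package is installed:

# Alternative playback loop using playsound instead of a shell call to afplay.
from playsound import playsound

for line in conversation:
    print(line)
    playsound(character_voices[line.split(":")[0]])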
@ -0,0 +1,36 @@
import discord
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Discord Bot Setup
# discord.py 2.x requires explicit intents; message_content is needed so the
# bot can read the text of incoming messages.
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

# AI Model Setup
tokenizer = AutoTokenizer.from_pretrained(
    "facebook/blenderbot-400M-distill"
)
model = AutoModelForSeq2SeqLM.from_pretrained(
    "facebook/blenderbot-400M-distill"
)


@client.event
async def on_ready():
    print(f"Logged in as {client.user.name}")


@client.event
async def on_message(message):
    if message.author == client.user:
        return

    if message.content.startswith("!generate"):
        # Use a distinct name to avoid shadowing the built-in input().
        prompt_text = message.content[len("!generate") :]
        inputs = tokenizer(prompt_text, return_tensors="pt")
        outputs = model.generate(**inputs)
        generated_text = tokenizer.batch_decode(
            outputs, skip_special_tokens=True
        )
        await message.channel.send(generated_text[0])


client.run("YOUR_BOT_TOKEN")
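The generation path can be sanity-checked outside Discord first; a small sketch using only the transformers calls already present above:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")

# Tokenize a test prompt, generate a reply, and decode it back into text.
inputs = tokenizer("Hello, how are you today?", return_tensors="pt")
outputs = model.generate(**inputs)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])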
@ -0,0 +1,114 @@
# OpenMind.bot streamlines social interactions between personalized bots,
# representing users, media, and influencers, ensuring meaningful exchanges.
# It eliminates misunderstandings by using context-aware conversations,
# followed by summaries or audio recaps of these interactions for efficient
# communication.

import json
import datetime
import pytz

from flask import Flask, request, jsonify

app = Flask(__name__)

# NOTE: For simplicity this demo persists a single conversation in
# conversations.json; the <conversation_id> path parameter is accepted by the
# routes below but is not used to look anything up.


@app.route("/api/v1/conversations", methods=["POST"])
def create_conversation():
    # Create a new conversation
    conversation = {
        "user_id": request.json["user_id"],
        "bot_id": request.json["bot_id"],
        "messages": [],
    }

    # Save the conversation to the database
    with open("conversations.json", "w") as f:
        json.dump(conversation, f)

    return jsonify(conversation)


@app.route("/api/v1/conversations/<conversation_id>", methods=["GET"])
def get_conversation(conversation_id):
    # Get the conversation from the database
    with open("conversations.json", "r") as f:
        conversation = json.load(f)

    # Return the conversation
    return jsonify(conversation)


@app.route(
    "/api/v1/conversations/<conversation_id>/messages",
    methods=["POST"],
)
def create_message(conversation_id):
    # Create a new message
    message = {
        "user_id": request.json["user_id"],
        "bot_id": request.json["bot_id"],
        "text": request.json["text"],
        "timestamp": datetime.datetime.now(pytz.utc).isoformat(),
    }

    # Get the conversation from the database
    with open("conversations.json", "r") as f:
        conversation = json.load(f)

    # Add the message to the conversation
    conversation["messages"].append(message)

    # Save the conversation to the database
    with open("conversations.json", "w") as f:
        json.dump(conversation, f)

    return jsonify(message)


@app.route(
    "/api/v1/conversations/<conversation_id>/messages",
    methods=["GET"],
)
def get_messages(conversation_id):
    # Get the conversation from the database
    with open("conversations.json", "r") as f:
        conversation = json.load(f)

    # Return the messages
    return jsonify(conversation["messages"])


@app.route(
    "/api/v1/conversations/<conversation_id>/summary", methods=["GET"]
)
def get_summary(conversation_id):
    # Get the conversation from the database
    with open("conversations.json", "r") as f:
        conversation = json.load(f)

    # Create a summary of the conversation
    summary = ""
    for message in conversation["messages"]:
        summary += message["text"] + "\n"

    # Return the summary
    return jsonify(summary)


@app.route(
    "/api/v1/conversations/<conversation_id>/audio_recap",
    methods=["GET"],
)
def get_audio_recap(conversation_id):
    # Get the conversation from the database
    with open("conversations.json", "r") as f:
        conversation = json.load(f)

    # Create an audio recap of the conversation
    audio_recap = ""
    for message in conversation["messages"]:
        audio_recap += message["text"] + "\n"

    # Return the audio recap
    return jsonify(audio_recap)


if __name__ == "__main__":
    app.run()
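A hedged sketch of exercising these endpoints locally with requests. It assumes the app above is running on Flask's default port 5000; the ids and the "demo" conversation_id are made up for illustration (the id is ignored by the current implementation anyway).

import requests

BASE = "http://127.0.0.1:5000/api/v1"

# Create a conversation, add a message to it, then fetch the text summary.
requests.post(f"{BASE}/conversations", json={"user_id": "u1", "bot_id": "b1"})
requests.post(
    f"{BASE}/conversations/demo/messages",
    json={"user_id": "u1", "bot_id": "b1", "text": "Hello, bot!"},
)
print(requests.get(f"{BASE}/conversations/demo/summary").json())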
@ -0,0 +1,41 @@
import requests
from bs4 import BeautifulSoup


def arxiv_search(query):
    """
    Searches arxiv.org for the given query via the arXiv API
    (a keyword search, not a true semantic search).

    Args:
        query: The query to search for.

    Returns:
        A list of search results.
    """

    # Make a request to arxiv.org
    response = requests.get(
        "http://export.arxiv.org/api/query",
        params={"search_query": query, "start": 0, "max_results": 10},
    )

    # Parse the response. The API returns an Atom XML feed; html.parser
    # handles it leniently (tag names are lowercased).
    soup = BeautifulSoup(response.content, "html.parser")

    # Extract the search results
    results = []
    for result in soup.find_all("entry"):
        results.append(
            {
                "title": result.find("title").text,
                "author": result.find("author").text,
                "abstract": result.find("summary").text,
                "link": result.find("link")["href"],
            }
        )

    return results


search = arxiv_search("quantum computing")
print(search)
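An alternative sketch, assuming the optional feedparser package is installed, which handles the Atom feed without manual tag handling:

import feedparser

# Query the same arXiv API endpoint and let feedparser parse the Atom feed.
feed = feedparser.parse(
    "http://export.arxiv.org/api/query?search_query=quantum+computing&start=0&max_results=10"
)
for entry in feed.entries:
    print(entry.title, "-", entry.link)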
@ -0,0 +1,178 @@
import concurrent.futures
import csv
import os
from swarms import Gemini, Agent, SwarmNetwork, ConcurrentWorkflow
from swarms.memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import logger

# Load ENV
load_dotenv()


gemini = Gemini(
    model_name="gemini-pro",
    gemini_api_key=os.getenv("GEMINI_API_KEY"),
)

# SwarmNetwork
swarm_network = SwarmNetwork(
    logging_enabled=True,
)


# ConcurrentWorkflow
workflow = ConcurrentWorkflow(
    task_pool=None,
    max_workers=10,
)


# Memory
memory = ChromaDB(output_dir="swarm_hackathon")


def execute_concurrently(callable_functions, max_workers=5):
    """
    Executes callable functions concurrently using multithreading.

    Parameters:
    - callable_functions: A list of tuples, each containing the callable function and its arguments.
      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
    - max_workers: The maximum number of threads to use.

    Returns:
    - results: A list of results returned by the callable functions. If an error occurs in any function,
      the exception object will be placed at the corresponding index in the list.
    """
    results = [None] * len(callable_functions)

    def worker(fn, args, kwargs, index):
        try:
            result = fn(*args, **kwargs)
            results[index] = result
        except Exception as e:
            results[index] = e

    with concurrent.futures.ThreadPoolExecutor(
        max_workers=max_workers
    ) as executor:
        futures = []
        for i, (fn, args, kwargs) in enumerate(callable_functions):
            futures.append(
                executor.submit(worker, fn, args, kwargs, i)
            )

        # Wait for all threads to complete
        concurrent.futures.wait(futures)

    return results


# # For each row in the dataframe, create an agent and add it to the swarm network
# for index, row in df.iterrows():
#     agent_name = row["Project Name"] + "agent"
#     system_prompt = row["Lightning Proposal"]
#     agent = Agent(
#         llm=gemini,
#         max_loops="auto",
#         stopping_token="<DONE>",
#         system_prompt=system_prompt,
#         agent_name=agent_name,
#         long_term_memory=ChromaDB(output_dir="swarm_hackathon"),
#     )
#     swarm_network.add_agent(agent)

# out = swarm_network.list_agents()


# Extract specific column values from the CSV and create one agent per row
def extract_and_create_agents(
    csv_file_path: str, target_columns: list
):
    """
    Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
    creates an Agent for each, and returns the list of agents.

    Parameters:
    - csv_file_path: The path to the CSV file.
    - target_columns: A list of column names to extract values from.
    """
    agents = []
    with open(csv_file_path, mode="r", encoding="utf-8") as file:
        reader = csv.DictReader(file)
        for row in reader:
            project_name = row[target_columns[0]]
            lightning_proposal = row[target_columns[1]]

            # Example of creating and adding an agent based on the project name and lightning proposal
            agent_name = f"{project_name} agent"
            print(agent_name)  # For demonstration

            # Create the agent
            logger.info("Creating agent...")
            agent = Agent(
                llm=gemini,
                max_loops=1,
                stopping_token="<DONE?>",
                sop=None,
                system_prompt=(
                    "Transform an app idea into a very simple python"
                    " app in markdown. Return all the python code in"
                    " a single markdown file."
                ),
                long_term_memory=memory,
            )

            # Log the agent
            logger.info(
                f"Agent created: {agent_name} with long term memory"
            )
            agents.append(agent)

            # Create the code for each project
            output = agent.run(
                (
                    f"Create the code for the {lightning_proposal} in"
                    " python and wrap it in markdown and return it"
                ),
                None,
            )
            print(output)
            # Parse the output
            output = extract_code_from_markdown(output)
            # Create the file
            output = create_file(output, f"{project_name}.py")

            # Log the project created
            logger.info(
                f"Project {project_name} created: {output} at file"
                f" path {project_name}.py"
            )
            print(output)

    return agents


# Specific columns to extract
target_columns = ["Project Name", "Lightning Proposal "]

# Use the adjusted function
specific_column_values = extract_and_create_agents(
    "text.csv", target_columns
)

# Display the extracted column values
print(specific_column_values)


# Concurrently execute the function
output = execute_concurrently(
    [
        (extract_and_create_agents, ("text.csv", target_columns), {}),
    ],
    max_workers=5,
)
print(output)
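The script expects a text.csv file whose header matches target_columns exactly, including the trailing space in "Lightning Proposal ". A small sketch of generating a compatible file for a dry run; the row contents are made up for illustration:

import csv

# Write a minimal text.csv matching the columns the script reads.
with open("text.csv", mode="w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=["Project Name", "Lightning Proposal "])
    writer.writeheader()
    writer.writerow(
        {
            "Project Name": "DemoApp",
            "Lightning Proposal ": "A to-do list CLI in Python.",
        }
    )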
@ -1,46 +0,0 @@
import os

from dotenv import load_dotenv

# Import the OpenAIChat model and the Agent struct
from swarms import Agent, OpenAIChat, SwarmNetwork

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)

## Initialize the workflow
agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")


# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
    agents=[agent, agent2, agent3],
)

# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)

# Run the workflow on a task
out = swarmnet.run_single_agent(
    agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)


# Run all the agents in the swarm network on a task
out = swarmnet.run_many_agents(
    "Generate a 10,000 word blog on health and wellness."
)
print(out)
@ -1,11 +1,14 @@
import pandas as pd

from swarms import dataframe_to_text

# # Example usage:
df = pd.DataFrame({
    'A': [1, 2, 3],
    'B': [4, 5, 6],
    'C': [7, 8, 9],
})
df = pd.DataFrame(
    {
        "A": [1, 2, 3],
        "B": [4, 5, 6],
        "C": [7, 8, 9],
    }
)

print(dataframe_to_text(df))
@ -0,0 +1,73 @@
# Import the OpenAIChat model and the Agent struct
import os
from swarms import (
    Agent,
    OpenAIChat,
    SwarmNetwork,
    Anthropic,
    TogetherLLM,
)
from swarms.memory import ChromaDB
from dotenv import load_dotenv

# Load the environment variables
load_dotenv()

# Initialize the ChromaDB
memory = ChromaDB()

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
)

# Initialize the Anthropic model
anthropic = Anthropic(max_tokens=3000)

# TogetherLLM
together_llm = TogetherLLM(
    together_api_key=os.getenv("TOGETHER_API_KEY"), max_tokens=3000
)

## Initialize the workflow
agent = Agent(
    llm=anthropic,
    max_loops=1,
    agent_name="Social Media Manager",
    long_term_memory=memory,
)
agent2 = Agent(
    llm=llm,
    max_loops=1,
    agent_name=" Product Manager",
    long_term_memory=memory,
)
agent3 = Agent(
    llm=together_llm,
    max_loops=1,
    agent_name="SEO Manager",
    long_term_memory=memory,
)


# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
    agents=[agent, agent2, agent3], logging_enabled=True
)

# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)

# Run the workflow on a task
out = swarmnet.run_single_agent(
    agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)


# # Run all the agents in the swarm network on a task
# out = swarmnet.run_many_agents(
#     f"Summarize the blog and create a social media post: {out}"
# )
# print(out)
@ -0,0 +1,38 @@
import concurrent.futures


def execute_concurrently(callable_functions, max_workers=5):
    """
    Executes callable functions concurrently using multithreading.

    Parameters:
    - callable_functions: A list of tuples, each containing the callable function and its arguments.
      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
    - max_workers: The maximum number of threads to use.

    Returns:
    - results: A list of results returned by the callable functions. If an error occurs in any function,
      the exception object will be placed at the corresponding index in the list.
    """
    results = [None] * len(callable_functions)

    def worker(fn, args, kwargs, index):
        try:
            result = fn(*args, **kwargs)
            results[index] = result
        except Exception as e:
            results[index] = e

    with concurrent.futures.ThreadPoolExecutor(
        max_workers=max_workers
    ) as executor:
        futures = []
        for i, (fn, args, kwargs) in enumerate(callable_functions):
            futures.append(
                executor.submit(worker, fn, args, kwargs, i)
            )

        # Wait for all threads to complete
        concurrent.futures.wait(futures)

    return results
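A small usage sketch for this helper; the two worker functions are made up for illustration:

import time


def slow_square(x):
    # Simulate a blocking task.
    time.sleep(0.1)
    return x * x


def fail(msg):
    raise ValueError(msg)


results = execute_concurrently(
    [
        (slow_square, (3,), {}),
        (fail, ("boom",), {}),
    ],
    max_workers=2,
)
# The first slot holds 9; the second slot holds the ValueError instance.
print(results)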
@ -0,0 +1,34 @@
import pandas as pd


# CSV to dataframe
def csv_to_dataframe(file_path):
    """
    Read a CSV file and return a pandas DataFrame.

    Parameters:
        file_path (str): The path to the CSV file.

    Returns:
        pandas.DataFrame: The DataFrame containing the data from the CSV file.
    """
    df = pd.read_csv(file_path)
    return df


# Dataframe to strings
def dataframe_to_strings(df):
    """
    Converts a pandas DataFrame to a list of string representations of each row.

    Args:
        df (pandas.DataFrame): The DataFrame to convert.

    Returns:
        list: A list of string representations of each row in the DataFrame.
    """
    row_strings = []
    for index, row in df.iterrows():
        row_string = row.to_string()
        row_strings.append(row_string)
    return row_strings
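A quick usage sketch of the two helpers together; data.csv is a hypothetical input file:

# Load a CSV and print one string per row.
df = csv_to_dataframe("data.csv")
for row_string in dataframe_to_strings(df):
    print(row_string)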
@ -0,0 +1,92 @@
import logging
import tempfile
import shutil
import os
import re
from swarms.utils.parse_code import extract_code_from_markdown
import json


def zip_workspace(workspace_path: str, output_filename: str):
    """
    Zips the specified workspace directory and returns the path to the zipped file.
    Any .zip extension on output_filename is stripped, since make_archive appends it.
    """
    temp_dir = tempfile.mkdtemp()
    # Remove .zip if present in output_filename to avoid duplication
    base_output_path = os.path.join(
        temp_dir, output_filename.replace(".zip", "")
    )
    zip_path = shutil.make_archive(
        base_output_path, "zip", workspace_path
    )
    return zip_path  # make_archive already appends .zip


def sanitize_file_path(file_path: str):
    """
    Cleans and sanitizes the file path to be valid for Windows.
    """
    sanitized_path = file_path.replace("`", "").strip()
    # Replace any invalid characters here with an underscore or remove them
    sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
    return sanitized_path


def parse_tagged_output(output, workspace_path: str):
    """
    Parses tagged output and saves files to the workspace directory.
    Adds logging for each step of the process.
    """
    pattern = r"<!--START_FILE_PATH-->(.*?)<!--END_FILE_PATH-->(.*?)<!--START_CONTENT-->(.*?)<!--END_CONTENT-->"
    files = re.findall(pattern, output, re.DOTALL)
    if not files:
        logging.error("No files found in the output to parse.")
        return

    for file_path, _, content in files:
        sanitized_path = sanitize_file_path(file_path)
        content = extract_code_from_markdown(
            content
        )  # Remove code block markers
        full_path = os.path.join(workspace_path, sanitized_path)
        try:
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
            with open(full_path, "w") as file:
                file.write(content.strip())
            logging.info(f"File saved: {full_path}")
        except Exception as e:
            logging.error(
                f"Failed to save file {sanitized_path}: {e}"
            )


def load_json(json_string: str):
    """
    Loads a JSON string and returns the corresponding Python object.

    Args:
        json_string (str): The JSON string to be loaded.

    Returns:
        object: The Python object representing the JSON data.
    """
    json_data = json.loads(json_string)
    return json_data


# Create a file with the given content at the given path
def create_file(
    content: str,
    file_path: str,
):
    """
    Creates a file with the specified content at the specified file path.

    Args:
        content (str): The content to be written to the file.
        file_path (str): The path to the file to be created.
    """
    with open(file_path, "w") as file:
        file.write(content)
    return file_path
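A hedged sketch of what parse_tagged_output expects as input. The tag format follows the regex above; the file name, workspace path, and content are made up, and this assumes extract_code_from_markdown strips the code fences as its name suggests:

# A made-up model output using the tag format parsed above.
tagged_output = (
    "<!--START_FILE_PATH-->hello.py<!--END_FILE_PATH-->"
    "<!--START_CONTENT-->\n```python\nprint('hello')\n```\n<!--END_CONTENT-->"
)
parse_tagged_output(tagged_output, "demo_workspace")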