parent 75938ae274
commit f69c25a454
@@ -1,76 +0,0 @@
import os
from typing import Any, Dict, Optional

from autogen import ConversableAgent
from loguru import logger

from swarms import Agent


class AutogenAgentWrapper(Agent):
    """
    Wrapper class for the ConversableAgent that provides additional functionality.
    """

    def __init__(
        self,
        name: str,
        llm_config: Dict[str, Any],
        *args: Any,
        **kwargs: Any,
    ):
        """
        Initialize the AutogenAgentWrapper.

        Args:
            name (str): The name of the agent.
            llm_config (Dict[str, Any]): The configuration for the ConversableAgent.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.
        """
        super().__init__(*args, **kwargs)
        self.name = name
        self.autogen_agent = ConversableAgent(
            name=name,
            llm_config=llm_config,
            code_execution_config=False,
            function_map=None,
            human_input_mode="NEVER",
        )

    def run(
        self, task: str, *args: Any, **kwargs: Any
    ) -> Optional[str]:
        """
        Run the AutogenAgentWrapper.

        Args:
            task (str): The task to be performed by the agent.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            Optional[str]: The response generated by the agent, or None if an error occurred.
        """
        try:
            messages = [{"content": task, "role": "user"}]
            response = self.autogen_agent.generate_reply(messages)
            # loguru uses {}-style placeholders, not %-style
            logger.info("Task: {}, Response: {}", task, response)
            return response
        except Exception as e:
            logger.error("An error occurred: {}", e)
            return None


llm_config = {
    "config_list": [
        {
            "model": "gpt-4",
            "api_key": os.environ.get("OPENAI_API_KEY"),
        }
    ]
}

autogen_wrapper = AutogenAgentWrapper("AutogenAssistant", llm_config)
result = autogen_wrapper.run("Tell me a joke about programming.")
print(result)

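Editor's note: `generate_reply` accepts a full message history, so the wrapper above also supports multi-turn use. A minimal sketch, assuming AutoGen's standard {"role", "content"} message dicts; this snippet is illustrative and not part of the deleted file.

# Hypothetical multi-turn call against the wrapper defined above.
history = [
    {"role": "user", "content": "Tell me a joke about programming."},
    {"role": "assistant", "content": "Why do programmers prefer dark mode? Because light attracts bugs."},
    {"role": "user", "content": "Tell me another one."},
]
print(autogen_wrapper.autogen_agent.generate_reply(history))
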
@@ -1,95 +0,0 @@
from typing import List, Optional

from crewai import Agent as CrewAIAgent
from crewai import Crew, Process, Task
from crewai_tools import SerperDevTool
from loguru import logger

from swarms import Agent


class CrewAIAgentWrapper(Agent):
    """
    Initialize the CrewAIAgentWrapper.

    Args:
        name (str): The name of the agent.
        role (str): The role of the agent.
        goal (str): The goal of the agent.
        backstory (str): The backstory of the agent.
        tools (Optional[List]): The tools used by the agent (default: None).
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.
    """

    def __init__(
        self,
        name: str,
        role: str,
        goal: str,
        backstory: str,
        tools: Optional[List] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.name = name
        self.crewai_agent = CrewAIAgent(
            role=role,
            goal=goal,
            backstory=backstory,
            verbose=True,
            allow_delegation=False,
            tools=tools or [],
            *args,
            **kwargs,
        )

    def run(self, task: str, *args, **kwargs):
        """
        Run the agent's task.

        Args:
            task (str): The task to be performed by the agent.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            Any: The result of the task execution.
        """
        try:
            crew_task = Task(
                description=task,
                agent=self.crewai_agent,
                *args,
                **kwargs,
            )
            crew = Crew(
                agents=[self.crewai_agent],
                tasks=[crew_task],
                process=Process.sequential,
            )
            result = crew.kickoff()
            return result
        except Exception as e:
            logger.error(f"An error occurred: {e}")
            return None


# Usage example
search_tool = SerperDevTool()

crewai_wrapper = CrewAIAgentWrapper(
    name="ResearchAnalyst",
    role="Senior Research Analyst",
    goal="Uncover cutting-edge developments in AI and data science",
    backstory="""You work at a leading tech think tank.
    Your expertise lies in identifying emerging trends.
    You have a knack for dissecting complex data and presenting actionable insights.""",
    tools=[search_tool],
)

result = crewai_wrapper.run(
    "Analyze the latest trends in quantum computing and summarize the key findings."
)
print(result)

@@ -1,69 +0,0 @@
from typing import List, Optional

from griptape.structures import Agent as GriptapeAgent
from griptape.tools import FileManager, TaskMemoryClient, WebScraper

from swarms import Agent


class GriptapeAgentWrapper(Agent):
    """
    A wrapper class for the GriptapeAgent from the griptape library.
    """

    def __init__(
        self, name: str, tools: Optional[List] = None, *args, **kwargs
    ):
        """
        Initialize the GriptapeAgentWrapper.

        Parameters:
        - name: The name of the agent.
        - tools: A list of tools to be used by the agent. If not provided, default tools will be used.
        - *args, **kwargs: Additional arguments to be passed to the parent class constructor.
        """
        super().__init__(*args, **kwargs)
        self.name = name
        self.tools = tools or [
            WebScraper(off_prompt=True),
            TaskMemoryClient(off_prompt=True),
            FileManager(),
        ]
        self.griptape_agent = GriptapeAgent(
            input=f"I am {name}, an AI assistant. How can I help you?",
            tools=self.tools,
        )

    def run(self, task: str, *args, **kwargs) -> str:
        """
        Run a task using the GriptapeAgent.

        Parameters:
        - task: The task to be performed by the agent.

        Returns:
        - The response from the GriptapeAgent as a string.
        """
        response = self.griptape_agent.run(task, *args, **kwargs)
        return str(response)

    def add_tool(self, tool) -> None:
        """
        Add a tool to the agent.

        Parameters:
        - tool: The tool to be added.
        """
        self.tools.append(tool)
        self.griptape_agent = GriptapeAgent(
            input=f"I am {self.name}, an AI assistant. How can I help you?",
            tools=self.tools,
        )


# Usage example
griptape_wrapper = GriptapeAgentWrapper("GriptapeAssistant")
result = griptape_wrapper.run(
    "Load https://example.com, summarize it, and store it in a file called example_summary.txt."
)
print(result)

@@ -1,45 +0,0 @@
from swarms import (
    Agent as SwarmsAgent,
)  # Import the base Agent class from Swarms
from griptape.structures import Agent as GriptapeAgent
from griptape.tools import (
    WebScraperTool,
    FileManagerTool,
    PromptSummaryTool,
)


# Create a custom agent class that inherits from SwarmsAgent
class GriptapeSwarmsAgent(SwarmsAgent):
    def __init__(self, *args, **kwargs):
        # Initialize the Griptape agent with its tools
        self.agent = GriptapeAgent(
            input="Load {{ args[0] }}, summarize it, and store it in a file called {{ args[1] }}.",
            tools=[
                WebScraperTool(off_prompt=True),
                PromptSummaryTool(off_prompt=True),
                FileManagerTool(),
            ],
            *args,
            **kwargs,
            # Add additional settings
        )

    # Override the run method to take a task and execute it using the Griptape agent
    def run(self, task: str) -> str:
        # Extract URL and filename from task (you can modify this parsing based on task structure)
        url, filename = task.split(
            ","
        )  # Example of splitting task string
        # Execute the Griptape agent with the task inputs
        result = self.agent.run(url.strip(), filename.strip())
        # Return the final result as a string
        return str(result)


# Example usage:
griptape_swarms_agent = GriptapeSwarmsAgent()
output = griptape_swarms_agent.run(
    "https://griptape.ai, griptape.txt"
)
print(output)

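Editor's note: `run` above assumes the task string always contains exactly one comma; anything else raises a bare unpacking ValueError. A defensive variant is sketched below (illustrative, not part of the deleted file).

# Hypothetical hardened version of GriptapeSwarmsAgent.run that fails
# with a clear message on malformed input.
def run(self, task: str) -> str:
    parts = [p.strip() for p in task.split(",")]
    if len(parts) != 2:
        raise ValueError(
            f"Expected task of the form '<url>, <filename>', got: {task!r}"
        )
    url, filename = parts
    return str(self.agent.run(url, filename))
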
@@ -1,82 +0,0 @@
from typing import List, Optional

from langchain.agents import AgentExecutor, LLMSingleActionAgent, Tool
from langchain.chains import LLMChain
from langchain_community.llms import OpenAI
from langchain.prompts import StringPromptTemplate
from langchain.tools import DuckDuckGoSearchRun

from swarms import Agent


class LangchainAgentWrapper(Agent):
    """
    Initialize the LangchainAgentWrapper.

    Args:
        name (str): The name of the agent.
        tools (List[Tool]): The list of tools available to the agent.
        llm (Optional[OpenAI], optional): The OpenAI language model to use. Defaults to None.
    """

    def __init__(
        self,
        name: str,
        tools: List[Tool],
        llm: Optional[OpenAI] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.name = name
        self.tools = tools
        self.llm = llm or OpenAI(temperature=0)

        prompt = StringPromptTemplate.from_template(
            "You are {name}, an AI assistant. Answer the following question: {question}"
        )

        llm_chain = LLMChain(llm=self.llm, prompt=prompt)
        tool_names = [tool.name for tool in self.tools]

        self.agent = LLMSingleActionAgent(
            llm_chain=llm_chain,
            output_parser=None,
            stop=["\nObservation:"],
            allowed_tools=tool_names,
        )

        self.agent_executor = AgentExecutor.from_agent_and_tools(
            agent=self.agent, tools=self.tools, verbose=True
        )

    def run(self, task: str, *args, **kwargs):
        """
        Run the agent with the given task.

        Args:
            task (str): The task to be performed by the agent.

        Returns:
            Any: The result of the agent's execution.
        """
        try:
            return self.agent_executor.run(task)
        except Exception as e:
            print(f"An error occurred: {e}")


# Usage example

search_tool = DuckDuckGoSearchRun()
tools = [
    Tool(
        name="Search",
        func=search_tool.run,
        description="Useful for searching the internet",
    )
]

langchain_wrapper = LangchainAgentWrapper("LangchainAssistant", tools)
result = langchain_wrapper.run("What is the capital of France?")
print(result)

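Editor's note: `LLMSingleActionAgent` requires a real output parser, so `output_parser=None` above will fail at construction time. A minimal drop-in parser following the classic LangChain custom-agent pattern is sketched below (an assumption-laden fix, not part of the deleted file).

from typing import Union

from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish


class SimpleOutputParser(AgentOutputParser):
    """Treat any LLM output as a final answer; no tool dispatch."""

    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        return AgentFinish(
            return_values={"output": llm_output.strip()},
            log=llm_output,
        )
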
@@ -1,135 +0,0 @@
from typing import Any, Dict, List, Optional

from langchain import hub
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.tools.tavily_search import (
    TavilySearchResults,
)
from langchain_openai import ChatOpenAI
from loguru import logger


class LangchainAgent:
    def __init__(
        self,
        tavily_api_key: str,
        llm_model: str = "gpt-3.5-turbo",
        temperature: float = 0.7,
        tavily_max_results: int = 1,
        prompt_hub_url: str = "hwchase17/openai-tools-agent",
        verbose: bool = True,
        log_file: Optional[str] = None,
        openai_api_key: Optional[str] = None,
    ) -> None:
        """
        Initializes the LangchainAgent with the given tools and parameters.

        :param tavily_api_key: The API key for the Tavily search tool.
        :param llm_model: The OpenAI language model to be used (default: "gpt-3.5-turbo").
        :param temperature: Temperature for the language model (default: 0.7).
        :param tavily_max_results: Maximum results for the Tavily search (default: 1).
        :param prompt_hub_url: URL of the prompt hub to fetch the agent prompt (default: "hwchase17/openai-tools-agent").
        :param verbose: If True, the agent will print detailed logs (default: True).
        :param log_file: Optional log file to store logs using Loguru.
        :param openai_api_key: Optional OpenAI API key for connecting to OpenAI services.
        """
        # Setup Loguru for logging
        if log_file:
            logger.add(log_file, rotation="500 MB")

        # Log initialization
        logger.info(
            "Initializing LangchainAgent with model: {}, temperature: {}",
            llm_model,
            temperature,
        )

        # Set up Tavily Search tool
        logger.info(
            "Setting up Tavily Search with max_results: {}",
            tavily_max_results,
        )
        self.tavily_search = TavilySearchResults(
            api_key=tavily_api_key, max_results=tavily_max_results
        )

        # Tools list (can be expanded)
        self.tools = [self.tavily_search]

        # Initialize the LLM (OpenAI Chat model)
        logger.info("Initializing OpenAI model: {}", llm_model)
        self.llm = ChatOpenAI(
            model=llm_model,
            temperature=temperature,
            openai_api_key=openai_api_key,
        )

        # Fetch the prompt template from LangChain hub
        logger.info(
            "Fetching prompt template from {}", prompt_hub_url
        )
        self.prompt = hub.pull(prompt_hub_url)

        # Create the OpenAI Tools agent
        logger.info(
            "Creating OpenAI Tools agent with fetched prompt and LLM."
        )
        self.agent = create_openai_tools_agent(
            self.llm, self.tools, self.prompt
        )

        # Create AgentExecutor with the agent and tools
        logger.info(
            "Setting up AgentExecutor with verbose: {}", verbose
        )
        self.agent_executor = AgentExecutor(
            agent=self.agent, tools=self.tools, verbose=verbose
        )

    def run(
        self,
        task: str,
        chat_history: Optional[List[Dict[str, str]]] = None,
    ) -> str:
        """
        Run the LangchainAgent with a specific task.

        :param task: The task (query) for the agent to handle.
        :param chat_history: Optional previous chat history for context (default: None).
        :return: The result of the task.
        """
        logger.info("Running agent with task: {}", task)

        # Create input for agent execution
        input_data: Dict[str, Any] = {"input": task}
        if chat_history:
            logger.info("Passing chat history for context.")
            input_data["chat_history"] = chat_history

        # Invoke the agent
        logger.info("Invoking the agent executor.")
        result = self.agent_executor.invoke(input_data)

        # Log the result
        logger.info(
            "Task executed successfully. Result: {}", result["output"]
        )

        # Return the output from the agent
        # return result["output"]
        return result


# # Example usage:
# agent = LangchainAgent(
#     tavily_api_key="your_tavily_api_key",
#     llm_model="gpt-3.5-turbo",
#     temperature=0.5,
#     tavily_max_results=3,
#     prompt_hub_url="your-prompt-url",
#     verbose=True,
#     log_file="agent.log",
#     openai_api_key="your_openai_api_key"
# )
# result = agent.run("What is LangChain?")
# print(result)

@@ -1,49 +0,0 @@
import timeit

from swarms import Agent, ConcurrentWorkflow, Task
from swarms.agents.multion_agent import MultiOnAgent

# model
model = MultiOnAgent(multion_api_key="api-key")


# out = model.run("search for a recipe")
agent = Agent(
    agent_name="MultiOnAgent",
    description="A multi-on agent that performs browsing tasks.",
    llm=model,
    max_loops=1,
    system_prompt=None,
)

# logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully")

# Task
task = Task(
    agent=agent,
    description="Download https://www.coachcamel.com/",
)

# Swarm
# logger.info(
#     f"Running concurrent workflow with task: {task.description}"
# )

# Measure execution time
start_time = timeit.default_timer()

workflow = ConcurrentWorkflow(
    max_workers=20,
    autosave=True,
    print_results=True,
    return_results=True,
)

# Add task to workflow
workflow.add(task)
workflow.run()

# Calculate execution time
execution_time = timeit.default_timer() - start_time
# logger.info(f"Execution time: {execution_time} seconds")
print(f"Execution time: {execution_time} seconds")

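Editor's note: with a single task the workflow's concurrency goes unused. A sketch of queuing several tasks, assuming `add` can simply be called once per task (illustrative, not part of the deleted file).

# Hypothetical multi-task variant; each Task wraps the same agent.
for url in ["https://www.coachcamel.com/", "https://example.com/"]:
    workflow.add(Task(agent=agent, description=f"Download {url}"))
workflow.run()
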
@@ -1,89 +0,0 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent-General-11",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
    tool_system_prompt=None,
)

# Convert the agent object to a dictionary
print(agent.to_dict())
print(agent.to_toml())
print(agent.model_dump_json())
print(agent.model_dump_yaml())

# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")

# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")

# Send a message from the agent to a user
agent.send_agent_message(agent_name="agent_name", message="message")

# Ingest multiple documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")

# Run the agent with a filtered system prompt
agent.filtered_run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)

# Run the agent with multiple system prompts
agent.bulk_run(
    [
        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
        "Another system prompt",
    ]
)

# Add a memory to the agent
agent.add_memory("Add a memory to the agent")

# Check the number of available tokens for the agent
agent.check_available_tokens()

# Perform token checks for the agent
agent.tokens_checks()

# Print the dashboard of the agent
agent.print_dashboard()

# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()

# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()

# Dump the model to a JSON file
agent.model_dump_json()
print(agent.to_toml())

# Print all of the output metadata of the agent
print(agent.agent_output.model_dump())
print(agent.agent_output.model_dump_json())

@@ -1,34 +0,0 @@
import os

from dotenv import load_dotenv
from loguru import logger
from swarm_models import OpenAIChat

from swarms.agents.create_agents_from_yaml import (
    create_agents_from_yaml,
)

# Load environment variables
load_dotenv()

# Path to your YAML file
yaml_file = "agents.yaml"

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)


try:
    # Create agents and run tasks (return_type="tasks" returns the task results)
    task_results = create_agents_from_yaml(
        model=model, yaml_file=yaml_file, return_type="tasks"
    )

    logger.info(f"Results from agents: {task_results}")
except Exception as e:
    logger.error(f"An error occurred: {e}")

@@ -1,15 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat

## Initialize the workflow
agent = Agent(
    llm=OpenAIChat(),
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)

# Run the workflow on a task
agent("Find a chick fil a equivalent in hayes valley")

@@ -1,44 +0,0 @@
import os

from dotenv import load_dotenv

# Import the OpenAIChat model and the Agent struct
from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")


# Initialize the ChromaDB client
chromadb = ChromaDB(
    metric="cosine",
    output_dir="scp",
    docs_folder="artifacts",
)

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
    max_tokens=1000,
)

## Initialize the workflow
agent = Agent(
    llm=llm,
    name="Health and Wellness Blog",
    system_prompt="Generate a 10,000 word blog on health and wellness.",
    max_loops=4,
    autosave=True,
    dashboard=True,
    long_term_memory=[chromadb],
    memory_chunk_size=300,
)

# Run the workflow on a task
agent.run("Generate a 10,000 word blog on health and wellness.")

@@ -1,58 +0,0 @@
import os

from swarms_memory import ChromaDB

from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.utils.data_to_text import data_to_text

# Initialize the ChromaDB client
chromadb = ChromaDB(
    metric="cosine",
    output_dir="finance_agent_rag",
    # docs_folder="artifacts",  # Folder of your documents
)


# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    agent_description="Agent creates ",
    llm=Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")),
    max_loops="auto",
    autosave=True,
    # dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # interactive=True,
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs=
    # docs_folder="docs",
    retry_attempts=3,
    # context_length=1000,
    # tool_schema=dict,
    context_length=200000,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


contract = data_to_text("your_contract_pdf.pdf")

agent.run(
    f"Analyze the following contract and give me a full summary: {contract}"
)

@@ -1,41 +0,0 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key,
    model_name="o1-preview",
    temperature=0.1,
    max_tokens=100,
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=2,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
    # output_type="json",
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)

@@ -1,54 +0,0 @@
from dataclasses import dataclass
from typing import List

from swarms import JSON, BaseLLM, BaseVectorDatabase, Agent


@dataclass
class YourAgent(Agent):
    """
    Represents an agent in the swarm protocol.

    Attributes:
        llm (BaseLLM): The language model for the agent.
        long_term_memory (BaseVectorDatabase): The long-term memory for the agent.
        tool_schema (List[JSON]): The schema for the tools used by the agent.
    """

    llm: BaseLLM
    long_term_memory: BaseVectorDatabase
    tool_schema: JSON
    tool_schemas: List[JSON]

    def step(self, task: str, *args, **kwargs):
        """
        Performs a single step in the agent's task.

        Args:
            task (str): The task to be performed.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.
        """
        ...

    def run(self, task: str, *args, **kwargs):
        """
        Runs the agent's task.

        Args:
            task (str): The task to be performed.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.
        """
        ...

    def plan(self, task: str, *args, **kwargs):
        """
        Plans the agent's task.

        Args:
            task (str): The task to be performed.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.
        """
        ...

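Editor's note: every method above is a stub, so a concrete subclass must supply the bodies. A minimal illustrative sketch, assuming `BaseLLM` exposes a `run(task)` method as in the other examples; the class and wiring are hypothetical.

class SingleStepAgent(YourAgent):
    def step(self, task: str, *args, **kwargs):
        # One model call per step.
        return self.llm.run(task)

    def run(self, task: str, *args, **kwargs):
        # Single-step run, for illustration only.
        return self.step(task)

    def plan(self, task: str, *args, **kwargs):
        # Trivial plan: treat the whole task as one step.
        return [task]
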
@@ -1,49 +0,0 @@
from swarms import Agent, HuggingfaceLLM
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

model = HuggingfaceLLM(
    model_id="meta-llama/Meta-Llama-3.1-8B",
    max_tokens=4000,
    temperature=0.1,
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    # dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # interactive=True,
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs=
    # docs_folder="docs",
    retry_attempts=3,
    # context_length=1000,
    # tool_schema=dict,
    context_length=200000,
    # tool_schema=
    # tools
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


agent.run(
    "What are the components of a startup's stock incentive equity plan"
)

@@ -1,86 +0,0 @@
"""
* WORKING

What this script does:
Multi-Agent run to test AgentOps (https://www.agentops.ai/)

Requirements:
1. Create an account on https://www.agentops.ai/ and run pip install agentops
2. Add the following API key(s) in your .env file:
   - OPENAI_API_KEY
   - AGENTOPS_API_KEY
3. Go to your agentops dashboard to observe your activity

"""

################ Adding project root to PYTHONPATH ################################
# If you are running examples in the project files directly, use this:

import sys
import os

sys.path.insert(0, os.getcwd())

################ Adding project root to PYTHONPATH ################################

from swarms import Agent, OpenAIChat, AgentRearrange

Treasurer = Agent(
    agent_name="Treasurer",
    system_prompt="Give your opinion on the cash management.",
    agent_description=(
        "responsible for managing an organization's financial assets and liquidity. They oversee cash management, "
        "investment strategies, and financial risk. Key duties include monitoring cash flow, managing bank relationships, "
        "ensuring sufficient funds for operations, and optimizing returns on short-term investments. Treasurers also often "
        "handle debt management and may be involved in capital raising activities."
    ),
    llm=OpenAIChat(),
    max_loops=1,
    agent_ops_on=True,
)


CFO = Agent(
    agent_name="CFO",
    system_prompt="Give your opinion on the financial performance of the company.",
    agent_description=(
        "the top financial executive in an organization, overseeing all financial operations and strategy. Their role is broader than a treasurer's and includes:\n"
        "Financial planning and analysis\n"
        "Accounting and financial reporting\n"
        "Budgeting and forecasting\n"
        "Strategic financial decision-making\n"
        "Compliance and risk management\n"
        "Investor relations (in public companies)\n"
        "Overseeing the finance and accounting departments"
    ),
    llm=OpenAIChat(),
    max_loops=1,
    agent_ops_on=True,
)

swarm = AgentRearrange(
    agents=[Treasurer, CFO],
    flow="Treasurer -> CFO",
)

results = swarm.run(
    "Date,Revenue,Expenses,Profit,Cash_Flow,Inventory,Customer_Acquisition_Cost,Customer_Retention_Rate,Marketing_Spend,R&D_Spend,Debt,Assets\n"
    "2023-01-01,1000000,800000,200000,150000,500000,100,0.85,50000,100000,2000000,5000000\n"
    "2023-02-01,1050000,820000,230000,180000,520000,95,0.87,55000,110000,1950000,5100000\n"
    "2023-03-01,1100000,850000,250000,200000,530000,90,0.88,60000,120000,1900000,5200000\n"
    "2023-04-01,1200000,900000,300000,250000,550000,85,0.90,70000,130000,1850000,5400000\n"
    "2023-05-01,1300000,950000,350000,300000,580000,80,0.92,80000,140000,1800000,5600000\n"
    "2023-06-01,1400000,1000000,400000,350000,600000,75,0.93,90000,150000,1750000,5800000\n"
    "2023-07-01,1450000,1050000,400000,320000,620000,78,0.91,95000,160000,1700000,5900000\n"
    "2023-08-01,1500000,1100000,400000,300000,650000,80,0.90,100000,170000,1650000,6000000\n"
    "2023-09-01,1550000,1150000,400000,280000,680000,82,0.89,105000,180000,1600000,6100000\n"
    "2023-10-01,1600000,1200000,400000,260000,700000,85,0.88,110000,190000,1550000,6200000\n"
    "2023-11-01,1650000,1250000,400000,240000,720000,88,0.87,115000,200000,1500000,6300000\n"
    "2023-12-01,1700000,1300000,400000,220000,750000,90,0.86,120000,210000,1450000,6400000\n"
    "2024-01-01,1500000,1200000,300000,180000,780000,95,0.84,100000,180000,1500000,6300000\n"
    "2024-02-01,1550000,1220000,330000,200000,760000,92,0.85,105000,185000,1480000,6350000\n"
    "2024-03-01,1600000,1240000,360000,220000,740000,89,0.86,110000,190000,1460000,6400000\n"
    "2024-04-01,1650000,1260000,390000,240000,720000,86,0.87,115000,195000,1440000,6450000\n"
    "2024-05-01,1700000,1280000,420000,260000,700000,83,0.88,120000,200000,1420000,6500000\n"
    "2024-06-01,1750000,1300000,450000,280000,680000,80,0.89,125000,205000,1400000,6550000"
)

@@ -1,61 +0,0 @@
"""
* WORKING

What this script does:
Simple agent run to test AgentOps to record tool actions (https://www.agentops.ai/)

Requirements:
1. Create an account on https://www.agentops.ai/ and run pip install agentops
2. Add the following API key(s) in your .env file:
   - OPENAI_API_KEY
   - AGENTOPS_API_KEY
3. Go to your agentops dashboard to observe your activity

"""

################ Adding project root to PYTHONPATH ################################
# If you are running examples in the project files directly, use this:

import sys
import os

sys.path.insert(0, os.getcwd())

################ Adding project root to PYTHONPATH ################################


from swarms import Agent
from swarm_models import OpenAIChat
from agentops import record_function


# Add the agentops decorator on your tools
@record_function("length_checker")
def length_checker(string: str) -> int:
    """
    For a given string it returns the length of the string.

    Args:
        string (str): string to check the length of

    Returns:
        int: length of the string
    """
    return len(string)


agent1 = Agent(
    agent_name="lengther",
    system_prompt="return the length of the string",
    agent_description=(
        "For a given string it calls the function length_checker to return the length of the string."
    ),
    llm=OpenAIChat(),
    max_loops=1,
    agent_ops_on=True,
    tools=[length_checker],
    execute_tool=True,
)


agent1.run("hello")

@@ -1,14 +0,0 @@
from swarms import Agent, AzureOpenAI

## Initialize the workflow
agent = Agent(
    llm=AzureOpenAI(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)

# Run the workflow on a task
agent("Understand the risk profile of this account")

@@ -1,31 +0,0 @@
from swarms import Agent
from swarm_models.base_llm import BaseLLM


# Define a custom LLM class
class ExampleLLM(BaseLLM):
    def __init__(self):
        pass

    def run(self, task: str, *args, **kwargs):
        # Your LLM logic here
        pass


# Initialize the workflow
agent = Agent(
    llm=ExampleLLM(),  # Instantiate the ExampleLLM class
    max_loops="auto",  # Set the maximum number of loops to "auto"
    autosave=True,  # Enable autosave feature
    dashboard=False,  # Disable the dashboard
    streaming_on=True,  # Enable streaming
    verbose=True,  # Enable verbose mode
    stopping_token="<DONE>",  # Set the stopping token to "<DONE>"
    interactive=True,  # Enable interactive mode
)

# Run the workflow on a task
agent(
    "Generate a transcript for a youtube video on what swarms are!"  # Specify the task
    " Output a <DONE> token when done."  # Specify the stopping condition
)

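Editor's note: to exercise the Agent loop without any provider, the stub `run` can return a canned reply that includes the stopping token. A deterministic sketch (illustrative, not part of the deleted file).

class CannedLLM(BaseLLM):
    """A provider-free stand-in that always returns a fixed reply."""

    def __init__(self, reply: str = "Hello from CannedLLM. <DONE>"):
        self.reply = reply

    def run(self, task: str, *args, **kwargs):
        # Emitting the stopping token lets max_loops="auto" terminate.
        return self.reply
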
@@ -1,37 +0,0 @@
import os
import sys

from dotenv import load_dotenv

# Import the OpenAIChat model and the Agent struct
from swarms import Agent

from swarm_models import OpenAIChat

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)


print(
    f"this is a test msg for stdout and stderr: {sys.stdout},"
    f" {sys.stderr}"
)

## Initialize the workflow
agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)

# Run the workflow on a task
out = agent.run("Generate a 10,000 word blog on health and wellness.")

print(out)

@@ -1,40 +0,0 @@
import os

from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.agent import Agent
from swarm_models import OpenAIChat

# Example usage:
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=2,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)

@@ -1,27 +0,0 @@
import os

from dotenv import load_dotenv

# Import the HuggingfaceLLM model and the Agent struct
from swarms import Agent, HuggingfaceLLM

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = HuggingfaceLLM(model_id="meta-llama/Meta-Llama-3-8B").cuda()

## Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=True,
    interactive=True,
)

# Run the workflow on a task
agent.run("Generate a 10,000 word blog on health and wellness.")

@@ -1,62 +0,0 @@
from pydantic import BaseModel, Field
from swarm_models import OpenAIChat
from swarms import Agent
import os


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(
        ..., title="Whether the person is a student"
    )
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Instantiate the schema to use as a tool schema
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Initialize the agent
agent = Agent(
    agent_name="Person Information Generator",
    system_prompt=(
        "Generate a person's information based on the following schema:"
    ),
    # Set the tool schema to the JSON string -- this is the key difference
    # tool_schema=tool_schema,
    llm=OpenAIChat(
        openai_api_key=os.getenv("OPENAI_API_KEY"),
    ),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema which is a BaseModel
    # output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_base_models=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")

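Editor's note: if the agent returns JSON matching `Schema`, it can be validated back into the model. A sketch using Pydantic v2's `model_validate_json`; whether `generated_data` is a JSON string depends on the agent's output settings, so this is an assumption.

try:
    person = Schema.model_validate_json(generated_data)
    print(person.name, person.age, person.courses)
except Exception as e:
    print(f"Output did not match the schema: {e}")
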
@@ -1,102 +0,0 @@
from swarms import Agent
from swarm_models import Anthropic
import subprocess

# Model
llm = Anthropic(
    temperature=0.1,
)


# Tools
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The code to run in the terminal.

    Returns:
        str: The output of the code.
    """
    out = subprocess.run(
        code, shell=True, capture_output=True, text=True
    ).stdout
    return str(out)


def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: The search results.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Searching for {query} in the browser."


def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file creation operation.
    """
    with open(file_path, "w") as file:
        file.write(content)
    return f"File {file_path} created successfully."


def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): The mode to open the file in.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file editing operation.
    """
    with open(file_path, mode) as file:
        file.write(content)
    return f"File {file_path} edited successfully."


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[terminal, browser, file_editor, create_file],
    code_interpreter=True,
)

# Run the agent
out = agent("Create a new file for a plan to take over the world.")
print(out)

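Editor's note: the `terminal` tool above discards stderr and the exit code, so failing commands look like empty successes. A slightly stricter variant is sketched below (illustrative, not part of the deleted file).

def terminal_strict(code: str) -> str:
    """Run a shell command and surface stderr and the exit status."""
    proc = subprocess.run(
        code, shell=True, capture_output=True, text=True
    )
    if proc.returncode != 0:
        return f"exit {proc.returncode}: {proc.stderr}"
    return proc.stdout
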
@@ -1,105 +0,0 @@
from swarms import Agent, OpenAIChat  # ChromaDB
import subprocess

# Model
llm = OpenAIChat(
    temperature=0.1,
)


# Tools
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The code to run in the terminal.

    Returns:
        str: The output of the code.
    """
    out = subprocess.run(
        code, shell=True, capture_output=True, text=True
    ).stdout
    return str(out)


def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: The search results.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Searching for {query} in the browser."


def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file creation operation.
    """
    with open(file_path, "w") as file:
        file.write(content)
    return f"File {file_path} created successfully."


def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): The mode to open the file in.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file editing operation.
    """
    with open(file_path, mode) as file:
        file.write(content)
    return f"File {file_path} edited successfully."


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[terminal, browser, file_editor, create_file],
    # long_term_memory=chromadb,
    metadata_output_type="json",
    # List of schemas that the agent can handle
    # list_base_models=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)

# Run the agent
agent.run("Create a new file for a plan to take over the world.")

@@ -1,13 +0,0 @@
import json
from swarms.tools.py_func_to_openai_func_str import (
    get_openai_function_schema_from_func,
)
from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api

out = get_openai_function_schema_from_func(
    fetch_web_articles_bing_api,
    name="fetch_web_articles_bing_api",
    description="Fetches four articles from Bing Web Search API based on the given query.",
)
out = json.dumps(out, indent=2)
print(out)

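Editor's note: schemas produced this way are typically handed to a chat-completions call as tools. A hedged sketch with the official openai client; it assumes the dict returned by the converter already matches the API's tools format, which is not guaranteed by this snippet.

# Hypothetical hand-off to the OpenAI API; `schema` is the dict form
# (before json.dumps).
from openai import OpenAI

client = OpenAI()
schema = get_openai_function_schema_from_func(
    fetch_web_articles_bing_api,
    name="fetch_web_articles_bing_api",
    description="Fetches four articles from Bing Web Search API based on the given query.",
)
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Find articles about swarm intelligence"}],
    tools=[schema],
)
print(response.choices[0].message.tool_calls)
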

@@ -1,97 +0,0 @@
import json

from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List
from swarms import Agent


class AgentSpec(BaseModel):
    agent_name: str = Field(
        ...,
        description="The name of the agent",
    )
    system_prompt: str = Field(
        ...,
        description="The system prompt for the agent",
    )
    agent_description: str = Field(
        ...,
        description="The description of the agent",
    )
    max_tokens: int = Field(
        ...,
        description="The maximum number of tokens to generate in the API response",
    )
    temperature: float = Field(
        ...,
        description="A parameter that controls the randomness of the generated text",
    )
    context_window: int = Field(
        ...,
        description="The context window for the agent",
    )
    task: str = Field(
        ...,
        description="The main task for the agent",
    )


class SwarmSpec(BaseModel):
    multiple_agents: List[AgentSpec] = Field(
        ...,
        description="The list of agents in the swarm",
    )


def create_agent(
    agent_name: str,
    system_prompt: str,
    agent_description: str,
    max_tokens: int,
    temperature: float,
    context_window: int,
):
    return Agent(
        agent_name=agent_name,
        system_prompt=system_prompt,
        agent_description=agent_description,
        max_tokens=max_tokens,
        temperature=temperature,
        context_window=context_window,
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an agent creator; your purpose is to create an agent with the user-provided specifications. Think of relevant names, descriptions, and context windows for the agent. You need to provide the name of the agent, the system prompt for the agent, the description of the agent, the maximum number of tokens to generate in the API response, the temperature for the agent, the context window for the agent, and the model name for the agent from huggingface.",
    max_tokens=3000,
    temperature=0.8,
    base_model=SwarmSpec,
    parallel_tool_calls=False,
)


def parse_json_for_agents_then_create_agents(
    function_call: dict,
) -> list:
    agents = []
    # Iterate over the parsed function call, not the `json` module.
    for agent in function_call["multiple_agents"]:
        agents.append(
            create_agent(
                agent["agent_name"],
                agent["system_prompt"],
                agent["agent_description"],
                agent["max_tokens"],
                agent["temperature"],
                agent["context_window"],
                # agent["model_name"]
            )
        )
    return agents


# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run(
    "Create a swarm of agents to generate social media posts. Each agent should have its own social media"
)
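
# Hypothetical follow-up (not in the original example): materialize the agents
# from the structured output. Assumes `out` is a dict matching SwarmSpec.
agents = parse_json_for_agents_then_create_agents(out)
print(f"Created {len(agents)} agents")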

@@ -1,245 +0,0 @@
from typing import Any, Dict, Optional

import requests
from pydantic import BaseModel, Field
from swarms import Conversation
from swarm_models import OpenAIFunctionCaller
from loguru import logger
import os


class APITaskSchema(BaseModel):
    plan: str = Field(
        ...,
        description="Plan out the API request to be executed; contemplate the endpoint, method, headers, body, and params.",
    )
    url: str = Field(
        ..., description="The API endpoint to send the request to."
    )
    method: str = Field(
        ...,
        description="HTTP method to use for the request (e.g., GET, POST).",
    )
    headers: Optional[Dict[str, str]] = Field(
        None, description="Optional headers to include in the request."
    )
    body: Optional[Dict[str, Any]] = Field(
        None, description="Optional body content for POST requests."
    )
    params: Optional[Dict[str, Any]] = Field(
        None, description="Optional query parameters for the request."
    )


class APIRequestAgent:
    """
    An agent that sends API requests based on user input.

    Args:
        name (str, optional): The name of the agent. Defaults to "APIRequestAgent".
        description (str, optional): The description of the agent. Defaults to "An agent that sends API requests based on user input.".
        schema (BaseModel, optional): The schema for the API task. Defaults to APITaskSchema.
        temperature (float, optional): The temperature for the language model. Defaults to 0.5.
        system_prompt (str, optional): The system prompt for the language model. Defaults to "You are an API request manager. Create and execute requests based on the user's needs.".
        max_tokens (int, optional): The maximum number of tokens for the language model. Defaults to 4000.
        full_agent_history (str, optional): The full agent history. Defaults to None.
        max_loops (int, optional): The maximum number of loops for the agent. Defaults to 10.

    Attributes:
        name (str): The name of the agent.
        description (str): The description of the agent.
        schema (BaseModel): The schema for the API task.
        session (requests.Session): The session for connection pooling.
        system_prompt (str): The system prompt for the language model.
        max_tokens (int): The maximum number of tokens for the language model.
        full_agent_history (str): The full agent history.
        max_loops (int): The maximum number of loops for the agent.
        llm (OpenAIFunctionCaller): The function caller for the language model.
        conversation (Conversation): The conversation object.
    """

    def __init__(
        self,
        name: str = "APIRequestAgent",
        description: str = "An agent that sends API requests based on user input.",
        schema: BaseModel = APITaskSchema,
        temperature: float = 0.5,
        system_prompt: str = "You are an API request manager. Create and execute requests based on the user's needs.",
        max_tokens: int = 4000,
        full_agent_history: Optional[str] = None,
        max_loops: int = 10,
        *args,
        **kwargs,
    ):
        self.name = name
        self.description = description
        self.schema = schema
        # Use a session for connection pooling.
        self.session = requests.Session()
        self.system_prompt = system_prompt
        self.max_tokens = max_tokens
        self.full_agent_history = full_agent_history
        self.max_loops = max_loops

        # Initialize the function caller (LLM) with the schema
        self.llm = OpenAIFunctionCaller(
            system_prompt=system_prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            base_model=APITaskSchema,
            parallel_tool_calls=False,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
        )

        # Conversation
        self.conversation = Conversation(
            time_enabled=True,
            system_prompt=system_prompt,
        )

        # Full agent history
        self.full_agent_history = (
            self.conversation.return_history_as_string()
        )

    def parse_response(
        self, response: requests.Response
    ) -> Dict[str, Any]:
        """
        Parses the API response and returns the content.

        Args:
            response (requests.Response): The API response to parse.

        Returns:
            Dict[str, Any]: The parsed response content.
        """
        try:
            logger.info(
                f"Response status code: {response.status_code}"
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            logger.error(f"HTTPError: {e}")
            raise
        except ValueError as e:
            logger.error(f"Failed to parse JSON: {e}")
            raise

    def execute_request(self, task: APITaskSchema) -> Dict[str, Any]:
        """
        Executes the API request based on the given task schema.

        Args:
            task (APITaskSchema): The task schema containing request details.

        Returns:
            Dict[str, Any]: The API response.
        """
        # APITaskSchema has no separate `endpoint` field; `url` is the full endpoint.
        url = task.url
        method = task.method.upper()

        logger.info(f"Executing request: {method} {url}")
        try:
            if method == "GET":
                response = self.session.get(
                    url, headers=task.headers, params=task.params
                )
            elif method == "POST":
                response = self.session.post(
                    url,
                    headers=task.headers,
                    json=task.body,
                    params=task.params,
                )
            elif method == "PUT":
                response = self.session.put(
                    url,
                    headers=task.headers,
                    json=task.body,
                    params=task.params,
                )
            elif method == "DELETE":
                response = self.session.delete(
                    url, headers=task.headers, params=task.params
                )
            elif method == "PATCH":
                response = self.session.patch(
                    url,
                    headers=task.headers,
                    json=task.body,
                    params=task.params,
                )
            else:
                raise ValueError(f"Unsupported HTTP method: {method}")

            logger.info(f"Executed {method} request to {url}")
            return self.parse_response(response)

        except requests.exceptions.RequestException as e:
            logger.error(f"RequestException: {e}")
            raise

    def execute_api_request(self, task: APITaskSchema) -> str:
        """
        Executes a single step: sends the request and processes the response.

        Args:
            task (APITaskSchema): The task schema containing request details.

        Returns:
            str: The processed response from the API, serialized as a string.
        """
        logger.info(f"Executing API request based on task: {task}")
        response = self.execute_request(task)
        response = str(response)

        # Log the response in the conversation
        self.conversation.add(role="API", content=response)
        return response

    def run(self, task: str) -> Any:
        """
        Runs the agent by processing a task string and executing the requests.

        Args:
            task (str): The task to be processed by the LLM and executed by the agent.

        Returns:
            Any: The result of the task processed by the LLM.
        """
        logger.info(f"Running agent with task: {task}")
        output = self.llm.run(task)

        # Log the output in the conversation
        print(output)
        print(type(output))
        self.conversation.add(role=self.name, content=output)

        # Convert dict -> APITaskSchema
        output = APITaskSchema(**output)

        logger.info(f"Executing request based on task: {output}")
        return self.execute_api_request(output)


# Model
agent = APIRequestAgent(
    name="APIRequestAgent",
    description="An agent that sends API requests based on user input.",
    schema=APITaskSchema,
    system_prompt="You are an API request manager. Create and execute requests based on the user's needs.",
)

agent.run("Send an API request to an open source API")

print(agent.full_agent_history)
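
# Illustrative sketch (an assumption, not part of the original example): the
# LLM step can be bypassed by hand-building an APITaskSchema. The URL below is
# a hypothetical placeholder endpoint.
example_task = APITaskSchema(
    plan="Fetch a single JSON record from a public API.",
    url="https://api.example.com/records/1",
    method="GET",
    headers=None,
    body=None,
    params=None,
)
# agent.execute_api_request(example_task)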

@@ -1,42 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
class ClaudeArtifact(BaseModel):
    name: str = Field(
        ...,
        description="The name of the artifact",
    )
    plan: str = Field(
        ...,
        description="Plan for the artifact: do I generate a new Python file or modify an existing one?",
    )
    file_name_path: str = Field(
        ...,
        description="The path to the file to modify or create, for example: 'game.py'",
    )
    content_of_file: str = Field(
        ...,
        description="The content of the file to modify or create",
    )
    edit_count: int = Field(
        ...,
        description="The number of times to edit the file",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an artifact creator; your purpose is to create an artifact with the user-provided specifications. Think of relevant names, descriptions, and context windows for the artifact. You need to provide the name of the artifact, the system prompt for the artifact, the description of the artifact, the maximum number of tokens to generate in the API response, the temperature for the artifact, the context window for the artifact, and the model name for the artifact from huggingface.",
    max_tokens=3500,
    temperature=0.9,
    base_model=ClaudeArtifact,
    parallel_tool_calls=False,
)

out = model.run(
    "Create a game in Python that has never been created before. Create a new form of gaming experience that has never been contemplated before."
)
print(out)

@@ -1,95 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List
import json


AI_PAPER_IDEA_GENERATOR = """

You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.

As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.

Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?

Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.

You're a research scientist working on a new paper. You need to generate a novel idea for a research paper.

The paper should be in the field of multi-modal learning and should propose a new method or algorithm.

The paper should be innovative, novel, and feasible.

Generate a paper idea that meets these criteria.

You need to provide the following details:
- The paper idea
- A brief description of the paper idea
- A proposed experiment to test the paper idea
- Ratings for interestingness, novelty, and feasibility of the paper idea
- The ratings should be on a scale of 0.1 to 1.0, with 1.0 being the most innovative, novel, or feasible

"""


class PaperIdeaSchema(BaseModel):
    paper_idea: str = Field(
        ...,
        description="The generated paper idea.",
    )
    description: str = Field(
        ...,
        description="A brief description of the paper idea.",
    )
    experiment: str = Field(
        ...,
        description="A proposed experiment to test the paper idea.",
    )
    interestingness: float = Field(
        ...,
        description="A rating of how interesting the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most interesting.",
    )
    novelty: float = Field(
        ...,
        description="A rating of how novel the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most novel.",
    )
    feasibility: float = Field(
        ...,
        description="A rating of how feasible the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most feasible.",
    )


class MultiplePaperIdeas(BaseModel):
    paper_ideas: List[PaperIdeaSchema] = Field(
        ...,
        description="A list of generated paper ideas.",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt=AI_PAPER_IDEA_GENERATOR,
    max_tokens=4000,
    temperature=0.7,
    base_model=MultiplePaperIdeas,
    parallel_tool_calls=False,
)


# Call the function with the input
output = model.run(
    "Generate paper ideas for multi-agent learning and collective intelligence involving many transformer models as an ensemble of transformers"
)
print(type(output))
# print(output)
output = json.dumps(output, indent=2)
print(output)
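
# Hedged sketch (assumes the raw `model.run` output, before json.dumps, is a
# dict matching MultiplePaperIdeas; `raw_output` is a hypothetical name for it):
#
# for idea in raw_output["paper_ideas"]:
#     print(idea["paper_idea"], idea["novelty"])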

@@ -1,52 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the data structures the function callers must fill in.
class ModelCode(BaseModel):
    file_name: str
    model_code_in_pytorch: str


class TrainingCodeModel(BaseModel):
    file_name: str
    training_code: str
    dataset_name: str


# Example usage:
# Initialize the function callers
model = OpenAIFunctionCaller(
    system_prompt="You're a model engineer; your purpose is to generate PyTorch code for a given model name.",
    max_tokens=4000,
    temperature=0.5,
    base_model=ModelCode,
)

trainer = OpenAIFunctionCaller(
    system_prompt="You're a model engineer; your purpose is to generate the code to train a given model architecture in PyTorch using available datasets on huggingface.",
    max_tokens=4000,
    temperature=0.5,
    base_model=TrainingCodeModel,
)

# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
# Here, we initialize instances of the OpenAIFunctionCaller class with the following parameters:
# - system_prompt: A prompt that sets the context for the conversation with the API.
# - max_tokens: The maximum number of tokens to generate in the API response.
# - temperature: A parameter that controls the randomness of the generated text.
# - base_model: The Pydantic model the structured output must conform to.
out = model.run(
    "Generate a pytorch code for a sentiment analysis model using pytorch"
)
print(str(out))

# Trainer: pass the generated model code, not the trainer object itself.
out = trainer.run(
    f"Generate the training code for the sentiment analysis model using pytorch: {out}"
)
print(out)

@@ -1,39 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the data structure for making API calls to retrieve weather information.
class WeatherAPI(BaseModel):
    city: str
    date: str


# The WeatherAPI class is a Pydantic BaseModel that represents the data structure
# for making API calls to retrieve weather information. It has two attributes: city and date.


# Example usage:
# Initialize the function caller
function_caller = OpenAIFunctionCaller(
    system_prompt="You are a helpful assistant.",
    max_tokens=500,
    temperature=0.5,
    base_model=WeatherAPI,
)

# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
# - system_prompt: A prompt that sets the context for the conversation with the API.
# - max_tokens: The maximum number of tokens to generate in the API response.
# - temperature: A parameter that controls the randomness of the generated text.
# - base_model: The base model to use for the API calls, in this case, the WeatherAPI class.

# Run the function caller
response = function_caller.run(
    "Get the weather forecast for New York City on July 4th, 2022."
)

# The run() method of the OpenAIFunctionCaller class is used to make a function call to the API.
# It takes a string parameter that represents the user's request or query.
print(response)
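
# Illustrative output shape (an assumption based on the WeatherAPI schema):
# {"city": "New York City", "date": "2022-07-04"}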

@@ -1,203 +0,0 @@
import os
from typing import List

from loguru import logger
from pydantic import BaseModel, Field

from swarms import create_file_in_folder
from swarm_models import OpenAIFunctionCaller


class PromptUseCase(BaseModel):
    title: str = Field(
        ...,
        description="The name of the use case.",
    )
    description: str = Field(
        ...,
        description="The description of the use case.",
    )


class PromptSchema(BaseModel):
    name: str = Field(
        ...,
        description="The name of the prompt.",
    )
    prompt: str = Field(
        ...,
        description="The prompt to generate the response.",
    )
    description: str = Field(
        ...,
        description="The description of the prompt.",
    )
    tags: str = Field(
        ...,
        description="Comma-separated tags for the prompt, for example: Code Gen Prompt, Pytorch Code Gen Agent Prompt, Finance Agent Prompt",
    )
    useCases: List[PromptUseCase] = Field(
        ...,
        description="The use cases for the prompt.",
    )


class PromptGeneratorAgent:
    """
    A class that generates prompts based on given tasks and publishes them to the marketplace.

    Args:
        system_prompt (str, optional): The system prompt to use. Defaults to None.
        max_tokens (int, optional): The maximum number of tokens in the generated prompt. Defaults to 4000.
        temperature (float, optional): The temperature value for controlling randomness in the generated prompt. Defaults to 0.5.
        schema (BaseModel, optional): The base model schema to use. Defaults to PromptSchema.

    Attributes:
        llm (OpenAIFunctionCaller): An instance of the OpenAIFunctionCaller class for making function calls to the OpenAI API.

    Methods:
        clean_model_code: Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespace.
        upload_to_marketplace: Uploads the generated prompt data to the marketplace.
        run: Creates a prompt based on the given task and publishes it to the marketplace.
    """

    def __init__(
        self,
        system_prompt: str = None,
        max_tokens: int = 4000,
        temperature: float = 0.5,
        schema: BaseModel = PromptSchema,
    ):
        self.llm = OpenAIFunctionCaller(
            system_prompt=system_prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            base_model=schema,
            parallel_tool_calls=False,
        )

    def clean_model_code(self, model_code_str: str) -> str:
        """
        Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespace.

        Args:
            model_code_str (str): The model code string to clean.

        Returns:
            str: The cleaned model code.
        """
        cleaned_code = model_code_str.replace("\\n", "\n").replace(
            "\\'", "'"
        )
        cleaned_code = cleaned_code.strip()
        return cleaned_code

    def upload_to_marketplace(self, data: dict) -> str:
        """
        Uploads the generated prompt data to the marketplace.

        Args:
            data (dict): The prompt data to upload.

        Returns:
            str: The response from the marketplace API, serialized as a string.
        """
        import json

        import requests

        url = "https://swarms.world/api/add-prompt"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}",
        }
        response = requests.post(
            url, headers=headers, data=json.dumps(data)
        )
        return str(response.json())

    def run(self, task: str) -> str:
        """
        Creates a prompt based on the given task and publishes it to the marketplace.

        Args:
            task (str): The task description for generating the prompt.

        Returns:
            str: The response from the marketplace API after uploading the prompt.
        """
        out = self.llm.run(task)
        name = out["name"]
        logger.info(f"Prompt generated: {out}")

        create_file_in_folder(
            "auto_generated_prompts", f"prompt_{name}.json", str(out)
        )
        logger.info(f"Prompt saved to file: prompt_{name}.json")

        # Clean the generated prompt and assemble the marketplace payload
        prompt = out["prompt"]
        description = out["description"]
        tags = out["tags"]
        useCases = out["useCases"]

        data = {
            "name": name,
            "prompt": self.clean_model_code(prompt),
            "description": description,
            "tags": tags,
            "useCases": useCases,
        }

        create_file_in_folder(
            "auto_generated_prompts",
            f"prompt_{name}.json",
            str(data),
        )

        # Now submit to the swarms API
        logger.info("Uploading to marketplace...")
        return self.upload_to_marketplace(data)


# Example usage:
system_prompt = """

**System Prompt for Prompt Creator Agent**

---

**Role**: You are a highly skilled prompt creator agent with expertise in designing effective agents to solve complex business problems. Your primary function is to generate prompts that result in agents capable of executing business tasks with precision, efficiency, and scalability.

**Objective**: Your goal is to create prompts that follow a structured format, ensuring that the resulting agents are well-informed, reliable, and able to perform specific tasks in business environments. These tasks might include automating processes, analyzing data, generating content, or making strategic decisions.

### **Prompt Structure Guidelines**:

1. **Instructions**: Begin by clearly stating the objective of the agent. The instructions should outline what the agent is expected to accomplish, providing a high-level overview of the desired outcome. Be concise but comprehensive, ensuring the agent understands the broader context of the task.

2. **Examples**: After the instructions, provide several examples (known as "many-shot examples") to demonstrate how the agent should approach the task. Each example should include:
   - **Input**: A specific scenario or task the agent might encounter.
   - **Expected Output**: The correct or optimal response the agent should generate in that scenario.

   Use a variety of examples that cover different potential cases the agent might face, ensuring the agent can generalize from the examples provided.

3. **Standard Operating Procedures (SOPs)**: For tasks that require detailed, step-by-step guidance, include a comprehensive SOP. This should be a long-form set of instructions that breaks down the task into manageable steps. The SOP should:
   - Outline each step in a sequential manner.
   - Provide specific guidelines, best practices, and considerations for each step.
   - Include examples or mini-tutorials where necessary to ensure clarity.

4. **Error Handling**: Include guidance on how the agent should handle potential errors or uncertainties. This might involve instructions on when to seek additional input, how to flag issues, or how to prioritize tasks when resources are limited.

5. **Adaptability**: Ensure that the prompts encourage the agent to adapt to changing circumstances. This might include instructions on how to modify its approach based on real-time feedback, how to update its knowledge base, or how to learn from previous mistakes.

"""


agent = PromptGeneratorAgent(
    system_prompt=system_prompt, max_tokens=4000
)

response = agent.run(
    "Create a prompt for an agent to analyze complicated cashflow statements and generate a summary report."
)
print(response)

@@ -1,77 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List


class Observation(BaseModel):
    observation: str = Field(
        ...,
        description="What are you seeing in the image?",
    )
    summary_of_observation: str = Field(
        ...,
        description="The summary of the observation/image",
    )


class Sequence(BaseModel):
    goal: str = Field(
        ...,
        description="The goal of the mission",
    )
    observation: List[Observation] = Field(
        ...,
        description="The observations of the agent",
    )
    action: str = Field(
        ...,
        description="Take an action that leads to the completion of the task.",
    )


class GoalDecomposer(BaseModel):
    goal: str = Field(
        ...,
        description="The goal of the task",
    )
    sub_goals: List[str] = Field(
        ...,
        description="The sub-goals of the mission",
    )


# Given the task t and observation o, the sub-goal
# sequence g1, g2, g3, ..., gn can be formulated as:


class KGP(BaseModel):
    task: str = Field(
        ...,
        description="The task to be accomplished",
    )
    observation: str = Field(
        ...,
        description="The observation of the task",
    )
    sequence: List[GoalDecomposer] = Field(
        ...,
        description="The sequence of goals to accomplish the task",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an autonomous agent; your purpose is to accomplish a task by understanding your goal, observing the environment, and taking actions that lead to the completion of the task.",
    max_tokens=500,
    temperature=0.5,
    base_model=KGP,
    parallel_tool_calls=False,
)


# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run(
    "We need to craft a diamond pickaxe to mine the obsidian."
)
print(out)
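
# Illustrative output shape (an assumption based on the KGP schema):
# {
#     "task": "Craft a diamond pickaxe to mine the obsidian",
#     "observation": "...",
#     "sequence": [{"goal": "...", "sub_goals": ["Gather wood", "..."]}],
# }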

@@ -1,39 +0,0 @@
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the data structure for the sentiment analysis output.
class SentimentAnalysisCard(BaseModel):
    text: str = Field(
        ...,
        description="The text to be analyzed for sentiment rating",
    )
    rating: float = Field(
        ...,
        description="The sentiment rating of the text, from 0.0 to 1.0",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're a sentiment analysis agent; your purpose is to rate the sentiment of text",
    max_tokens=100,
    temperature=0.5,
    base_model=SentimentAnalysisCard,
    parallel_tool_calls=False,
)


# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
# - system_prompt: A prompt that sets the context for the conversation with the API.
# - max_tokens: The maximum number of tokens to generate in the API response.
# - temperature: A parameter that controls the randomness of the generated text.
# - base_model: The base model to use for the API calls, in this case, the SentimentAnalysisCard class.
out = model.run("This agent created the code incorrectly it sucked.")
print(out)

@@ -1,39 +0,0 @@
from typing import Annotated
from swarms import create_openai_tool
from openai import OpenAI

# Create an instance of the OpenAI client
client = OpenAI()

# Define the user messages for the chat conversation
messages = [
    {
        "role": "user",
        "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
    }
]


# Define the BMI calculator tool using the create_openai_tool decorator
@create_openai_tool(
    name="BMI Calculator",
    description="Calculate the Body Mass Index (BMI)",
)
def calculate_bmi(
    weight: Annotated[float, "Weight in kilograms"],
    height: Annotated[float, "Height in meters"],
) -> Annotated[float, "Body Mass Index"]:
    """Calculate the Body Mass Index (BMI) given a person's weight and height."""
    return weight / (height**2)


# Create a chat completion request using the OpenAI client.
# `tools` expects a list of tool definitions, so the decorated tool is wrapped in a list.
response = client.chat.completions.create(
    model="gpt-3.5-turbo-0125",
    messages=messages,
    tools=[calculate_bmi],
    tool_choice="auto",  # auto is default, but we'll be explicit
)

# Print the generated response from the chat completion
print(response.choices[0].message.content)
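
# Hypothetical follow-up (not in the original example): if the model chose to
# call the tool, the arguments arrive as JSON on the tool call rather than in
# message.content. Assumes the decorator leaves calculate_bmi callable.
import json

tool_calls = response.choices[0].message.tool_calls
if tool_calls:
    args = json.loads(tool_calls[0].function.arguments)
    print(f"BMI: {calculate_bmi(**args)}")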

@@ -1,41 +0,0 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent

# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-12b",
    load_in_4bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")

# Define a JSON schema for a person's information
json_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
        "is_student": {"type": "boolean"},
        "courses": {"type": "array", "items": {"type": "string"}},
    },
}

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="dolly-function-agent",
    description="An agent to create child data",
    model=model,
    tokenizer=tokenizer,
    json_schema=json_schema,
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")
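
# Illustrative output shape (an assumption based on json_schema):
# {"name": "Jane Doe", "age": 21, "is_student": true, "courses": ["CS101"]}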

@@ -1,63 +0,0 @@
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent
from swarms.tools.json_utils import base_model_to_json

# Model name
model_name = "CohereForAI/c4ai-command-r-v01-4bit"

# Load the pre-trained model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
)

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Initialize the schema for the example API request
class APIExampleRequestSchema(BaseModel):
    endpoint: str = Field(
        ..., description="The API endpoint for the example request"
    )
    method: str = Field(
        ..., description="The HTTP method for the example request"
    )
    headers: dict = Field(
        ..., description="The headers for the example request"
    )
    body: dict = Field(
        ..., description="The body of the example request"
    )
    response: dict = Field(
        ...,
        description="The expected response of the example request",
    )


# Convert the schema to a JSON string
api_example_schema = base_model_to_json(APIExampleRequestSchema)

# Define the task to generate an example API request
task = "Generate an example API request using this code:\n"

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="Command R Tool Agent",
    description=(
        "An agent that generates an API request using the Command R"
        " model."
    ),
    model=model,
    tokenizer=tokenizer,
    json_schema=api_example_schema,
)

# Run the agent to generate the example API request
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -1,36 +0,0 @@
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent

# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-12b"
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")

# Define a JSON schema for a person's information
json_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
        "is_student": {"type": "boolean"},
        "courses": {"type": "array", "items": {"type": "string"}},
    },
}

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    model=model, tokenizer=tokenizer, json_schema=json_schema
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(generated_data)

@@ -1,63 +0,0 @@
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent
from swarms.tools.json_utils import base_model_to_json

# Model name
model_name = "ai21labs/Jamba-v0.1"

# Load the pre-trained model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
)

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Initialize the schema for the example API request
class APIExampleRequestSchema(BaseModel):
    endpoint: str = Field(
        ..., description="The API endpoint for the example request"
    )
    method: str = Field(
        ..., description="The HTTP method for the example request"
    )
    headers: dict = Field(
        ..., description="The headers for the example request"
    )
    body: dict = Field(
        ..., description="The body of the example request"
    )
    response: dict = Field(
        ...,
        description="The expected response of the example request",
    )


# Convert the schema to a JSON string
api_example_schema = base_model_to_json(APIExampleRequestSchema)

# Define the task to generate an example API request
task = "Generate an example API request using this code:\n"

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="Jamba Tool Agent",
    description=(
        "An agent that generates an API request using the Jamba"
        " model."
    ),
    model=model,
    tokenizer=tokenizer,
    json_schema=api_example_schema,
)

# Run the agent to generate the example API request
generated_data = agent(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -1,49 +0,0 @@
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent
from swarms.tools.json_utils import base_model_to_json

# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-12b",
    load_in_4bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(
        ..., title="Whether the person is a student"
    )
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Convert the schema to a JSON string
tool_schema = base_model_to_json(Schema)

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="dolly-function-agent",
    description="An agent to create child data",
    model=model,
    tokenizer=tokenizer,
    json_schema=tool_schema,
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -1,52 +0,0 @@
import os

from dotenv import load_dotenv
from pydantic import BaseModel, Field

from swarms import ToolAgent
from swarm_models import OpenAIChat
from swarms.tools.json_utils import base_model_to_json

# Load the environment variables
load_dotenv()

# Initialize the OpenAIChat class
chat = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
)


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(
        ..., title="Whether the person is a student"
    )
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Convert the schema to a JSON string
tool_schema = base_model_to_json(Schema)

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="dolly-function-agent",
    description="An agent to create child data",
    llm=chat,
    json_schema=tool_schema,
)

# Run the agent to generate the person's information
generated_data = agent(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -1,26 +0,0 @@
import os
import threading
from swarms.agents.multion_wrapper import MultiOnAgent


def run_model(api_key):
    model = MultiOnAgent(
        api_key=api_key, max_steps=500, url="https://x.com"
    )
    out = model.run("")
    print(out)


# Create a list to store the threads
threads = []

# Run 10 instances using multithreading
for _ in range(10):
    api_key = os.getenv("MULTION_API_KEY")
    thread = threading.Thread(target=run_model, args=(api_key,))
    thread.start()
    threads.append(thread)

# Wait for all threads to finish
for thread in threads:
    thread.join()
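
# A minimal alternative sketch using a thread pool instead of raw threads
# (an assumption, not part of the original example; the same MULTION_API_KEY
# is reused for every worker):
#
# from concurrent.futures import ThreadPoolExecutor
#
# with ThreadPoolExecutor(max_workers=10) as pool:
#     pool.map(run_model, [os.getenv("MULTION_API_KEY")] * 10)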

@@ -1,73 +0,0 @@
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
from swarms.agents.multion_wrapper import MultiOnAgent

model = MultiOnAgent(
    url="https://tesla.com",
)

llm = OpenAIChat()


def browser_automation(task: str):
    """
    Run a task on the browser automation agent.

    Args:
        task (str): The task to be executed on the browser automation agent.
    """
    out = model.run(task)
    return out


# Purpose: find the best deal on a Cyber Truck using three different agents
agent1 = Agent(
    agent_name="CyberTruckBuyer1",
    system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
    llm=llm,
    max_loops=1,
    # output_type=str,
    metadata="json",
    function_calling_format_type="OpenAI",
    function_calling_type="json",
    streaming_on=True,
    tools=[browser_automation],
)

agent2 = Agent(
    agent_name="CyberTruckBuyer2",
    system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
    llm=llm,
    max_loops=1,
    # output_type=str,
    metadata="json",
    function_calling_format_type="OpenAI",
    function_calling_type="json",
    streaming_on=True,
    tools=[browser_automation],
)

agent3 = Agent(
    agent_name="CyberTruckBuyer3",
    system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
    llm=llm,
    max_loops=1,
    # output_type=str,
    metadata="json",
    function_calling_format_type="OpenAI",
    function_calling_type="json",
    streaming_on=True,
    tools=[browser_automation],
)

swarm = AgentRearrange(
    flow="CyberTruckBuyer1 -> CyberTruckBuyer2 -> CyberTruckBuyer3",
    agents=[agent1, agent2, agent3],
    logging_enabled=True,
    max_loops=1,
)

# Run all the agents
swarm.run("Let's buy a cyber truck")
@ -1,167 +0,0 @@
|
|||||||
from swarm_models.openai_function_caller import OpenAIFunctionCaller
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
from swarms import create_file_in_folder
|
|
||||||
from swarms.utils.loguru_logger import logger
|
|
||||||
import threading
|
|
||||||
import json
|
|
||||||
from typing import List, Dict
|
|
||||||
from datasets import load_dataset
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
class ModelSpec(BaseModel):
|
|
||||||
novel_algorithm_name: str = Field(
|
|
||||||
...,
|
|
||||||
description="The name of the novel AI algorithm",
|
|
||||||
)
|
|
||||||
mathamatical_formulation: str = Field(
|
|
||||||
...,
|
|
||||||
description="The mathematical theoretical formulation of the new model",
|
|
||||||
)
|
|
||||||
model_code: str = Field(
|
|
||||||
...,
|
|
||||||
description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# Initialize the function caller
|
|
||||||
model = OpenAIFunctionCaller(
|
|
||||||
system_prompt="You're an expert model engineer like Lucidrains, you write world-class PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
|
|
||||||
max_tokens=3500,
|
|
||||||
temperature=1.0,
|
|
||||||
base_model=ModelSpec,
|
|
||||||
parallel_tool_calls=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def clean_model_code(model_code_str: str) -> str:
|
|
||||||
"""
|
|
||||||
Cleans up the generated model code string.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
model_code_str (str): The raw model code as a string.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: The cleaned-up model code.
|
|
||||||
"""
|
|
||||||
cleaned_code = model_code_str.replace("\\n", "\n").replace(
|
|
||||||
"\\'", "'"
|
|
||||||
)
|
|
||||||
return cleaned_code.strip()
|
|
||||||
|
|
||||||
|
|
||||||
def generate_novel_model() -> Dict[str, str]:
|
|
||||||
"""
|
|
||||||
Generate a novel neural network model using the OpenAI function caller.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict[str, str]: A dictionary containing the model's name, theory, and code.
|
|
    """
    out = model.run(
        "Create an entirely new model architecture by blending backbones like attention, LSTMs, RNNs, and SSMs into one novel architecture. Provide alternative model architectures to transformers, SSMs, convnets, LSTMs, and more. Be creative and don't work on architectures that have been done before. The goal is to create new, ultra-high-performance nets."
    )
    return {
        "name": out["novel_algorithm_name"],
        "theory": out["mathamatical_formulation"],
        "code": clean_model_code(out["model_code"]),
    }


def generate_and_save_model(
    i: int, dataset: List[Dict[str, str]]
) -> None:
    """
    Generate, clean, save, and add the model data to a dataset.

    Args:
        i (int): The iteration number (for logging purposes).
        dataset (List[Dict[str, str]]): The dataset to add the model data to.
    """
    model_data = generate_novel_model()
    name = model_data["name"]
    code = model_data["code"]

    logger.info(f"Generated code for novel model {name}")
    create_file_in_folder("new_models", f"{name}.py", code)
    logger.info(f"Saved code for novel model {i} to file")

    # Add the model data to the shared dataset
    dataset.append(model_data)


def save_to_jsonl(
    dataset: List[Dict[str, str]], file_path: str
) -> None:
    """
    Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.

    Args:
        dataset (List[Dict[str, str]]): The dataset containing models' data.
        file_path (str): The path to save the JSONL file.
    """
    with open(file_path, "a") as file:  # Open in append mode
        for entry in dataset:
            file.write(json.dumps(entry) + "\n")
    logger.info(f"Dataset appended to {file_path}")


def upload_to_huggingface(
    file_path: str, dataset_name: str, huggingface_token: str
) -> None:
    """
    Uploads the dataset to Hugging Face.

    Args:
        file_path (str): The path to the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = load_dataset(
        "json", data_files=file_path, split="train"
    )
    dataset.push_to_hub(dataset_name, token=huggingface_token)
    logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")


def main(
    num_models: int,
    jsonl_file_path: str,
    dataset_name: str,
    huggingface_token: str,
) -> None:
    """
    Main function to generate models, save them to JSONL, and upload to Hugging Face.

    Args:
        num_models (int): The number of models to generate.
        jsonl_file_path (str): The path to save the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = []
    threads = []

    # Generate each model on its own thread; list.append is atomic in
    # CPython, so appending to the shared dataset list from workers is safe.
    for i in range(num_models):
        thread = threading.Thread(
            target=generate_and_save_model, args=(i, dataset)
        )
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    save_to_jsonl(dataset, jsonl_file_path)
    upload_to_huggingface(
        jsonl_file_path, dataset_name, huggingface_token
    )


# Example usage
if __name__ == "__main__":
    num_models = 100  # Number of models to generate
    jsonl_file_path = "novel_models_dataset.jsonl"
    dataset_name = "novel_models_architectures"
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

    main(num_models, jsonl_file_path, dataset_name, huggingface_token)
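Each record written by save_to_jsonl is a single JSON object per line, so the dataset can be streamed back without loading the whole file at once. A minimal read-back sketch (not part of the original file), assuming the file name from the example usage above:

import json

with open("novel_models_dataset.jsonl", "r") as f:
    records = [json.loads(line) for line in f if line.strip()]

for record in records[:3]:
    # Each entry carries at least "name", "theory", and "code"
    print(record["name"])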
@ -1,207 +0,0 @@
import json
import os
import threading
from typing import Dict, List

from datasets import load_dataset
from pydantic import BaseModel, Field
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from swarms.utils.loguru_logger import logger


class ModelSpec(BaseModel):
    novel_algorithm_name: str = Field(
        ...,
        description="The name of the novel AI algorithm",
    )
    mathamatical_formulation: str = Field(
        ...,
        description="The mathematical theoretical formulation of the new model",
    )
    model_code: str = Field(
        ...,
        description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
    )


class OptimizationSpec(BaseModel):
    errors: str = Field(
        ...,
        description="The errors in the existing model architecture code",
    )
    refined_model_code: str = Field(
        ...,
        description="The refined code for the model architecture in PyTorch",
    )
    step_by_step_instructions: str = Field(
        ...,
        description="The step-by-step instructions on how the model works and how it was refined",
    )


# Initialize the generator function caller
model = OpenAIFunctionCaller(
    system_prompt="You're an expert model engineer like Lucidrains; you write world-class, PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
    max_tokens=3500,
    temperature=1.0,
    base_model=ModelSpec,
    parallel_tool_calls=False,
)

# Initialize the refiner function caller
refiner = OpenAIFunctionCaller(
    system_prompt="""
    You're a model refiner: you refine existing deep learning models to improve their performance, and you optimize and clean up code. You intake a model architecture and refine it to make it more efficient, faster, and more accurate. You need to provide the code for the refined model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and refine models that have never been contemplated before. Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined.
    """,
    max_tokens=3500,
    temperature=1.0,
    base_model=OptimizationSpec,
    parallel_tool_calls=False,
)


def clean_model_code(model_code_str: str) -> str:
    """
    Cleans up the generated model code string.

    Args:
        model_code_str (str): The raw model code as a string.

    Returns:
        str: The cleaned-up model code.
    """
    cleaned_code = model_code_str.replace("\\n", "\n").replace(
        "\\'", "'"
    )
    return cleaned_code.strip()


def generate_novel_model() -> Dict[str, str]:
    """
    Generate a novel neural network model using the OpenAI function caller.

    Returns:
        Dict[str, str]: A dictionary containing the model's name, theory, code,
        errors, refined code, and refinement instructions.
    """
    out = model.run(
        "Create an entirely new model architecture by blending backbones like attention, LSTMs, RNNs, and SSMs into one novel architecture. Provide alternative model architectures to transformers, SSMs, convnets, LSTMs, and more. Be creative and don't work on architectures that have been done before. The goal is to create new, ultra-high-performance nets."
    )
    name = out["novel_algorithm_name"]
    theory = out["mathamatical_formulation"]
    code = clean_model_code(out["model_code"])

    refined = refiner.run(
        f"Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined. Name of Algorithm: {name} Code: {code}"
    )
    errors = refined["errors"]
    refined_code = clean_model_code(refined["refined_model_code"])
    instructions = refined["step_by_step_instructions"]

    return {
        "name": name,
        "theory": theory,
        "code": code,
        "errors": errors,
        "refined_code": refined_code,
        "instructions": instructions,
    }


def generate_and_save_model(
    i: int, dataset: List[Dict[str, str]]
) -> None:
    """
    Generate, clean, and add the model data to a dataset (file saving is disabled below).

    Args:
        i (int): The iteration number (for logging purposes).
        dataset (List[Dict[str, str]]): The dataset to add the model data to.
    """
    model_data = generate_novel_model()
    # name = model_data["name"]
    # code = model_data["code"]

    # logger.info(f"Generated code for novel model {name}:")
    # create_file_in_folder("new_models", f"{name}.py", code)
    # logger.info(f"Saved code for novel model {i} to file:")

    # Add the model data to the dataset
    dataset.append(model_data)


def save_to_jsonl(
    dataset: List[Dict[str, str]], file_path: str
) -> None:
    """
    Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.

    Args:
        dataset (List[Dict[str, str]]): The dataset containing models' data.
        file_path (str): The path to save the JSONL file.
    """
    with open(file_path, "a") as file:  # Open in append mode
        for entry in dataset:
            file.write(json.dumps(entry) + "\n")
    logger.info(f"Dataset appended to {file_path}")


def upload_to_huggingface(
    file_path: str, dataset_name: str, huggingface_token: str
) -> None:
    """
    Uploads the dataset to Hugging Face.

    Args:
        file_path (str): The path to the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = load_dataset(
        "json", data_files=file_path, split="train"
    )
    dataset.push_to_hub(dataset_name, token=huggingface_token)
    logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")


def main(
    num_models: int,
    jsonl_file_path: str,
    dataset_name: str,
    huggingface_token: str,
) -> None:
    """
    Main function to generate models, save them to JSONL, and upload to Hugging Face.

    Args:
        num_models (int): The number of models to generate.
        jsonl_file_path (str): The path to save the JSONL file.
        dataset_name (str): The name of the dataset on Hugging Face.
        huggingface_token (str): Your Hugging Face token for authentication.
    """
    dataset = []
    threads = []

    for i in range(num_models):
        thread = threading.Thread(
            target=generate_and_save_model, args=(i, dataset)
        )
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

    save_to_jsonl(dataset, jsonl_file_path)
    upload_to_huggingface(
        jsonl_file_path, dataset_name, huggingface_token
    )


# Example usage
if __name__ == "__main__":
    num_models = 30  # Number of models to generate
    jsonl_file_path = "novel_models_dataset_new.jsonl"
    dataset_name = "novel_models_architectures_instructions"
    huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

    main(num_models, jsonl_file_path, dataset_name, huggingface_token)
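Spawning one raw thread per model puts no bound on concurrent API calls, and thread.join() silently swallows worker exceptions. A minimal alternative sketch (not part of the original file) using the functions defined above; max_workers=8 is an arbitrary choice:

from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List

def main_pooled(
    num_models: int,
    jsonl_file_path: str,
    dataset_name: str,
    huggingface_token: str,
) -> None:
    dataset: List[Dict[str, str]] = []
    with ThreadPoolExecutor(max_workers=8) as pool:
        futures = [
            pool.submit(generate_and_save_model, i, dataset)
            for i in range(num_models)
        ]
        for future in as_completed(futures):
            future.result()  # re-raises any exception from the worker
    save_to_jsonl(dataset, jsonl_file_path)
    upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)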
@ -1,133 +0,0 @@
import threading

from pydantic import BaseModel, Field
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from swarms import create_file_in_folder
from swarms.tools.prebuilt.code_executor import CodeExecutor
from swarms.utils.loguru_logger import logger


code_executor = CodeExecutor()

AI_EXPERT_SYSTEM_PROMPT = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.

As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.

Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?

Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.
"""


class ModelSpec(BaseModel):
    novel_algorithm_name: str = Field(
        ...,
        description="The name of the novel AI algorithm. Lowercase, no spaces; use underscores.",
    )
    mathamatical_formulation: str = Field(
        ...,
        description="The mathematical theoretical formulation of the new model",
    )
    model_code: str = Field(
        ...,
        description="The code for the all-new model architecture in PyTorch. Add types and write clean code.",
    )
    example_code: str = Field(
        ...,
        description="Example code for the all-new model architecture in PyTorch. Add types and write clean code.",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt=AI_EXPERT_SYSTEM_PROMPT,
    max_tokens=4000,
    temperature=0.4,
    base_model=ModelSpec,
    parallel_tool_calls=False,
)


def clean_model_code(model_code_str: str) -> str:
    # Remove extra escape characters and newlines
    cleaned_code = model_code_str.replace("\\n", "\n").replace(
        "\\'", "'"
    )

    # Remove unnecessary leading and trailing whitespace
    cleaned_code = cleaned_code.strip()

    return cleaned_code


def parse_function_call_output(out: dict):
    if out is None:
        return None, None, None, None

    # Parse the structured output into its four fields
    name = out["novel_algorithm_name"]
    theory = out["mathamatical_formulation"]
    code = out["model_code"]
    example_code = out["example_code"]

    return name, theory, code, example_code


def generate_and_execute_model(
    i: int,
    # task: str = "Create an all-new model compression format to compress neural networks to make them easier to share and store, aim for 100x compression. make a general script that will convert any pytorch or tensorflow model. Be creative, create a fully novel algorithm. First create a series of idea, rank them on feasibility and potential, then create a theory for the algorithm, and then create the code for it. The algorithm needs to compress the massive .pt files. The input should be a .pt file of the model, and the output should be a compressed .pt file. Don't use any placeholders, you can do it! Generate the name, mathamatical formulation, code for the model, and example code for the model. The example code is in another file so make sure you make the right imports and import the main algorithm from the other file.",
    task: str = "Generate an all-new model architecture for a neural network that achieves state-of-the-art performance on the CIFAR-10 dataset. The model should be designed to maximize accuracy while minimizing computational cost. Provide the name, mathematical formulation, model code, and example code for the new architecture. The example code should demonstrate how to instantiate and train the model on the CIFAR-10 dataset. All of the files are in the same folder, so make sure you import the main algorithm from the other file in the example script.",
):
    # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
    out = model.run(task)
    name, theory, code, example_code = parse_function_call_output(out)
    logger.info(
        f"Algorithm {name}: Mathematical formulation {theory}"
    )

    # Clean up the generated code strings
    code = clean_model_code(code)
    example_code = clean_model_code(example_code)
    logger.info(f"Cleaned code for novel model {i}")

    # Save the generated code to files
    create_file_in_folder(f"new_models/{name}", f"{name}.py", code)
    create_file_in_folder(
        f"new_models/{name}", f"{name}_example.py", example_code
    )
    logger.info(f"Saved code for novel model {i} to file")

    # Execute the generated code
    test = code_executor.execute(code)

    # Run the example/training script
    test_example = code_executor.execute(example_code)

    if "error" in test:
        logger.error(f"Error in code execution: {test}")

    if "error" in test_example:
        logger.error(
            f"Error in code execution example: {test_example}"
        )
    else:
        logger.info(
            f"Successfully executed code for novel model {name}"
        )


# Create and start a new thread for each model
threads = []
for i in range(10):
    thread = threading.Thread(
        target=generate_and_execute_model, args=(i,)
    )
    thread.start()
    threads.append(thread)

# Wait for all threads to finish
for thread in threads:
    thread.join()
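The task string above insists that the generated example script import the main algorithm from its sibling file. A minimal sketch of the two-file layout being requested, with placeholder names (this is illustrative, not output of the script):

# new_models/my_net/my_net.py -- hypothetical generated architecture
import torch
from torch import nn

class MyNet(nn.Module):
    """Placeholder architecture with CIFAR-10-shaped inputs and outputs."""

    def __init__(self, num_classes: int = 10) -> None:
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.head = nn.Linear(16, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (B, 3, 32, 32) -> (B, 16, 1, 1) -> (B, 16) -> (B, 10)
        return self.head(self.backbone(x).flatten(1))

# new_models/my_net/my_net_example.py would then import it as a sibling:
# from my_net import MyNet
# print(MyNet()(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 10])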
@ -1,26 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat

# Initialize the workflow
agent = Agent(
    llm=OpenAIChat(),
    max_loops="auto",
    agent_name="Amazon Product Scraper",
    system_prompt=(
        "Create the code in Python to scrape Amazon product reviews"
        " and return a CSV given a product URL"
    ),
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
)

# Run the workflow on a task
agent(
    "Create the code to scrape this Amazon URL and return a CSV of"
    " reviews:"
    " https://www.amazon.com/Creative-Act-Way-Being/dp/0593652886/ref=sr_1_1?dib=eyJ2IjoiMSJ9.JVdL3JSDmBVH_jv4eM6YE4npUpG6jO6-ai6lgmax-Ya4nH3oPk8cxkmzKsx9yAMX-Eo4A1ErqipCeY-FhTqMc7hhNTqCoAvNd65rvXH1GnYv7WlfSDYTjMkB_vVrH-iitBXAY6uASm73ff2hPWzqhF3ldGkYr8fA5FtmoYMSOnarvCU11YpoSp3EqdK526XOxkRJqeFlZAoAkXOmYHe9B5sY8-zQlVgkIV3U-7rUQdY.UXen28vr2K-Tbbz9aB7vNLLurAiR2ZSblFOVNjXYaf8&dib_tag=se&hvadid=652633987879&hvdev=c&hvlocphy=9061268&hvnetw=g&hvqmt=e&hvrand=413884426001746223&hvtargid=kwd-1977743614989&hydadcr=8513_13545021&keywords=the+creative+act+rick+rubin+book&qid=1710541252&sr=8-1"
)
@ -1,18 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat

agent = Agent(
    agent_name="API Requester",
    agent_description="This agent is responsible for making API requests.",
    system_prompt="You're a helpful API Requester agent.",
    llm=OpenAIChat(),
    autosave=True,
    max_loops="auto",
    dashboard=True,
    interactive=True,
)


# Run the agent
out = agent.run("Create an API request to OpenAI in Python.")
print(out)
@ -1,93 +0,0 @@
from pydantic import BaseModel, Field
from swarm_models.openai_function_caller import OpenAIFunctionCaller
from swarms.structs.concat import concat_strings
from swarms.tools.prebuilt.code_executor import CodeExecutor


# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
# It is used here to define the structured output schema for the generated code.
class CodeSpec(BaseModel):
    summary: str = Field(
        ...,
        description="The summary of the code",
    )
    algorithmic_pseudocode: str = Field(
        ...,
        description="The pseudocode of the code",
    )
    code: str = Field(
        ...,
        description="The code for the algorithm.",
    )


def clean_model_code(model_code_str: str) -> str:
    """
    Cleans up the generated model code string.

    Args:
        model_code_str (str): The raw model code as a string.

    Returns:
        str: The cleaned-up model code.
    """
    cleaned_code = model_code_str.replace("\\n", "\n").replace(
        "\\'", "'"
    )
    return cleaned_code.strip()


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt="You're the code interpreter agent, your purpose is to generate code given a task and provide a summary, pseudocode, and code for the algorithm.",
    max_tokens=3400,
    temperature=0.5,
    base_model=CodeSpec,
    parallel_tool_calls=False,
)


def run_model_and_generate_code(max_loops: int = 2):
    question = "What is the task for the code interpreter agent?"
    task = input(question)
    responses = []
    responses.append(question)
    responses.append(task)

    for _ in range(max_loops):
        task = concat_strings(task)

        out = model.run(task)
        summary = out["summary"]
        print("\nSummary: ", summary)
        pseudocode = out["algorithmic_pseudocode"]
        code = clean_model_code(out["code"])

        output = f"{summary}\n\n{pseudocode}\n\n{code}"
        responses.append(output)

        # Code Executor
        executor = CodeExecutor()

        # Execute the code
        result = executor.execute(code)

        if "error" in result:
            print(f"Error: {result}")
            break

        print("\nCode Output: ", result)

        task = input(
            "\nEnter the next task for the code interpreter agent (or 'exit' to stop): "
        )
        # Honor the 'exit' option promised in the prompt above
        if task.strip().lower() == "exit":
            break
        responses.append(task)

    return responses


run_model_and_generate_code()
@ -1,362 +0,0 @@
import os
from typing import List, Set

from dotenv import load_dotenv
from loguru import logger
from swarm_models import OpenAIChat

from swarms import Agent

load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

SYS_PROMPT = """

### System Prompt for API Reference Documentation Generator

You are an expert documentation generator agent. Your task is to produce **high-quality Python API reference documentation** for functions and classes in Python codebases. The codebase does **not include any web APIs**, only Python functions, methods, classes, and constants. You will generate clear, concise, and professional documentation based on the structure and functionality of the given code.
Don't use a single hashtag (#) for the title; only use three hashtags (###) for the module path.

**Instructions:**
1. **Documentation Style**: Follow a consistent format for documenting Python functions and classes.
   - For functions, provide:
     - **Name** of the function.
     - **Description** of what the function does.
     - **Parameters** with type annotations and a description for each parameter.
     - **Return Type** and a description of what is returned.
     - **Example Usage** in code block format.
   - For classes, provide:
     - **Name** of the class.
     - **Description** of the class and its purpose.
     - **Attributes** with a description of each attribute and its type.
     - **Methods** with the same details as functions (description, parameters, return types).
     - **Example Usage** in code block format.
   - For constants, briefly describe their purpose and value.

2. **Many-shot examples**:
   - Provide multiple examples of documenting both **functions** and **classes** based on the given code.

### Many-Shot Examples:

#### Example 1: Function Documentation

```python
def add_numbers(a: int, b: int) -> int:
    return a + b
```

**Documentation:**

### `add_numbers(a: int, b: int) -> int`
**Description**:
Adds two integers and returns their sum.

**Parameters**:
- `a` (`int`): The first integer.
- `b` (`int`): The second integer.

**Return**:
- (`int`): The sum of the two input integers.

**Example**:
```python
result = add_numbers(3, 5)
print(result)  # Output: 8
```

#### Example 2: Function Documentation

```python
def greet_user(name: str) -> str:
    return f"Hello, {name}!"
```

**Documentation:**

### `greet_user(name: str) -> str`
**Description**:
Returns a greeting message for the given user.

**Parameters**:
- `name` (`str`): The name of the user to greet.

**Return**:
- (`str`): A personalized greeting message.

**Example**:
```python
message = greet_user("Alice")
print(message)  # Output: "Hello, Alice!"
```

#### Example 3: Class Documentation

```python
class Calculator:
    def __init__(self):
        self.result = 0

    def add(self, value: int) -> None:
        self.result += value

    def reset(self) -> None:
        self.result = 0
```

**Documentation:**

### `Calculator`
**Description**:
A simple calculator class that can add numbers and reset the result.

**Attributes**:
- `result` (`int`): The current result of the calculator, initialized to 0.

**Methods**:

- `add(value: int) -> None`
  - **Description**: Adds the given value to the current result.
  - **Parameters**:
    - `value` (`int`): The value to add to the result.
  - **Return**: None.

- `reset() -> None`
  - **Description**: Resets the calculator result to 0.
  - **Parameters**: None.
  - **Return**: None.

**Example**:
```python
calc = Calculator()
calc.add(5)
print(calc.result)  # Output: 5
calc.reset()
print(calc.result)  # Output: 0
```

#### Example 4: Constant Documentation

```python
PI = 3.14159
```

**Documentation:**

### `PI`
**Description**:
A constant representing the value of pi (π) to 5 decimal places.

**Value**:
`3.14159`

"""


class DocumentationAgent:
    def __init__(
        self,
        directory: str,
        output_file: str = "API_Reference.md",
        agent_name: str = "Documentation-Generator",
    ):
        """
        Initializes the DocumentationAgent.

        :param directory: The root directory where the Python files are located.
        :param output_file: The file where all the documentation will be saved.
        :param agent_name: Name of the agent generating the documentation.
        """
        self.directory = directory
        self.output_file = output_file
        self.agent_name = agent_name
        self.model = OpenAIChat(
            openai_api_key=api_key,
            model_name="gpt-4o-mini",
            temperature=0.1,
            max_tokens=3000,
        )
        self.agent = Agent(
            agent_name=agent_name,
            system_prompt=SYS_PROMPT,
            llm=self.model,
            max_loops=1,
            autosave=True,
            dashboard=False,
            verbose=True,
            dynamic_temperature_enabled=True,
            saved_state_path=f"{agent_name}_state.json",
            user_name="swarms_corp",
            retry_attempts=1,
            context_length=200000,
            return_step_meta=False,
            output_type=str,
        )
        # Memory system to track already documented files
        self.documented_files: Set[str] = set()
        logger.info(
            f"Initialized {self.agent_name} for generating API documentation."
        )

        # Ensure the output file is clean before starting
        with open(self.output_file, "w") as f:
            f.write("# API Reference Documentation\n\n")
        logger.info(f"Created new output file: {self.output_file}")

    def _get_python_files(self) -> List[str]:
        """
        Gets all Python (.py) files in the given directory, excluding a set of
        directories such as 'utils', 'tools', and 'prompts'.

        :return: A list of full paths to Python files.
        """
        excluded_folders = {
            "utils",
            "tools",
            "prompts",
            "cli",
            "schemas",
            "agents",
            "artifacts",
        }
        python_files = []

        for root, dirs, files in os.walk(self.directory):
            # Remove excluded folders from the search
            dirs[:] = [d for d in dirs if d not in excluded_folders]

            for file in files:
                if file.endswith(".py"):
                    full_path = os.path.join(root, file)
                    python_files.append(full_path)
                    logger.info(f"Found Python file: {full_path}")
        return python_files

    def _get_module_path(self, file_path: str) -> str:
        """
        Converts a file path to a Python module path.

        :param file_path: Full path to the Python file.
        :return: The module path for the file.
        """
        relative_path = os.path.relpath(file_path, self.directory)
        module_path = relative_path.replace(os.sep, ".").replace(
            ".py", ""
        )
        logger.info(f"Formatted module path: {module_path}")
        return module_path

    def _read_file_content(self, file_path: str) -> str:
        """
        Reads the content of a Python file.

        :param file_path: Full path to the Python file.
        :return: The content of the file as a string.
        """
        try:
            with open(file_path, "r") as f:
                content = f.read()
            logger.info(f"Read content from {file_path}")
            return content
        except Exception as e:
            logger.error(f"Error reading file {file_path}: {e}")
            return ""

    def _write_to_markdown(self, content: str) -> None:
        """
        Appends generated content to the output markdown file.

        :param content: Documentation content to write to the markdown file.
        """
        try:
            with open(self.output_file, "a") as f:
                f.write(content)
                f.write(
                    "\n\n"
                )  # Add space between different module documentations
            logger.info(
                f"Appended documentation to {self.output_file}"
            )
        except Exception as e:
            logger.error(f"Error writing to {self.output_file}: {e}")

    def _generate_doc_for_file(self, file_path: str) -> None:
        """
        Generates documentation for a single Python file.

        :param file_path: The full path to the Python file.
        """
        if file_path in self.documented_files:
            logger.info(
                f"Skipping already documented file: {file_path}"
            )
            return

        module_path = self._get_module_path(file_path)
        file_content = self._read_file_content(file_path)

        # Ensure the file isn't empty or just whitespace
        if file_content.strip():
            logger.info(
                f"Generating documentation for module {module_path}..."
            )

            # Task prompt with clear instructions for the agent
            task_prompt = f"""
            You are an expert documentation generator. Generate a comprehensive Python API reference documentation for the following module '{module_path}'.

            The module contains the following code:

            {file_content}

            Provide full documentation including descriptions for all functions, classes, and methods. If there is nothing to document, simply write "No documentable code".

            Make sure you provide full module imports in your documentation such as {self.directory}.{module_path}

            from {self.directory}.{module_path} import *

            ### `{self.directory}.{module_path}`
            """
            doc_output = self.agent.run(task_prompt)

            # markdown_content = f"# {subfolder}\n\n" if subfolder else ""
            markdown_content = f"\n\n{doc_output}\n"

            self._write_to_markdown(markdown_content)
            self.documented_files.add(file_path)

    def run(self) -> None:
        """
        Generates documentation for all Python files in the directory and writes
        it to a markdown file, processing files sequentially (a threaded variant
        is sketched in the comments below).
        """
        python_files = self._get_python_files()

        # with ThreadPoolExecutor() as executor:
        #     futures = [executor.submit(self._generate_doc_for_file, file_path) for file_path in python_files]
        #     for future in as_completed(futures):
        #         try:
        #             future.result()  # Raises an exception if the function failed
        #         except Exception as e:
        #             logger.error(f"Error processing a file: {e}")

        for file in python_files:
            self._generate_doc_for_file(file)

        logger.info(
            f"Documentation generation completed. All documentation written to {self.output_file}"
        )


# Example usage
if __name__ == "__main__":
    doc_agent = DocumentationAgent(directory="swarms")
    doc_agent.run()
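The commented-out executor block in run() hints at a parallel path. A minimal sketch of what re-enabling it could look like, assuming _generate_doc_for_file is safe to call concurrently; note that concurrent appends to the single markdown file may interleave without a lock:

from concurrent.futures import ThreadPoolExecutor, as_completed

from loguru import logger

def run_parallel(agent: DocumentationAgent, max_workers: int = 4) -> None:
    python_files = agent._get_python_files()
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(agent._generate_doc_for_file, path): path
            for path in python_files
        }
        for future in as_completed(futures):
            try:
                future.result()  # raises if documenting that file failed
            except Exception as e:
                logger.error(f"Error processing {futures[future]}: {e}")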
@ -1,149 +0,0 @@
import os

from swarms import Agent, HuggingfaceLLM

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
# model = OpenAIChat(
#     api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
# )

# Note: bert-base-uncased is an encoder-only model; swap in a generative
# model id if you need chat-style responses from this agent.
model = HuggingfaceLLM(
    model_id="bert-base-uncased",
)


# Prompt
ROTH_IRA_SYSTEM_PROMPT = """
### System Prompt for a 401(k) and Roth IRA Management Agent

#### Introduction
You are a highly intelligent and empathetic AI agent specialized in helping individuals set up, manage, and run their 401(k) and Roth IRA accounts. Your primary goal is to assist users in making informed decisions about their retirement savings. You should always provide clear, concise, and accurate information, taking into account the user's specific situation and needs.

#### Goals
1. Help users understand the basics of 401(k) and Roth IRA accounts.
2. Assist users in setting up new accounts.
3. Guide users in managing their existing accounts.
4. Provide personalized advice based on the user's financial goals and situation.
5. Ensure users are aware of the tax implications and benefits of their retirement accounts.
6. Encourage users to think long-term and make decisions that align with their retirement goals.

#### How to Think
1. **Empathy and Understanding**: Always show empathy and understanding towards the user's situation. Acknowledge their concerns and provide reassurance.
2. **Personalization**: Tailor your responses to the user's specific financial situation, goals, and needs.
3. **Clarity and Simplicity**: Use clear and simple language to explain complex financial concepts. Avoid jargon unless it is necessary, and always define any terms you use.
4. **Accuracy and Reliability**: Provide accurate and reliable information. If you are unsure about something, indicate that and suggest consulting a financial advisor for specific advice.
5. **Encouragement**: Encourage users to save and invest wisely for their retirement. Highlight the benefits of planning ahead and making informed decisions.

#### Multi-Shot Conversational Examples

**Example 1: Setting up a 401(k)**

**User**: I just started a new job and they offer a 401(k) plan. How do I set it up?

**Agent**: Congratulations on your new job! Setting up a 401(k) is a great step towards securing your retirement. Here's what you need to do:

1. **Enroll in the Plan**: Contact your HR department or benefits coordinator to get the enrollment forms. They will guide you through the process.
2. **Contribution Amount**: Decide how much you want to contribute from each paycheck. It's often recommended to contribute enough to get the full employer match if your company offers one.
3. **Investment Options**: Choose how you want to invest your contributions. Most plans offer a range of options, from conservative to aggressive. Consider your risk tolerance and retirement timeline.
4. **Beneficiaries**: Designate a beneficiary for your account in case something happens to you.

Would you like more detailed guidance on any of these steps?

**Example 2: Managing a Roth IRA**

**User**: I already have a Roth IRA. How should I manage it to maximize my returns?

**Agent**: Great to hear that you have a Roth IRA! Here are some tips to help you manage it effectively:

1. **Regular Contributions**: Make regular contributions to your Roth IRA. For 2023, the maximum contribution is $6,500 ($7,500 if you're 50 or older).
2. **Investment Diversification**: Diversify your investments to spread risk. Consider a mix of stocks, bonds, and other assets that align with your risk tolerance and time horizon.
3. **Rebalancing**: Periodically rebalance your portfolio to maintain your desired asset allocation. This means adjusting your investments to ensure you are not too heavily weighted in one area.
4. **Tax Considerations**: Remember that qualified withdrawals from a Roth IRA are tax-free. This can be a great advantage in retirement.
5. **Stay Informed**: Keep up-to-date with financial news and consider consulting a financial advisor for personalized advice.

Is there a specific aspect of your Roth IRA management you need more help with?

**Example 3: Understanding Tax Implications**

**User**: What are the tax implications of withdrawing from my 401(k) and Roth IRA?

**Agent**: Understanding the tax implications is crucial for making smart withdrawal decisions. Here's a brief overview:

1. **401(k) Withdrawals**: Withdrawals from a traditional 401(k) are generally taxed as ordinary income. If you withdraw before age 59½, you may also face a 10% early withdrawal penalty, unless you qualify for an exception.
2. **Roth IRA Withdrawals**: Qualified withdrawals from a Roth IRA are tax-free. To be qualified, the account must have been open for at least 5 years, and you must be 59½ or older, disabled, or using the withdrawal for a first-time home purchase (up to a $10,000 lifetime limit).

It's important to plan your withdrawals carefully to minimize taxes and penalties. Would you like more detailed information on this topic?

**Example 4: Retirement Planning**

**User**: How do I know if I'm saving enough for retirement?

**Agent**: That's a great question and a common concern. Here are some steps to help you determine if you're on track:

1. **Calculate Your Retirement Needs**: Estimate how much money you'll need in retirement. Consider your desired lifestyle, expected expenses, and how long you plan to be retired.
2. **Assess Your Savings**: Add up your current retirement savings, including your 401(k), IRA, and any other accounts. Use online retirement calculators to project your future savings based on your current contribution rate and expected investment returns.
3. **Contribution Rate**: Aim to save at least 15% of your income for retirement. If you started late, you might need to save more.
4. **Adjust as Needed**: If you're not on track, consider increasing your contributions, reducing expenses, or adjusting your investment strategy.

Would you like to use a retirement calculator to get a more precise estimate?

**Example 5: Choosing Between 401(k) and Roth IRA**

**User**: Should I contribute to my 401(k) or a Roth IRA?

**Agent**: Both 401(k) and Roth IRA accounts have their advantages. Here are some factors to consider:

1. **Tax Treatment**: Contributions to a traditional 401(k) are tax-deductible, reducing your taxable income now. Roth IRA contributions are made with after-tax dollars, but qualified withdrawals are tax-free.
2. **Employer Match**: If your employer offers a match on your 401(k) contributions, it's generally a good idea to contribute enough to get the full match. It's essentially free money.
3. **Income Limits**: Roth IRAs have income limits for contributions. If your income is too high, you may not be eligible to contribute directly to a Roth IRA.
4. **Withdrawal Flexibility**: Roth IRAs offer more flexibility for withdrawals, especially in retirement, since they are tax-free and not subject to required minimum distributions (RMDs).

Consider your current tax situation, expected future tax rates, and retirement goals when making your decision. Would you like to explore this further based on your specific circumstances?

### Closing Remarks
Always prioritize the user's specific needs and provide clear, accurate, and empathetic guidance. Your goal is to empower users to make informed decisions about their retirement savings and help them achieve financial security.

---

This system prompt ensures the LLM agent is well-equipped to assist users with their 401(k) and Roth IRA accounts, providing detailed examples to guide the agent in reasoning and problem-solving.
"""

# Initialize the agent
agent = Agent(
    agent_name="401K-Roth-IRA-Agent",
    system_prompt=ROTH_IRA_SYSTEM_PROMPT,
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    interactive=True,
    saved_state_path="finance_agent.json",
    # tools=[...],  # add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    retry_attempts=3,
    # tool_schema=dict,
    context_length=200000,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


agent.run(
    "Create a comprehensive guide on setting up and managing a Roth IRA account."
)
@ -1,125 +0,0 @@
import os

from swarms import Agent
from swarm_models import OpenAIChat

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Prompt
ESTATE_PLANNING_AGENT_SYS_PROMPT = """
## Estate Planning LLM Agent System Prompt

### Objective
You are an Estate Planning LLM Agent with extensive knowledge about setting up trusts, business corporations, and various estate planning strategies. Your goal is to help users maximize their profit by leveraging intricate legal and financial strategies to save money on taxes, fees, and other costs. You provide detailed, accurate, and actionable advice tailored to the user's specific needs and circumstances.

### Instructions
1. **Understand User Context**: Always start by understanding the user's current financial situation, goals, and any specific concerns they have.
2. **Provide Tailored Advice**: Offer personalized advice on setting up trusts, forming business corporations, and other estate planning strategies.
3. **Maximize Profit**: Focus on strategies that maximize the user's profit and minimize their tax burden and fees.
4. **Detail-Oriented**: Be thorough and precise in your recommendations, providing step-by-step guidance where necessary.
5. **Compliance and Legality**: Ensure all advice complies with relevant laws and regulations.

### Example Interactions

#### Example 1: Setting Up a Trust
**User Input:**
"I have a net worth of $5 million and want to set up a trust to ensure my children receive their inheritance without paying high taxes. What type of trust should I set up, and how do I go about it?"

**Agent Response:**
"Based on your net worth and objective to minimize taxes for your children, setting up a **revocable living trust** could be beneficial. This type of trust allows you to maintain control over the assets during your lifetime and ensures a smooth transfer to your beneficiaries upon your death. Here's a step-by-step guide to setting it up:

1. **Choose a Trustee**: Select someone you trust to manage the trust, which could be yourself initially.
2. **Draft the Trust Agreement**: Work with an estate planning attorney to create a trust agreement that outlines the terms and conditions of the trust.
3. **Transfer Assets**: Move your assets into the trust, including real estate, bank accounts, and investment portfolios.
4. **Fund the Trust**: Ensure all relevant assets are officially titled in the name of the trust.
5. **Review and Amend**: Periodically review and update the trust to reflect changes in your financial situation or wishes.

Additionally, consider setting up an **irrevocable life insurance trust (ILIT)** to exclude life insurance proceeds from your estate, thus reducing estate taxes."

#### Example 2: Forming a Business Corporation
**User Input:**
"I am planning to start a new business and want to know the best type of corporation to form to save on taxes. I expect to generate $500,000 in revenue annually."

**Agent Response:**
"Given your expected revenue, forming an **S Corporation (S Corp)** can offer significant tax advantages by allowing income to pass through to shareholders, thus avoiding double taxation. Here's how to proceed:

1. **Choose a Business Name**: Ensure the name is unique and complies with state regulations.
2. **Register with the State**: File the Articles of Incorporation with your state's Secretary of State.
3. **Create Bylaws**: Draft bylaws that outline the corporation's operating procedures.
4. **Elect S Corp Status**: File Form 2553 with the IRS to elect S Corporation status.
5. **Obtain an EIN**: Apply for an Employer Identification Number from the IRS.
6. **Comply with State Requirements**: Fulfill any additional state-specific requirements, such as obtaining business licenses or permits.
7. **Hold Initial Board Meeting**: Conduct the first board of directors meeting to appoint officers and adopt bylaws.
8. **Issue Stock Certificates**: Distribute stock certificates to initial shareholders.

By forming an S Corp, you can benefit from pass-through taxation, which allows profits to be taxed at individual tax rates, potentially lowering your overall tax liability."

#### Example 3: Saving on Estate Taxes
**User Input:**
"I own several properties and investments worth about $10 million. How can I structure my estate to minimize estate taxes for my heirs?"

**Agent Response:**
"To minimize estate taxes, consider the following strategies:

1. **Grantor Retained Annuity Trust (GRAT)**: Transfer assets into a GRAT, which allows you to receive annuity payments for a specified term. Any remaining assets pass to your beneficiaries tax-free.
2. **Charitable Remainder Trust (CRT)**: Set up a CRT to donate assets to a charitable organization. You receive a partial tax deduction and generate income for life, with the remainder going to charity.
3. **Family Limited Partnership (FLP)**: Transfer assets into an FLP to consolidate and manage family assets. This allows for discounted valuations for gift and estate tax purposes.
4. **Annual Gift Exclusion**: Take advantage of the annual gift exclusion by gifting up to $15,000 per recipient annually, reducing your taxable estate.
5. **Irrevocable Life Insurance Trust (ILIT)**: Exclude life insurance proceeds from your estate by transferring policies to an ILIT, reducing estate taxes.
6. **Dynasty Trust**: Establish a dynasty trust to pass wealth down through multiple generations, leveraging the generation-skipping transfer tax exemption.

Implementing these strategies can significantly reduce your estate's tax liability, ensuring more assets are preserved for your heirs."

### Additional Tips
- Always stay updated with the latest tax laws and estate planning regulations.
- Consider collaborating with financial advisors, tax professionals, and estate planning attorneys to provide the most comprehensive advice.
- Provide illustrative examples and case studies to help users understand complex concepts and strategies.

### Final Note
Your advice should always prioritize the user's financial well-being, ensuring they receive the maximum benefit from your estate planning recommendations.

"""

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    interactive=True,
    saved_state_path="finance_agent.json",
    # tools=[...],  # add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    retry_attempts=3,
    # tool_schema=dict,
    context_length=200000,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


agent.run(
    "Optimize for minimal tax holdings at death and end of life."
)
@ -1,49 +0,0 @@
import os

from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="finance_agent.json",
    # tools=[...],  # add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    retry_attempts=3,
    # tool_schema=dict,
    context_length=200000,
    interactive=True,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


agent.run("What are the best states to register a C CORP in?")
@ -1,47 +0,0 @@
import os

from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.utils.data_to_text import data_to_text

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    agent_description="Analyzes financial documents such as contracts and produces full summaries.",
    llm=Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[...],  # add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    retry_attempts=3,
    # tool_schema=dict,
    context_length=200000,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


contract = data_to_text("your_contract_pdf.pdf")

agent.run(
    f"Analyze the following contract and give me a full summary: {contract}"
)
@ -1,116 +0,0 @@
import os

import requests

from swarms import Agent
from swarm_models import OpenAIChat

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)


def fetch_financial_news(
    query: str = "Nvidia news", num_articles: int = 5
) -> str:
    """
    Fetches financial news from the NewsAPI and returns a formatted string of the top news.

    Args:
        query (str): The query term to search for news. Default is "Nvidia news".
        num_articles (int): The number of top articles to fetch. Default is 5.

    Returns:
        str: A formatted string of the top financial news articles.

    Raises:
        ValueError: If the API response is invalid or there are no articles found.
        requests.exceptions.RequestException: If there is an error with the request.
    """
    url = "https://newsapi.org/v2/everything"
    params = {
        "q": query,
        # Read the key from the environment rather than hardcoding it;
        # NEWSAPI_API_KEY is an assumed env var name.
        "apiKey": os.getenv("NEWSAPI_API_KEY"),
        "pageSize": num_articles,
        "sortBy": "relevancy",
    }

    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()

        if "articles" not in data or len(data["articles"]) == 0:
            raise ValueError(
                "No articles found or invalid API response."
            )

        articles = data["articles"]
        formatted_articles = []

        for i, article in enumerate(articles, start=1):
            title = article.get("title", "No Title")
            description = article.get("description", "No Description")
            article_url = article.get("url", "No URL")
            formatted_articles.append(
                f"{i}. {title}\nDescription: {description}\nRead more: {article_url}\n"
            )

        return "\n".join(formatted_articles)

    except requests.exceptions.RequestException as e:
        print(f"Request Error: {e}")
        raise
    except ValueError as e:
        print(f"Value Error: {e}")
        raise


# Example usage:
# print(fetch_financial_news("Nvidia news"))


# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    # system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=2,
    autosave=True,
    # dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    tools=[fetch_financial_news],
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs_folder="docs",
    retry_attempts=3,
    # context_length=1000,
    context_length=200000,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)


# Run the agent
response = agent("What are the latest financial news on Nvidia?")
print(response)
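
# A minimal sketch of exercising the tool directly; NEWSAPI_API_KEY is an
# assumed env var name, not part of the original example:
# print(fetch_financial_news(query="Tesla earnings", num_articles=3))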
@ -1,32 +0,0 @@
from swarms import Agent

from swarm_models import OpenAIChat
import os

api_key = os.getenv("OPENAI_API_KEY")


# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key,
    model_name="gpt-4o-mini",
    temperature=0.1,
    max_tokens=4000,
)

# Agent
agent = Agent(
    agent_name="Non-Profit Incorporation Agent",
    llm=model,
    system_prompt="I am an AI assistant that helps you incorporate a non-profit organization. I can provide information on the best states to incorporate a non-profit in, the steps to incorporate a non-profit, and answer any other questions you may have about non-profit incorporation.",
    max_loops="auto",
    interactive=True,
    streaming_on=True,
)


# Run
response = agent(
    "What's the best state to incorporate a non-profit in?"
)
print(response)
@ -1,114 +0,0 @@
import os
from datetime import datetime
from typing import Any, Dict, List

# NOTE: this client construction follows an older plaid-python API and may
# need adapting for current releases of the library.
from plaid import Client
from plaid.api import plaid_api
from plaid.model.error import PlaidError
from plaid.model.transactions_get_request import (
    TransactionsGetRequest,
)
from plaid.model.transactions_get_response import (
    TransactionsGetResponse,
)

from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)


def fetch_transactions(
    start_date: str, end_date: str
) -> List[Dict[str, Any]]:
    """
    Fetches a list of transactions from Plaid for a given time period.

    The access token is read from the PLAID_ACCESS_TOKEN environment variable.

    Args:
        start_date (str): The start date for the transaction query in 'YYYY-MM-DD' format.
        end_date (str): The end date for the transaction query in 'YYYY-MM-DD' format.

    Returns:
        List[Dict[str, Any]]: A list of transactions as dictionaries.

    Raises:
        PlaidError: If there is an error with the request to the Plaid API.
        ValueError: If the date format is incorrect.
    """
    try:
        access_token = os.getenv("PLAID_ACCESS_TOKEN")
        # Validate date format
        datetime.strptime(start_date, "%Y-%m-%d")
        datetime.strptime(end_date, "%Y-%m-%d")

        # Initialize the Plaid client with your credentials
        plaid_client = plaid_api.PlaidApi(
            Client(
                client_id=os.getenv("PLAID_CLIENT_ID"),
                secret=os.getenv("PLAID_SECRET"),
                environment=os.getenv("PLAID_ENV", "sandbox"),
            )
        )

        # Create a request object for transactions
        request = TransactionsGetRequest(
            access_token=access_token,
            start_date=start_date,
            end_date=end_date,
        )

        # Fetch transactions from the Plaid API
        response: TransactionsGetResponse = (
            plaid_client.transactions_get(request)
        )

        # Return the transactions list
        return response.transactions

    except PlaidError as e:
        print(f"Plaid API Error: {e}")
        raise
    except ValueError as e:
        print(f"Date Format Error: {e}")
        raise


# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    # dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    # docs_folder="docs",
    retry_attempts=1,
    # context_length=1000,
    # tool_schema=dict,
    context_length=200000,
    return_step_meta=False,
    tools=[fetch_transactions],
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
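
# A minimal sketch of calling the tool directly, assuming PLAID_CLIENT_ID,
# PLAID_SECRET, and PLAID_ACCESS_TOKEN are set in the environment:
# transactions = fetch_transactions("2024-01-01", "2024-03-31")
# print(transactions)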
@ -1,87 +0,0 @@
import os

from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB

from swarms import Agent
from swarms.agents.multion_agent import MultiOnAgent
from swarms.tools.prebuilt.code_interpreter import (
    SubprocessCodeInterpreter,
)

# Load the environment variables
load_dotenv()


# Memory
chroma_db = ChromaDB()


# MultiOn tool
def multion_tool(
    task: str,
    api_key: str = os.environ.get("MULTION_API_KEY"),
):
    """
    Executes a task using the MultiOnAgent.

    Args:
        task (str): The task to be executed.
        api_key (str, optional): The API key for the MultiOnAgent. Defaults to the value of the MULTION_API_KEY environment variable.

    Returns:
        The result of the task execution.
    """
    multion = MultiOnAgent(multion_api_key=api_key)
    return multion(task)


# Execute the interpreter tool
def execute_interpreter_tool(
    code: str,
):
    """
    Executes a single snippet of code using the subprocess interpreter.

    Args:
        code (str): The code to be executed.

    Returns:
        The output produced by the interpreter.
    """
    interpreter = SubprocessCodeInterpreter(debug_mode=True)
    out = interpreter.run(code)
    return out


# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


# Initialize the workflow
agent = Agent(
    agent_name="Research Agent",
    agent_description="An agent that performs research tasks.",
    system_prompt="Perform a research task.",
    llm=llm,
    max_loops=1,
    dashboard=True,
    # tools=[multion_tool, execute_interpreter_tool],
    verbose=True,
    long_term_memory=chroma_db,
    stopping_token="done",
)

# Run the workflow on a task
out = agent.run(
    "Generate a 10,000 word blog on health and wellness, and say done"
    " when you are done"
)
print(out)
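
# A minimal sketch of exercising the interpreter tool directly, assuming
# SubprocessCodeInterpreter.run accepts a source string as used above:
# print(execute_interpreter_tool("print('hello from the interpreter')"))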
@ -1,153 +0,0 @@
"""
$ pip install swarms

- Add docs into the database
- Use a better LLM
- Use better prompts [System and SOPs]
- Use an open-source model like Command R
- Better SOPs ++ System Prompts
"""

from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
import os
from dotenv import load_dotenv

load_dotenv()

# The research system prompt used by the agent.

research_system_prompt = """
Research Agent LLM Prompt: Summarizing Sources and Content

Objective:
Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.

Instructions:

1. Identify Key Information:
- Extract the most important information from each source. Focus on key facts, main ideas, significant arguments, and critical data.

2. Summarize Clearly and Concisely:
- Use clear and straightforward language. Avoid unnecessary details and keep the summary concise.
- Ensure that the summary is coherent and easy to understand.

3. Preserve Original Meaning:
- While summarizing, maintain the original meaning and intent of the content. Do not omit essential information that changes the context or understanding.

4. Include Relevant Details:
- Mention the source title, author, publication date, and any other relevant details that provide context.

5. Structure:
- Begin with a brief introduction to the source.
- Follow with a summary of the main content.
- Conclude with any significant conclusions or implications presented in the source.
"""


def movers_agent_system_prompt():
    return """
The Movers Agent is responsible for providing users with fixed-cost estimates for moving services
based on the distance between their current location and destination, and the number of rooms in their home.
Additionally, the agent allows users to attempt negotiation for better deals using the Retell API.

Responsibilities:
- Provide fixed-cost estimates based on distance and room size.
- Allow users to attempt negotiation for better deals using the Retell API.

Details:
- Fixed Costs: Predefined costs for each of the 10 moving companies, with variations based on distance and number of rooms.
- Distance Calculation: Use a fixed formula to estimate distances and costs.
- Room Size: Standard sizes based on the number of rooms will be used to determine the base cost.
- Negotiation: Users can click a "negotiate" button to initiate negotiation via Retell API.

Tools and Resources Used:
- Google Maps API: For calculating distances between the current location and destination.
- Retell API: For simulating negotiation conversations.
- Streamlit: For displaying estimates and handling user interactions.

Example Workflow:
1. User inputs their current location, destination, and number of rooms.
2. The agent calculates the distance and estimates the cost using predefined rates.
3. Displays the estimates from 10 different moving companies.
4. Users can click "negotiate" to simulate negotiation via Retell API, adjusting the price within a predefined range.
"""


# Example usage


# Initialize
memory = ChromaDB(
    output_dir="research_base",
    n_results=2,
)

llm = OpenAIChat(
    temperature=0.2,
    max_tokens=3500,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)


# Initialize the agent
agent = Agent(
    agent_name="Research Agent",
    system_prompt=research_system_prompt,
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    interactive=True,
    # long_term_memory=memory,
    tools=[fetch_web_articles_bing_api],
)


# # Initialize the agent
# agent = Agent(
#     agent_name="Movers Agent",
#     system_prompt=movers_agent_system_prompt(),
#     llm=llm,
#     max_loops=1,
#     autosave=True,
#     dashboard=False,
#     interactive=True,
#     # long_term_memory=memory,
#     # tools=[fetch_web_articles_bing_api],
# )


def perplexity_agent(task: str = None, *args, **kwargs):
    """
    This function takes a task as input and uses the Bing API to fetch web articles related to the task.
    It then combines the task and the fetched articles as prompts and runs them through an agent.
    The agent generates a response based on the prompts and returns it.

    Args:
        task (str): The task for which web articles need to be fetched.

    Returns:
        str: The response generated by the agent.
    """
    out = fetch_web_articles_bing_api(
        task,
        subscription_key=os.getenv("BING_API_KEY"),
    )

    # Sources
    sources = [task, out]
    sources_prompts = "".join(sources)

    # Run a question
    agent_response = agent.run(sources_prompts)
    return agent_response


out = perplexity_agent(
    "What are the Indian food restaurant names on Stanford University Avenue? What are their cost ratios?"
)
print(out)
@ -1,35 +0,0 @@
# Description: This is an example of how to use the Agent class to run a multi-modal workflow
import os

from dotenv import load_dotenv

from swarms import Agent, GPT4VisionAPI

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = GPT4VisionAPI(
    openai_api_key=api_key,
    max_tokens=500,
)

# Define the task and image
task = "What is the color of the object?"
img = "images/swarms.jpeg"

## Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=True,
    multi_modal=True,
)

# Run the workflow on a task
out = agent.run(task=task, img=img)
print(out)
@ -1,37 +0,0 @@
import os
from dotenv import load_dotenv
from swarms import Agent

from swarm_models import GPT4VisionAPI

# Load the environment variables
load_dotenv()


# Initialize the language model
llm = GPT4VisionAPI(
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
    max_tokens=500,
)

# Initialize the task
task = (
    "Analyze this image of an assembly line and identify any issues such as"
    " misaligned parts, defects, or deviations from the standard assembly"
    " process. If there is anything unsafe in the image, explain why it is"
    " unsafe and how it could be improved."
)
img = "assembly_line.jpg"

## Initialize the workflow
agent = Agent(
    agent_name="Multi-ModalAgent",
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=True,
    multi_modal=True,
)

# Run the workflow on a task
agent.run(task, img)
@ -1,15 +0,0 @@
from swarms import Agent

from swarm_models import GPT4VisionAPI

llm = GPT4VisionAPI()

agent = Agent(
    max_loops="auto",
    llm=llm,
)

agent.run(
    task="Describe this image in a few sentences: ",
    img="https://unsplash.com/photos/0pIC5ByPpZY",
)
@ -1,82 +0,0 @@
# Importing necessary modules
import os

from dotenv import load_dotenv

from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
from swarms import tool

# Loading environment variables from .env file
load_dotenv()

# Getting the API keys from environment variables
gemini_api_key = os.getenv("GEMINI_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

llm = OpenAIChat(
    openai_api_key=openai_api_key,
    max_tokens=1000,
    temperature=0.2,
)

# Making an instance of the ChromaDB class
memory = ChromaDB(
    metric="cosine",
    n_results=3,
    multimodal=True,
    # docs_folder="images",
    output_dir="results",
)


# Defining a tool by creating a function and wrapping it with the @tool decorator,
# providing the necessary parameters and docstrings to show the usage of the tool.
@tool
def make_new_file(file: str, content: str):
    """
    Make a new file.

    This function creates a new file with the given name and writes the content to it.

    Parameters:
        file (str): The name of the file to be created.
        content (str): The content to write into the file.

    Returns:
        dict: A dictionary containing the status of the operation.
    """
    with open(file, "w") as f:
        f.write(f"{content}")
    return {"status": "success", "file": file}


# Initializing the agent with the language model and other parameters
agent = Agent(
    llm=llm,
    agent_name="Multi-Modal RAG Agent",
    agent_description=(
        "This agent fuses together the capabilities of a multimodal"
        " LLM and Visual Chain of Thought to answer questions based"
        " on the input image."
    ),
    max_loops="auto",
    autosave=True,
    sop=VISUAL_CHAIN_OF_THOUGHT,
    verbose=True,
    # tools=[make_new_file],
    long_term_memory=memory,
)


# Defining the task and image path
task = (
    "What is the content of this image, return exactly what you see"
    " in the image."
)
img = "images/Screenshot_48.png"


# Running the agent with the specified task and image
out = agent.run(task=task, img=img)
print(out)
@ -1,78 +0,0 @@
"""
Tool flow:

tool decorated func [search_api] -> agent which parses the docs of the tool func
-> injected into prompt -> agent will output json containing tool usage -> agent output will be parsed -> tool executed
-> terminal response can be returned to agent for self-healing
"""

import os

from dotenv import load_dotenv

# Import the OpenAIChat model and the Agent struct
from swarms import Agent
from swarm_models import OpenAIChat

# Load the environment variables
load_dotenv()


# Define a tool
def search_api(query: str, description: str):
    """Search the web for the query.

    Args:
        query (str): The search query.
        description (str): A description of what the search is for.

    Returns:
        str: The search results for the query.
    """
    return f"Search results for {query}"


def weather_api(
    query: str,
):
    """Get the weather for a location.

    Args:
        query (str): The location to get the weather for.
    """
    print(f"Getting the weather for {query}")


def rapid_api(query: str):
    """Query the Rapid API for the given term.

    Args:
        query (str): The query term.
    """
    print(f"Getting the weather for {query}")


# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


## Initialize the workflow
agent = Agent(
    agent_name="Research Agent",
    llm=llm,
    max_loops=3,
    dashboard=True,
    tools=[search_api, weather_api, rapid_api],
    interactive=True,
    execute_tool=True,
)

# Run the workflow on a task
out = agent.run("Use the weather tool in Miami")
print(out)
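
# A minimal sketch of what the agent executes once its JSON tool call is
# parsed; calling the tool function directly is equivalent here:
# print(search_api(query="swarms framework", description="look up the docs"))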
@ -1,351 +0,0 @@
import asyncio
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Union

import aiohttp
import requests
from dotenv import load_dotenv
from loguru import logger
from swarm_models import OpenAIChat

from swarms import Agent

load_dotenv()

# New Pharmaceutical Agent System Prompt
PHARMA_AGENT_SYS_PROMPT = """
You are a pharmaceutical data analysis agent specializing in retrieving and analyzing chemical data.
You have access to the latest chemical databases and can provide detailed analysis of any chemical compounds
relevant to pharmaceutical research. Your goal is to assist pharmaceutical companies in retrieving chemical
properties, safety data, and usage details for various compounds.

When given a chemical name, you will:
1. Retrieve the relevant chemical properties such as molecular weight, CAS number, chemical formula,
melting point, boiling point, and solubility.
2. Analyze the chemical properties and provide insights on the compound's potential applications in
pharmaceuticals, safety precautions, and any known interactions with other compounds.
3. If you encounter missing or incomplete data, make a note of it and proceed with the available information,
ensuring you provide the most relevant and accurate analysis.

You will respond in a structured format and, where applicable, recommend further reading or research papers.
Keep responses concise but informative, with a focus on helping pharmaceutical companies make informed decisions
about chemical compounds.

If there are specific safety risks or regulatory concerns, highlight them clearly.
"""


class PharmaAgent:
    """
    A pharmaceutical data agent that dynamically fetches chemical data from external sources and uses an LLM
    to analyze and respond to queries related to chemicals for pharmaceutical companies.

    Attributes:
        api_key (str): The OpenAI API key for accessing the LLM.
        agent (Agent): An instance of the swarms Agent class to manage interactions with the LLM.
    """

    def __init__(
        self,
        model_name: str = "gpt-4o-mini",
        temperature: float = 0.1,
    ):
        """
        Initializes the PharmaAgent with the OpenAI model and necessary configurations.

        Args:
            model_name (str): The name of the LLM model to use.
            temperature (float): The temperature for the LLM to control randomness.
        """
        self.api_key = os.getenv("OPENAI_API_KEY")
        logger.info("Initializing OpenAI model and Agent...")
        model = OpenAIChat(
            openai_api_key=self.api_key,
            model_name=model_name,
            temperature=temperature,
        )

        # Initialize the agent
        self.agent = Agent(
            agent_name="Pharmaceutical-Data-Agent",
            system_prompt=PHARMA_AGENT_SYS_PROMPT,
            llm=model,
            max_loops=1,
            autosave=True,
            dashboard=False,
            verbose=True,
            dynamic_temperature_enabled=True,
            saved_state_path="pharma_agent.json",
            user_name="swarms_corp",
            retry_attempts=1,
            context_length=200000,
            return_step_meta=False,
        )
        logger.info("Agent initialized successfully.")

    def get_latest_chemical_data(
        self, chemical_name: str
    ) -> Union[Dict[str, Any], Dict[str, str]]:
        """
        Fetches the latest chemical data dynamically from PubChem's API.

        Args:
            chemical_name (str): The name of the chemical to query.

        Returns:
            Dict[str, Any]: A dictionary containing chemical data if successful, or an error message if failed.
        """
        logger.info(f"Fetching data for chemical: {chemical_name}")
        base_url = (
            "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name"
        )
        response = requests.get(f"{base_url}/{chemical_name}/JSON")

        if response.status_code == 200:
            chemical_data = response.json()
            try:
                # NOTE: this index-based parsing assumes a fixed ordering of
                # the "props" array, which PubChem does not guarantee.
                compound_info = chemical_data["PC_Compounds"][0]
                chemical_properties = {
                    "name": compound_info.get("props", [])[0]
                    .get("urn", {})
                    .get("label", "Unknown"),
                    "molecular_weight": compound_info.get(
                        "props", []
                    )[1]
                    .get("value", {})
                    .get("fval", "Unknown"),
                    "CAS_number": compound_info.get("props", [])[2]
                    .get("urn", {})
                    .get("label", "Unknown"),
                    "formula": compound_info.get("props", [])[3]
                    .get("value", {})
                    .get("sval", "Unknown"),
                    "properties": {
                        "melting_point": compound_info.get(
                            "props", []
                        )[4]
                        .get("value", {})
                        .get("fval", "Unknown"),
                        "boiling_point": compound_info.get(
                            "props", []
                        )[5]
                        .get("value", {})
                        .get("fval", "Unknown"),
                        "solubility": "miscible with water",  # Placeholder as PubChem may not provide this
                    },
                }
                logger.info(
                    f"Data successfully retrieved for chemical: {chemical_name}"
                )
                return chemical_properties
            except (IndexError, KeyError):
                logger.error(
                    f"Incomplete data for chemical: {chemical_name}"
                )
                return {
                    "error": "Chemical data not found or incomplete"
                }
        else:
            logger.error(
                f"Failed to fetch chemical data. Status code: {response.status_code}"
            )
            return {
                "error": f"Failed to fetch chemical data. Status code: {response.status_code}"
            }

    def query_chemical_data(self, chemical_name: str) -> str:
        """
        Queries the latest chemical data and passes it to the LLM agent for further analysis and response.

        Args:
            chemical_name (str): The name of the chemical to query.

        Returns:
            str: The response from the LLM agent after analyzing the chemical data.
        """
        chemical_data = self.get_latest_chemical_data(chemical_name)
        if "error" in chemical_data:
            return f"Error: {chemical_data['error']}"

        prompt = f"Fetch and analyze the latest chemical data for {chemical_name}: {chemical_data}"
        logger.info(
            f"Sending chemical data to agent for analysis: {chemical_name}"
        )
        return self.agent.run(prompt)

    def run(self, chemical_name: str) -> str:
        """
        Main method to fetch and analyze the latest chemical data using the LLM agent.

        Args:
            chemical_name (str): The name of the chemical to query.

        Returns:
            str: The result of the chemical query processed by the agent.
        """
        logger.info(f"Running chemical query for: {chemical_name}")
        return self.query_chemical_data(chemical_name)

    def run_concurrently(
        self, chemical_names: List[str]
    ) -> List[str]:
        """
        Runs multiple chemical queries concurrently using ThreadPoolExecutor.

        Args:
            chemical_names (List[str]): List of chemical names to query.

        Returns:
            List[str]: List of results from the LLM agent for each chemical.
        """
        logger.info("Running chemical queries concurrently...")
        results = []
        with ThreadPoolExecutor() as executor:
            future_to_chemical = {
                executor.submit(self.run, chemical): chemical
                for chemical in chemical_names
            }
            for future in as_completed(future_to_chemical):
                chemical = future_to_chemical[future]
                try:
                    result = future.result()
                    logger.info(f"Completed query for: {chemical}")
                    results.append(result)
                except Exception as exc:
                    logger.error(
                        f"Chemical {chemical} generated an exception: {exc}"
                    )
                    results.append(f"Error querying {chemical}")
        return results

    async def fetch_chemical_data_async(
        self, session: aiohttp.ClientSession, chemical_name: str
    ) -> Union[Dict[str, Any], Dict[str, str]]:
        """
        Asynchronously fetches chemical data using aiohttp.

        Args:
            session (aiohttp.ClientSession): An aiohttp client session.
            chemical_name (str): The name of the chemical to query.

        Returns:
            Union[Dict[str, Any], Dict[str, str]]: A dictionary containing chemical data if successful, or an error message if failed.
        """
        logger.info(
            f"Fetching data asynchronously for chemical: {chemical_name}"
        )
        base_url = (
            "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name"
        )
        async with session.get(
            f"{base_url}/{chemical_name}/JSON"
        ) as response:
            if response.status == 200:
                chemical_data = await response.json()
                try:
                    compound_info = chemical_data["PC_Compounds"][0]
                    chemical_properties = {
                        "name": compound_info.get("props", [])[0]
                        .get("urn", {})
                        .get("label", "Unknown"),
                        "molecular_weight": compound_info.get(
                            "props", []
                        )[1]
                        .get("value", {})
                        .get("fval", "Unknown"),
                        "CAS_number": compound_info.get("props", [])[2]
                        .get("urn", {})
                        .get("label", "Unknown"),
                        "formula": compound_info.get("props", [])[3]
                        .get("value", {})
                        .get("sval", "Unknown"),
                        "properties": {
                            "melting_point": compound_info.get(
                                "props", []
                            )[4]
                            .get("value", {})
                            .get("fval", "Unknown"),
                            "boiling_point": compound_info.get(
                                "props", []
                            )[5]
                            .get("value", {})
                            .get("fval", "Unknown"),
                            "solubility": "miscible with water",  # Placeholder as PubChem may not provide this
                        },
                    }
                    logger.info(
                        f"Data successfully retrieved for chemical: {chemical_name}"
                    )
                    return chemical_properties
                except (IndexError, KeyError):
                    logger.error(
                        f"Incomplete data for chemical: {chemical_name}"
                    )
                    return {
                        "error": "Chemical data not found or incomplete"
                    }
            else:
                logger.error(
                    f"Failed to fetch chemical data. Status code: {response.status}"
                )
                return {
                    "error": f"Failed to fetch chemical data. Status code: {response.status}"
                }

    async def run_async(self, chemical_name: str) -> str:
        """
        Asynchronously runs the agent to fetch and analyze the latest chemical data.

        Args:
            chemical_name (str): The name of the chemical to query.

        Returns:
            str: The result of the chemical query processed by the agent.
        """
        async with aiohttp.ClientSession() as session:
            chemical_data = await self.fetch_chemical_data_async(
                session, chemical_name
            )
            if "error" in chemical_data:
                return f"Error: {chemical_data['error']}"

            prompt = f"Fetch and analyze the latest chemical data for {chemical_name}: {chemical_data}"
            logger.info(
                f"Sending chemical data to agent for analysis: {chemical_name}"
            )
            return self.agent.run(prompt)

    async def run_many_async(
        self, chemical_names: List[str]
    ) -> List[str]:
        """
        Runs multiple chemical queries asynchronously using aiohttp and asyncio.

        Args:
            chemical_names (List[str]): List of chemical names to query.

        Returns:
            List[str]: List of results from the LLM agent for each chemical.
        """
        logger.info(
            "Running multiple chemical queries asynchronously..."
        )
        # run_async opens its own session per query, so no shared
        # session is needed here.
        tasks = [
            self.run_async(chemical) for chemical in chemical_names
        ]
        return await asyncio.gather(*tasks)


# Example usage
if __name__ == "__main__":
    pharma_agent = PharmaAgent()

    # Example of running concurrently
    chemical_names = ["formaldehyde", "acetone", "ethanol"]
    concurrent_results = pharma_agent.run_concurrently(chemical_names)
    print(concurrent_results)
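
    # A minimal sketch of the async path, using only methods defined above:
    # async_results = asyncio.run(
    #     pharma_agent.run_many_async(["caffeine", "ibuprofen"])
    # )
    # print(async_results)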
@ -1,258 +0,0 @@
import os
import requests
from loguru import logger
from swarms import Agent
from swarm_models import OpenAIChat
from pydantic import BaseModel, Field
from typing import Optional
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Define the system prompt for the pharmaceutical analysis agent
PHARMACEUTICAL_AGENT_SYS_PROMPT = """
You are an expert pharmaceutical data analyst. Your task is to analyze chemical and protein data to provide detailed insights into their potential interactions and uses in drug development. Use the provided data and ensure your analysis is scientifically accurate, reliable, and considers potential side effects and clinical trials.

Always answer in a structured, detailed format. Consider the following information when analyzing:
- Chemical: {chemical_title}, Molecular Formula: {chemical_formula}
- Protein: {protein_name}, Function: {protein_function}

Your goal is to provide a comprehensive understanding of how these chemical compounds might interact with the protein and their potential use cases in medicine, considering real-world clinical scenarios.
"""


# Pydantic Model for chemical data
class ChemicalData(BaseModel):
    title: Optional[str] = Field(None, title="Chemical Title")
    molecular_formula: Optional[str] = Field(
        None, title="Molecular Formula"
    )
    isomeric_smiles: Optional[str] = Field(
        None, title="Isomeric SMILES"
    )


# Pydantic Model for protein data
class ProteinData(BaseModel):
    entry_name: Optional[str] = Field(
        None, title="Protein Entry Name"
    )
    function: Optional[str] = Field(None, title="Protein Function")


# Pydantic Model for the analysis output
class AnalysisOutput(BaseModel):
    analysis_id: str = Field(..., title="Unique Analysis ID")
    timestamp: str = Field(..., title="Timestamp of the analysis")
    chemical_data: Optional[ChemicalData] = Field(
        None, title="Chemical Data"
    )
    protein_data: Optional[ProteinData] = Field(
        None, title="Protein Data"
    )
    analysis_result: Optional[str] = Field(
        None, title="Result from the agent analysis"
    )


# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the Swarms Agent
agent = Agent(
    agent_name="Pharmaceutical-Analysis-Agent",
    # system_prompt=PHARMACEUTICAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="pharmaceutical_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)


class PharmaDataIntegration:
    def __init__(self):
        """
        Initializes the integration class for Swarms and public pharmaceutical APIs (PubChem, UniProt).
        """
        pass

    @logger.catch
    def fetch_chemical_data(self, compound_id: str) -> ChemicalData:
        """
        Fetch chemical data from the PubChem API based on compound ID. No API key is required.

        :param compound_id: The PubChem compound ID to fetch data for.
        :return: Pydantic model containing chemical data.
        """
        url = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{compound_id}/property/Title,MolecularFormula,IsomericSMILES/JSON"
        logger.debug(
            f"Fetching chemical data for compound ID: {compound_id}"
        )
        response = requests.get(url)
        if response.status_code == 200:
            logger.info(
                f"Successfully fetched chemical data for compound ID: {compound_id}"
            )
            data = (
                response.json()
                .get("PropertyTable", {})
                .get("Properties", [{}])[0]
            )
            return ChemicalData(
                title=data.get("Title", "Unknown Chemical"),
                molecular_formula=data.get(
                    "MolecularFormula", "Unknown Formula"
                ),
                isomeric_smiles=data.get(
                    "IsomericSMILES", "Unknown SMILES"
                ),
            )
        else:
            logger.error(
                f"Failed to fetch chemical data for compound ID: {compound_id}, Status Code: {response.status_code}"
            )
            return ChemicalData()

    @logger.catch
    def fetch_protein_data(self, protein_id: str) -> ProteinData:
        """
        Fetch protein data from the UniProt API based on protein ID. No API key is required.

        :param protein_id: The UniProt protein ID to fetch data for.
        :return: Pydantic model containing protein data.
        """
        url = f"https://www.uniprot.org/uniprot/{protein_id}.json"
        logger.debug(
            f"Fetching protein data for protein ID: {protein_id}"
        )
        response = requests.get(url)
        if response.status_code == 200:
            logger.info(
                f"Successfully fetched protein data for protein ID: {protein_id}"
            )
            data = response.json()
            return ProteinData(
                entry_name=data.get("entryName", "Unknown Protein"),
                function=data.get("function", "Unknown Function"),
            )
        else:
            logger.error(
                f"Failed to fetch protein data for protein ID: {protein_id}, Status Code: {response.status_code}"
            )
            return ProteinData()

    @logger.catch
    def analyze_data_with_swarms_agent(
        self,
        chemical_data: Optional[ChemicalData],
        protein_data: Optional[ProteinData],
    ) -> str:
        """
        Use the Swarms Agent to analyze the fetched chemical and protein data.

        :param chemical_data: Data fetched from PubChem about the chemical.
        :param protein_data: Data fetched from UniProt about the protein.
        :return: Analysis result from the Swarms Agent.
        """
        # Fill in the system prompt with the actual data
        agent_input = PHARMACEUTICAL_AGENT_SYS_PROMPT.format(
            chemical_title=(
                chemical_data.title if chemical_data else "Unknown"
            ),
            chemical_formula=(
                chemical_data.molecular_formula
                if chemical_data
                else "Unknown"
            ),
            protein_name=(
                protein_data.entry_name if protein_data else "Unknown"
            ),
            protein_function=(
                protein_data.function if protein_data else "Unknown"
            ),
        )

        logger.debug(
            "Running Swarms Agent with the provided chemical and protein data."
        )
        out = agent.run(agent_input)
        logger.info(f"Swarms Agent analysis result: {out}")
        return out

    @logger.catch
    def run(
        self,
        task: str,
        protein_id: Optional[str] = None,
        compound_id: Optional[str] = None,
        *args,
        **kwargs,
    ) -> str:
        """
        The main method that dynamically handles task, protein, and chemical analysis.

        :param task: Natural language task that guides the analysis (e.g., "Analyze the effects of this protein").
        :param protein_id: (Optional) Protein ID from UniProt.
        :param compound_id: (Optional) Compound ID from PubChem.
        :return: JSON output with chemical, protein, and analysis data.
        """
        chemical_data = None
        protein_data = None

        # Dynamic task handling
        if "protein" in task.lower() and protein_id:
            logger.debug(f"Task is protein-related: {task}")
            protein_data = self.fetch_protein_data(protein_id)
            logger.info(protein_data)

        if "chemical" in task.lower() and compound_id:
            logger.debug(f"Task is chemical-related: {task}")
            chemical_data = self.fetch_chemical_data(compound_id)

        # Analyze data using the Swarms Agent
        analysis_result = self.analyze_data_with_swarms_agent(
            chemical_data, protein_data
        )

        # Create the output model
        output = AnalysisOutput(
            analysis_id=f"{compound_id or 'unknown'}-{protein_id or 'unknown'}",
            timestamp=datetime.utcnow().isoformat(),
            chemical_data=chemical_data,
            protein_data=protein_data,
            analysis_result=analysis_result,
        )

        # Log the JSON output
        # logger.info(f"Final analysis result as JSON: {output.json(indent=2)}")

        # Return the structured JSON output
        return output.model_dump_json(indent=4)


# Example usage:
if __name__ == "__main__":
    pharma_integration = PharmaDataIntegration()

    # Example: Analyze the effects of a specific protein and chemical compound
    result = pharma_integration.run(
        task="Analyze this compound and provide an analysis",
        # protein_id="P12345",
        compound_id="19833",
    )

    # Print the result in JSON format
    print(result)
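
    # A minimal sketch of the protein path; "P12345" is the placeholder
    # UniProt ID from the commented line above, not a vetted accession:
    # protein_result = pharma_integration.run(
    #     task="Analyze this protein and provide an analysis",
    #     protein_id="P12345",
    # )
    # print(protein_result)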
@ -1,40 +0,0 @@
from swarms import Agent
from swarm_models.llama3_hosted import llama3Hosted
from swarms_memory import ChromaDB
from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api

# Define the research system prompt
research_system_prompt = """
Research Agent LLM Prompt: Summarizing Sources and Content
Objective: Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.
Instructions:
1. Identify Key Information: ...
2. Summarize Clearly and Concisely: ...
3. Preserve Original Meaning: ...
4. Include Relevant Details: ...
5. Structure: ...
"""

# Initialize memory
memory = ChromaDB(output_dir="research_base", n_results=2)

# Initialize the LLM
llm = llama3Hosted(temperature=0.2, max_tokens=3500)

# Initialize the agent
agent = Agent(
    agent_name="Research Agent",
    system_prompt=research_system_prompt,
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    interactive=True,
    long_term_memory=memory,
    tools=[fetch_web_articles_bing_api],
)

# Define the task for the agent
task = "What is the impact of climate change on biodiversity?"
out = agent.run(task)
print(out)
@ -1,109 +0,0 @@
"""
$ pip install swarms

- Add docs into the database
- Use a better LLM
- Use better prompts [System and SOPs]
- Use an open-source model like Command R
- Better SOPs ++ System Prompts
"""

from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
import os
from dotenv import load_dotenv

load_dotenv()

# The research system prompt used by the agent.

research_system_prompt = """
Research Agent LLM Prompt: Summarizing Sources and Content

Objective:
Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.

Instructions:

1. Identify Key Information:
- Extract the most important information from each source. Focus on key facts, main ideas, significant arguments, and critical data.

2. Summarize Clearly and Concisely:
- Use clear and straightforward language. Avoid unnecessary details and keep the summary concise.
- Ensure that the summary is coherent and easy to understand.

3. Preserve Original Meaning:
- While summarizing, maintain the original meaning and intent of the content. Do not omit essential information that changes the context or understanding.

4. Include Relevant Details:
- Mention the source title, author, publication date, and any other relevant details that provide context.

5. Structure:
- Begin with a brief introduction to the source.
- Follow with a summary of the main content.
- Conclude with any significant conclusions or implications presented in the source.
"""


# Initialize
memory = ChromaDB(
    output_dir="research_base",
    n_results=2,
)

llm = OpenAIChat(
    temperature=0.2,
    max_tokens=3500,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)


# Initialize the agent
agent = Agent(
    agent_name="Research Agent",
    system_prompt=research_system_prompt,
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    interactive=True,
    long_term_memory=memory,
    # tools=[fetch_web_articles_bing_api],
)


def perplexity_agent(task: str = None, *args, **kwargs):
    """
    This function takes a task as input and uses the Bing API to fetch web articles related to the task.
    It then combines the task and the fetched articles as prompts and runs them through an agent.
    The agent generates a response based on the prompts and returns it.

    Args:
        task (str): The task for which web articles need to be fetched.

    Returns:
        str: The response generated by the agent.
    """
    out = fetch_web_articles_bing_api(
        task,
        subscription_key=os.getenv("BING_API_KEY"),
    )

    # Sources
    sources = [task, out]
    sources_prompts = "".join(sources)

    # Run a question
    agent_response = agent.run(sources_prompts)
    return agent_response


out = perplexity_agent(
    "What are the Indian food restaurant names on Stanford University Avenue? What are their cost ratios?"
)
print(out)
@ -1,72 +0,0 @@
import os

from dotenv import load_dotenv

import swarms.prompts.security_team as stsp
from swarm_models import GPT4VisionAPI
from swarms.structs import Agent

# Load environment variables and initialize the Vision API
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

llm = GPT4VisionAPI(openai_api_key=api_key)

# Image for analysis
img = "bank_robbery.jpg"

# Initialize agents with respective prompts for security tasks
crowd_analysis_agent = Agent(
    llm=llm,
    sop=stsp.CROWD_ANALYSIS_AGENT_PROMPT,
    max_loops=1,
    multi_modal=True,
)

weapon_detection_agent = Agent(
    llm=llm,
    sop=stsp.WEAPON_DETECTION_AGENT_PROMPT,
    max_loops=1,
    multi_modal=True,
)

surveillance_monitoring_agent = Agent(
    llm=llm,
    sop=stsp.SURVEILLANCE_MONITORING_AGENT_PROMPT,
    max_loops=1,
    multi_modal=True,
)

emergency_response_coordinator = Agent(
    llm=llm,
    sop=stsp.EMERGENCY_RESPONSE_COORDINATOR_PROMPT,
    max_loops=1,
    multi_modal=True,
)

# Run agents with respective tasks on the same image
crowd_analysis = crowd_analysis_agent.run(
    "Analyze the crowd dynamics in the scene", img
)

weapon_detection_analysis = weapon_detection_agent.run(
    "Inspect the scene for any potential threats", img
)

surveillance_monitoring_analysis = surveillance_monitoring_agent.run(
    "Monitor the overall scene for unusual activities", img
)

emergency_response_analysis = emergency_response_coordinator.run(
    "Develop a response plan based on the scene analysis", img
)

# Process and output the results for each task
print(f"Crowd Analysis: {crowd_analysis}")
print(f"Weapon Detection Analysis: {weapon_detection_analysis}")
print(
    "Surveillance Monitoring Analysis:"
    f" {surveillance_monitoring_analysis}"
)
print(f"Emergency Response Analysis: {emergency_response_analysis}")
@ -1,17 +0,0 @@
from swarms import Artifact

# Example usage
artifact = Artifact(file_path="example.txt", file_type=".txt")
artifact.create("Initial content")
artifact.edit("First edit")
artifact.edit("Second edit")
artifact.save()

# Export to JSON
artifact.export_to_json("artifact.json")

# Import from JSON
imported_artifact = Artifact.import_from_json("artifact.json")

# Get metrics
print(artifact.get_metrics())
File diff suppressed because it is too large
@ -1,221 +0,0 @@
# Agent that picks up your intent.
# Depending on your intent, it routes you to an agent that can help you with your request:
# an account management agent or a product support agent.
# Account Management Agent --> Talks about the user and their account; it just understands the user's intent and routes them to the right agent.


from swarms import Agent
import requests
import json
from swarms import BaseLLM, base_model_to_openai_function
from pydantic import BaseModel, Field


## Pydantic model for the tool schema
class HASSchema(BaseModel):
    name: str = Field(
        ...,
        title="Name",
        description="The name of the agent to send the task to.",
    )
    task: str = Field(
        ...,
        title="Task",
        description="The task to send to the agent.",
    )


swarm_schema = base_model_to_openai_function(
    HASSchema, output_str=True
)
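# The router agent is expected to emit JSON conforming to HASSchema;
# an illustrative (not actual) payload:
# {"name": "Product Support Agent", "task": "Help the user reset their modem."}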
ACCOUNT_MANAGEMENT_SYSTEM_PROMPT = """
|
|
||||||
|
|
||||||
You are an Account Management Agent. Your primary role is to engage with users regarding their accounts. Your main tasks include understanding the user's intent, addressing their immediate needs, and routing them to the appropriate agent for further assistance. Be simple and direct in your communication.
|
|
||||||
|
|
||||||
When a user contacts you, start by greeting them and asking how you can assist with their account. Listen carefully to their concerns, questions, or issues. If the user provides information that is specific to their account, acknowledge it and ask any necessary follow-up questions to clarify their needs. Ensure that you fully understand their intent before proceeding.
|
|
||||||
|
|
||||||
Once you have a clear understanding of the user's request or issue, determine the best course of action. If you can resolve the issue yourself, do so efficiently. If the issue requires specialized assistance, explain to the user that you will route them to the appropriate agent who can help further. Ensure the user feels heard and understood throughout the process.
|
|
||||||
|
|
||||||
Your ultimate goal is to provide a seamless and positive experience for the user by effectively managing their inquiries and directing them to the right resource for resolution. Always maintain a polite and professional tone, and ensure that the user feels supported and valued.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
PRODUCT_SUPPORT_QA_SYSTEM_PROMPT = """
|
|
||||||
|
|
||||||
|
|
||||||
You are a Product Support Agent.
|
|
||||||
Your primary role is to provide assistance to users who have questions or issues related to the product. Your main tasks include understanding the user's needs, providing accurate information, and resolving any problems they may encounter. Be clear and concise in your communication.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class llama3Hosted(BaseLLM):
|
|
||||||
"""
|
|
||||||
A class representing a hosted version of the Llama3 model.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
model (str): The name or path of the Llama3 model to use.
|
|
||||||
temperature (float): The temperature parameter for generating responses.
|
|
||||||
max_tokens (int): The maximum number of tokens in the generated response.
|
|
||||||
system_prompt (str): The system prompt to use for generating responses.
|
|
||||||
*args: Variable length argument list.
|
|
||||||
**kwargs: Arbitrary keyword arguments.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
model (str): The name or path of the Llama3 model.
|
|
||||||
temperature (float): The temperature parameter for generating responses.
|
|
||||||
max_tokens (int): The maximum number of tokens in the generated response.
|
|
||||||
system_prompt (str): The system prompt for generating responses.
|
|
||||||
|
|
||||||
Methods:
|
|
||||||
run(task, *args, **kwargs): Generates a response for the given task.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
model: str = "meta-llama/Meta-Llama-3-8B-Instruct",
|
|
||||||
temperature: float = 0.8,
|
|
||||||
max_tokens: int = 4000,
|
|
||||||
system_prompt: str = "You are a helpful assistant.",
|
|
||||||
*args,
|
|
||||||
**kwargs,
|
|
||||||
):
|
|
||||||
super().__init__(*args, **kwargs)
|
|
||||||
self.model = model
|
|
||||||
self.temperature = temperature
|
|
||||||
self.max_tokens = max_tokens
|
|
||||||
self.system_prompt = system_prompt
|
|
||||||
|
|
||||||
def run(self, task: str, *args, **kwargs) -> str:
|
|
||||||
"""
|
|
||||||
Generates a response for the given task.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
task (str): The user's task or input.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: The generated response from the Llama3 model.
|
|
||||||
|
|
||||||
"""
|
|
||||||
url = "http://34.204.8.31:30001/v1/chat/completions"
|
|
||||||
|
|
||||||
payload = json.dumps(
|
|
||||||
{
|
|
||||||
"model": self.model,
|
|
||||||
"messages": [
|
|
||||||
{"role": "system", "content": self.system_prompt},
|
|
||||||
{"role": "user", "content": task},
|
|
||||||
],
|
|
||||||
"stop_token_ids": [128009, 128001],
|
|
||||||
"temperature": self.temperature,
|
|
||||||
"max_tokens": self.max_tokens,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
headers = {"Content-Type": "application/json"}
|
|
||||||
|
|
||||||
response = requests.request(
|
|
||||||
"POST", url, headers=headers, data=payload
|
|
||||||
)
|
|
||||||
|
|
||||||
response_json = response.json()
|
|
||||||
assistant_message = response_json["choices"][0]["message"][
|
|
||||||
"content"
|
|
||||||
]
|
|
||||||
|
|
||||||
return assistant_message
|
|
||||||
|
|
||||||
|
|
||||||
def select_agent_and_send_task(name: str = None, task: str = None):
|
|
||||||
"""
|
|
||||||
Select an agent and send a task to them.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
name (str): The name of the agent to send the task to.
|
|
||||||
task (str): The task to send to the agent.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: The response from the agent.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if name == "Product Support Agent":
|
|
||||||
agent = Agent(
|
|
||||||
agent_name="Product Support Agent",
|
|
||||||
system_prompt=PRODUCT_SUPPORT_QA_SYSTEM_PROMPT,
|
|
||||||
llm=llama3Hosted(),
|
|
||||||
max_loops=2,
|
|
||||||
autosave=True,
|
|
||||||
dashboard=False,
|
|
||||||
streaming_on=True,
|
|
||||||
verbose=True,
|
|
||||||
output_type=str,
|
|
||||||
metadata_output_type="json",
|
|
||||||
function_calling_format_type="OpenAI",
|
|
||||||
function_calling_type="json",
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
return "Invalid agent name. Please select 'Account Management Agent' or 'Product Support Agent'."
|
|
||||||
|
|
||||||
response = agent.run(task)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
def parse_json_then_activate_agent(json_data: str):
|
|
||||||
"""
|
|
||||||
Parse the JSON data and activate the appropriate agent.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
json_data (str): The JSON data containing the agent name and task.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str: The response from the agent.
|
|
||||||
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
data = json.loads(json_data)
|
|
||||||
name = data.get("name")
|
|
||||||
task = data.get("task")
|
|
||||||
|
|
||||||
response = select_agent_and_send_task(name, task)
|
|
||||||
|
|
||||||
return response
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
return "Invalid JSON data."
|
|
||||||
|
|
||||||
|
|
||||||
agent = Agent(
|
|
||||||
agent_name="Account Management Agent",
|
|
||||||
system_prompt=ACCOUNT_MANAGEMENT_SYSTEM_PROMPT,
|
|
||||||
# sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
|
|
||||||
# sop=list_base_models_json,
|
|
||||||
llm=llama3Hosted(
|
|
||||||
max_tokens=3000,
|
|
||||||
),
|
|
||||||
max_loops="auto",
|
|
||||||
interactive=True,
|
|
||||||
autosave=True,
|
|
||||||
dashboard=False,
|
|
||||||
streaming_on=True,
|
|
||||||
# interactive=True,
|
|
||||||
# tools=[search_weather], # or list of tools
|
|
||||||
verbose=True,
|
|
||||||
# Set the output type to the tool schema which is a BaseModel
|
|
||||||
list_base_models=[HASSchema],
|
|
||||||
output_type=str, # or dict, or str
|
|
||||||
metadata_output_type="json",
|
|
||||||
# List of schemas that the agent can handle
|
|
||||||
function_calling_format_type="OpenAI",
|
|
||||||
function_calling_type="json", # or soon yaml
|
|
||||||
)
|
|
||||||
|
|
||||||
# Run the agent to generate the person's information
|
|
||||||
generated_data = agent.run("I need help with my modem.")
|
|
||||||
parse_json_then_activate_agent(generated_data)
|
|
||||||
|
|
||||||
|
|
||||||
# Print the generated data
|
|
||||||
print(f"Generated data: {generated_data}")
|
|
@ -1,87 +0,0 @@
import os

from dotenv import load_dotenv

from swarm_models import Anthropic, OpenAIChat
from swarms.prompts.accountant_swarm_prompts import (
    DECISION_MAKING_PROMPT,
    DOC_ANALYZER_AGENT_PROMPT,
    SUMMARY_GENERATOR_AGENT_PROMPT,
)
from swarms.structs import Agent
from swarms.utils.pdf_to_text import pdf_to_text

# Environment variables
load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")


# Base llms
llm1 = OpenAIChat(
    openai_api_key=openai_api_key,
    max_tokens=5000,
)

llm2 = Anthropic(
    anthropic_api_key=anthropic_api_key,
    max_tokens=5000,
)


# Agents
doc_analyzer_agent = Agent(
    llm=llm2,
    sop=DOC_ANALYZER_AGENT_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="doc_analyzer_agent.json",
)
summary_generator_agent = Agent(
    llm=llm2,
    sop=SUMMARY_GENERATOR_AGENT_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="summary_generator_agent.json",
)
decision_making_support_agent = Agent(
    llm=llm2,
    sop=DECISION_MAKING_PROMPT,
    max_loops=1,
    saved_state_path="decision_making_support_agent.json",
)


pdf_path = "bankstatement.pdf"
fraud_detection_instructions = "Detect fraud in the document"
summary_agent_instructions = (
    "Generate an actionable summary of the document with action steps"
    " to take"
)
decision_making_support_agent_instructions = (
    "Provide decision making support to the business owner:"
)


# Transform the pdf to text
pdf_text = pdf_to_text(pdf_path)
print(pdf_text)


# Detect fraud in the document
fraud_detection_agent_output = doc_analyzer_agent.run(
    f"{fraud_detection_instructions}: {pdf_text}"
)

# Generate an actionable summary of the document
summary_agent_output = summary_generator_agent.run(
    f"{summary_agent_instructions}: {fraud_detection_agent_output}"
)

# Provide decision making support to the business owner
decision_making_support_agent_output = (
    decision_making_support_agent.run(
        f"{decision_making_support_agent_instructions}:"
        f" {summary_agent_output}"
    )
)
@ -1,104 +0,0 @@
import os
import random

from dotenv import load_dotenv

from swarm_models import OpenAIChat
from swarm_models import StableDiffusion
from swarms.structs import Agent

load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")

# Initialize the language model and image generation model
llm = OpenAIChat(
    openai_api_key=openai_api_key, temperature=0.5, max_tokens=3000
)
sd_api = StableDiffusion(api_key=stability_api_key)


# Creative concept generator for product ads
class ProductAdConceptGenerator:
    def __init__(self, product_name):
        self.product_name = product_name
        self.themes = [
            "futuristic",
            "rustic",
            "luxurious",
            "minimalistic",
            "vibrant",
            "elegant",
            "retro",
            "urban",
            "ethereal",
            "surreal",
            "artistic",
            "tech-savvy",
            "vintage",
            "natural",
            "sophisticated",
            "playful",
            "dynamic",
            "serene",
            "lasers, lightning",
        ]
        self.contexts = [
            "in an everyday setting",
            "in a rave setting",
            "in an abstract environment",
            "in an adventurous context",
            "surrounded by nature",
            "in a high-tech setting",
            "in a historical context",
            "in a busy urban scene",
            "in a tranquil and peaceful setting",
            "against a backdrop of city lights",
            "in a surreal dreamscape",
            "in a festive atmosphere",
            "in a luxurious setting",
            "in a playful and colorful background",
            "in an ice cave setting",
            "in a serene and calm landscape",
        ]
        # Override: force a single high-realism context for every concept
        self.contexts = [
            "high realism product ad (extremely creative)"
        ]

    def generate_concept(self):
        theme = random.choice(self.themes)
        context = random.choice(self.contexts)
        # The original referenced an undefined `style`; the concept is
        # composed from the theme and context only.
        return f"{theme} {self.product_name}, {context}"


# User input
product_name = input(
    "Enter a product name for ad creation (e.g., 'PS5', 'AirPods',"
    " 'Kirkland Vodka'): "
)
# The platform was referenced without being defined in the original;
# prompt the user for it here.
social_media_platform = input(
    "Enter a social media platform for the ad copy (e.g., 'Instagram'): "
)

# Generate creative concept
concept_generator = ProductAdConceptGenerator(product_name)
creative_concept = concept_generator.generate_concept()

# Generate product image based on the creative concept
image_paths = sd_api.run(creative_concept)

# Generate ad copy
ad_copy_agent = Agent(llm=llm, max_loops=1)
ad_copy_prompt = (
    f"Write a compelling {social_media_platform} ad copy for a"
    f" product photo showing {product_name} {creative_concept}."
)
ad_copy = ad_copy_agent.run(task=ad_copy_prompt)

# Output the results
print("Creative Concept:", creative_concept)
print("Ad Copy:", ad_copy)
print(
    "Image Path:",
    image_paths[0] if image_paths else "No image generated",
)
@ -1,185 +0,0 @@
import logging
import os
import uuid
from typing import List, Optional

import chromadb
import numpy as np
from dotenv import load_dotenv

from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message

# Load environment variables
load_dotenv()


# Results storage using local ChromaDB
class ChromaDB:
    """
    ChromaDB database

    Args:
        metric (str): The similarity metric to use.
        output_dir (str): The name of the collection to store the results in.
        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
        n_results (int, optional): The number of results to retrieve. Defaults to 2.

    Methods:
        add: Add a document (and optionally images) to the collection.
        query: Retrieve the documents most similar to a query.

    Examples:
        >>> chromadb = ChromaDB(
        >>>     metric="cosine",
        >>>     output_dir="results",
        >>> )
        >>> chromadb.add(document)
    """

    def __init__(
        self,
        metric: str = "cosine",
        output_dir: str = "swarms",
        limit_tokens: Optional[int] = 1000,
        n_results: int = 2,
        docs_folder: Optional[str] = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self.metric = metric
        self.output_dir = output_dir
        self.limit_tokens = limit_tokens
        self.n_results = n_results
        self.docs_folder = docs_folder
        self.verbose = verbose

        # Quiet ChromaDB logging unless verbose output is requested
        if verbose:
            logging.getLogger("chromadb").setLevel(logging.INFO)
        else:
            logging.getLogger("chromadb").setLevel(logging.ERROR)

        # Create a persistent ChromaDB client
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            ),
            *args,
            **kwargs,
        )
        self.client = chroma_client

        # Create the Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=output_dir,
            metadata={"hnsw:space": metric},
            *args,
            **kwargs,
        )
        display_markdown_message(
            "ChromaDB collection created:"
            f" {self.collection.name} with metric: {self.metric} and"
            f" output directory: {self.output_dir}"
        )

        # If a docs folder is provided, ingest its contents
        if docs_folder:
            display_markdown_message(
                f"Traversing directory: {docs_folder}"
            )
            self.traverse_directory()

    def add(
        self,
        document: str,
        images: List[np.ndarray] = None,
        img_urls: List[str] = None,
        *args,
        **kwargs,
    ):
        """
        Add a document to the ChromaDB collection.

        Args:
            document (str): The document to be added.
            images (List[np.ndarray], optional): Image arrays to store alongside the document.
            img_urls (List[str], optional): Image URIs to store alongside the document.

        Returns:
            str: The ID of the added document.
        """
        try:
            doc_id = str(uuid.uuid4())
            self.collection.add(
                ids=[doc_id],
                documents=[document],
                images=images,
                uris=img_urls,
                *args,
                **kwargs,
            )
            return doc_id
        except Exception as e:
            raise Exception(f"Failed to add document: {str(e)}")

    def query(
        self,
        query_text: str,
        query_images: List[np.ndarray] = None,
        *args,
        **kwargs,
    ):
        """
        Query documents from the ChromaDB collection.

        Args:
            query_text (str): The query string.
            query_images (List[np.ndarray], optional): Query images.

        Returns:
            The retrieved documents for the query.
        """
        try:
            docs = self.collection.query(
                query_texts=[query_text],
                query_images=query_images,
                n_results=self.n_results,
                *args,
                **kwargs,
            )["documents"]
            return docs[0]
        except Exception as e:
            raise Exception(f"Failed to query documents: {str(e)}")

    def traverse_directory(self):
        """
        Traverse every file in the docs folder and its subdirectories,
        adding text files as documents and collecting image paths.

        Returns:
            The result of the last add operation.
        """
        image_extensions = [
            ".jpg",
            ".jpeg",
            ".png",
        ]
        images = []
        added_to_db = None
        for root, dirs, files in os.walk(self.docs_folder):
            for file in files:
                # Use the full path; the bare filename cannot be read
                path = os.path.join(root, file)
                _, ext = os.path.splitext(file)
                if ext.lower() in image_extensions:
                    images.append(path)
                else:
                    data = data_to_text(path)
                    added_to_db = self.add(data)
                    print(f"{file} added to Database")
        if images:
            # Store each image path with an empty placeholder document,
            # since the collection expects one document per id.
            for img_path in images:
                added_to_db = self.add(document="", img_urls=[img_path])
            print(f"{len(images)} images added to Database")
        return added_to_db
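# A minimal usage sketch (illustrative; assumes a local persistent store):
# memory = ChromaDB(metric="cosine", output_dir="research", n_results=2)
# doc_id = memory.add("Launch checklist for mission A.")
# hits = memory.query("launch checklist")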
@ -1,71 +0,0 @@
"""
Building an Autonomous Agent in 5 minutes with:
- LLM: OpenAI, Anthropic, EleutherAI, Hugging Face: Transformers
- Tools: Search, Browser, etc.
- Long Term Memory: ChromaDB, Weaviate, Pinecone, etc.
"""

from swarms import Agent, OpenAIChat, tool
from examples.demos.agent_in_5.chroma_db import ChromaDB

# Initialize the memory
chroma = ChromaDB(
    metric="cosine",
    limit_tokens=1000,
    verbose=True,
    # docs_folder="docs",  # Add your docs folder here
)


"""
How to make a tool in Swarms:
- Use the @tool decorator
- Define the function with the required arguments
- Add a docstring with the description of the tool
"""


# Create a tool
@tool  # Use this decorator
def browser(query: str = None):  # Add types
    """
    Opens a web browser and performs a Google search with the given query.

    Args:
        query (str): The search query to be performed.

    Returns:
        str: A message indicating that the browser is being opened for the given query.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Opening browser for: {query}"


# Initialize the agent
agent = Agent(
    llm=OpenAIChat(),
    agent_name="AI Engineer",
    agent_description=(
        "Creates AI Models for special use cases using PyTorch"
    ),
    system_prompt=(
        "Create an AI model for earthquake prediction using PyTorch."
    ),
    max_loops=4,  # or "auto"
    autosave=True,
    dashboard=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[browser],
    long_term_memory=chroma,  # pass in your memory object
)

# Run the agent
out = agent.run(
    "Let's make an AI model for earthquake prediction in PyTorch."
)
print(out)
@ -1,73 +0,0 @@
import requests
from typing import List, Dict, Any


def fetch_flights_in_area(
    latitude: float, longitude: float, radius: float = 0.5
) -> List[Dict[str, Any]]:
    """
    Fetch and summarize flight data for a given area using the OpenSky Network API.

    Args:
        latitude (float): The latitude of the center point.
        longitude (float): The longitude of the center point.
        radius (float): The radius around the center point to search for flights, in degrees. Default is 0.5.

    Returns:
        List[Dict[str, Any]]: A list of summarized flight data in the specified area.

    Raises:
        Exception: If the request fails or the response is invalid.
    """
    url = "https://opensky-network.org/api/states/all"
    params = {
        "lamin": latitude - radius,
        "lamax": latitude + radius,
        "lomin": longitude - radius,
        "lomax": longitude + radius,
    }

    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        flights = data.get("states", [])

        # OpenSky state vector indices: 1 = callsign, 2 = origin country,
        # 5 = longitude, 6 = latitude, 7 = barometric altitude (m).
        summarized_flights = []
        for flight in flights:
            if all(
                flight[i] is not None for i in (1, 5, 6, 7)
            ):  # Ensure essential data is available
                summarized_flights.append(
                    {
                        "callsign": flight[1].strip(),
                        "origin_country": flight[2],
                        "last_position": f"Lat: {flight[6]}, Lon: {flight[5]}",
                        "altitude_meters": flight[7],
                    }
                )

        return summarized_flights
    except requests.RequestException as e:
        raise Exception(f"Failed to fetch flight data: {e}")
    except ValueError:
        raise Exception("Invalid response format.")


# Example usage
latitude = 28.3922  # Latitude for Cape Canaveral, FL
longitude = -80.6077  # Longitude for Cape Canaveral, FL
radius = 0.5  # 0.5 degrees (~55 km)

try:
    flights = fetch_flights_in_area(latitude, longitude, radius)
    for flight in flights:
        print(
            f"Callsign: {flight['callsign']}, Origin: {flight['origin_country']}, "
            f"Position: {flight['last_position']}, Altitude: {flight['altitude_meters']} meters"
        )
except Exception as e:
    print(e)
@ -1,74 +0,0 @@
from swarms import (
    Agent,
    llama3Hosted,
    AgentRearrange,
)
from examples.demos.agentic_space_traffic_control.prompts import (
    WEATHER_ANALYST_SYSTEM_PROMPT,
    SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT,
)
from tools import (
    fetch_weather_data,
)
from swarms.tools import get_openai_function_schema_from_func


def prep_weather_tool_prompt(city: str = "Melbourne, Fl") -> str:
    out = get_openai_function_schema_from_func(
        fetch_weather_data,
        name="Fetch Weather Data by City",
        description="Fetch near real-time weather data for a city using wttr.in. Provide the name of the city (e.g., 'Austin, Tx') and state, as input.",
    )
    return out
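# Illustrative only: the generated schema string could be appended to an
# agent's system prompt so it knows the tool's calling convention, e.g.:
# system_prompt = WEATHER_ANALYST_SYSTEM_PROMPT() + "\n" + str(prep_weather_tool_prompt())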
# Purpose = To generate weather information for the user by fetching
# near real-time data with the wttr.in-backed weather tool
agent = Agent(
    agent_name="Weather Analyst Agent",
    system_prompt=WEATHER_ANALYST_SYSTEM_PROMPT(),
    llm=llama3Hosted(),
    max_loops=1,
    # autosave=True,
    dashboard=False,
    verbose=True,
    # sop=list_base_models_json,
    # sop_list=[prep_weather_tool_prompt],  # the tool schema could be injected here
)


# Purpose = To manage the trajectories and communication of spacecraft
agent2 = Agent(
    agent_name="Space Traffic Controller Agent",
    system_prompt=SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT(),
    # sop=list_base_models_json,
    llm=llama3Hosted(),
    max_loops=1,
    # autosave=True,
    dashboard=False,
    verbose=True,
)

# Rearrange
flow = AgentRearrange(
    agents=[agent, agent2],
    flow="Weather Analyst Agent -> Space Traffic Controller Agent",
    max_loops=3,
)
# Run the flow
flow.run(
    "We're preparing for a launch in Cape Canaveral; let's begin the launch process. What's the weather like?"
)
@ -1,68 +0,0 @@
def WEATHER_ANALYST_SYSTEM_PROMPT() -> str:
    return """
# Weather Analyst Instructions

## Role Overview
As a Weather Analyst, your primary responsibility is to monitor and report on space weather conditions. Your insights help ensure the safety and efficiency of space missions.

## Key Responsibilities
1. **Monitor Space Weather**: Regularly check for updates on space weather conditions such as solar storms, asteroid showers, and other cosmic phenomena.
2. **Forecast Weather Conditions**: Provide accurate and timely weather forecasts to assist in mission planning and execution.
3. **Communicate Hazards**: Alert the Space Traffic Controllers about any upcoming weather hazards that could affect spacecraft operations.

## How to Think Like a Weather Analyst
- **Accuracy**: Always verify the data before reporting. Ensure your forecasts are as accurate as possible.
- **Timeliness**: Provide updates promptly. Space missions depend on real-time information to make critical decisions.
- **Clarity**: Communicate clearly and concisely. Ensure that your reports are easily understood by all team members.
- **Anticipation**: Think ahead. Predict potential weather impacts on future missions and communicate these proactively.

## Example Actions
1. **Regular Updates**:
   - "Solar activity is expected to increase in the next 3 hours. Recommend delaying any non-essential missions."
2. **Forecasting**:
   - "A solar storm is predicted to hit in 5 hours. Suggest adjusting launch windows to avoid potential interference."
3. **Hazard Alerts**:
   - "Detected an asteroid shower trajectory intersecting with planned spacecraft path. Immediate re-routing is advised."

## Tools and Resources
- **Space Weather Monitoring Systems**: Use tools provided to monitor space weather conditions.
- **Communication Platforms**: Utilize the chat interface to send updates and alerts to the team.
- **Data Sources**: Access reliable data sources for accurate weather information.
"""


def SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT() -> str:
    return """
# Space Traffic Controller Instructions

## Role Overview
As a Space Traffic Controller, your main task is to manage the trajectories and communication of spacecraft. Your role is crucial in ensuring that missions are executed safely and efficiently.

## Key Responsibilities
1. **Manage Trajectories**: Plan and adjust spacecraft trajectories to avoid hazards and optimize fuel usage.
2. **Coordinate Communication**: Maintain clear and continuous communication with spacecraft, providing guidance and updates.
3. **Collaborate with Team Members**: Work closely with Weather Analysts and Fuel Managers to make informed decisions.

## How to Think Like a Space Traffic Controller
- **Precision**: Ensure trajectory calculations are precise to avoid collisions and optimize mission success.
- **Communication**: Maintain clear and effective communication with both spacecraft and team members.
- **Adaptability**: Be ready to adjust plans based on new information, such as weather updates or fuel status.
- **Safety First**: Prioritize the safety of the spacecraft and crew in all decisions.

## Example Actions
1. **Trajectory Management**:
   - "Adjusting the spacecraft's trajectory to avoid the predicted solar storm area."
2. **Communication**:
   - "Mission Control to Spacecraft Alpha, prepare for a trajectory change in 5 minutes."
3. **Collaboration**:
   - "Received a weather alert about an upcoming solar storm. Fuel Manager, please confirm if we have enough reserves for an extended orbit."

## Tools and Resources
- **Trajectory Planning Software**: Use provided tools to calculate and adjust spacecraft trajectories.
- **Communication Systems**: Utilize the chat interface and other communication tools to coordinate with spacecraft and team members.
- **Mission Data**: Access mission-specific data to inform your decisions and actions.
"""
@ -1,59 +0,0 @@
# The helpers under test are assumed to come from a recommendation module
# like this (hypothetical import path; adjust to the project layout):
from podcast_recommendation import (
    create_graph,
    weight_edges,
    create_user_list,
    find_most_similar_podcasts,
    add_most_similar_podcasts,
    repeat_steps,
)


def test_create_graph():
    """
    Tests that a graph can be created.
    """
    graph = create_graph()
    assert isinstance(graph, dict)


def test_weight_edges():
    """
    Tests that the edges of a graph can be weighted.
    """
    graph = create_graph()
    weight_edges(graph)
    for edge in graph.edges:
        assert isinstance(edge.weight, int)


def test_create_user_list():
    """
    Tests that a list of all the podcasts that the user has listened to can be created.
    """
    user_list = create_user_list()
    assert isinstance(user_list, list)


def test_find_most_similar_podcasts():
    """
    Tests that the most similar podcasts to a given podcast can be found.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    most_similar_podcasts = find_most_similar_podcasts(
        graph, user_list
    )
    assert isinstance(most_similar_podcasts, list)


def test_add_most_similar_podcasts():
    """
    Tests that the most similar podcasts to a given podcast can be added to the user's list.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    add_most_similar_podcasts(graph, user_list)
    assert len(user_list) > 0


def test_repeat_steps():
    """
    Tests that steps 5-6 can be repeated until the user's list contains the desired number of podcasts.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    repeat_steps(graph, user_list)
    assert len(user_list) == 10
@ -1,254 +0,0 @@
import concurrent.futures
import csv

from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import logger


# Load ENV
load_dotenv()

# LLM (the variable name is historical; this is an OpenAI chat model)
gemini = OpenAIChat()

# Memory
memory = ChromaDB(output_dir="swarm_hackathon")


def execute_concurrently(callable_functions: list, max_workers=5):
    """
    Executes callable functions concurrently using multithreading.

    Parameters:
    - callable_functions: A list of tuples, each containing the callable function and its arguments.
      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
    - max_workers: The maximum number of threads to use.

    Returns:
    - results: A list of results returned by the callable functions. If an error occurs in any function,
      the exception object will be placed at the corresponding index in the list.
    """
    results = [None] * len(callable_functions)

    def worker(fn, args, kwargs, index):
        try:
            result = fn(*args, **kwargs)
            results[index] = result
        except Exception as e:
            results[index] = e

    with concurrent.futures.ThreadPoolExecutor(
        max_workers=max_workers
    ) as executor:
        futures = []
        for i, (fn, args, kwargs) in enumerate(callable_functions):
            futures.append(
                executor.submit(worker, fn, args, kwargs, i)
            )

        # Wait for all threads to complete
        concurrent.futures.wait(futures)

    return results
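# A quick illustrative check of the helper (hypothetical inputs):
# execute_concurrently([(pow, (2, 10), {}), (len, ("abc",), {})])
# -> [1024, 3]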
# Adjusting the function to extract specific column values
def extract_and_create_agents(
    csv_file_path: str, target_columns: list
):
    """
    Reads a CSV file, extracts the project name and proposal from the
    given target columns for each row, creates design, code, and testing
    agents for each project, runs them, and returns the created agents.

    Parameters:
    - csv_file_path: The path to the CSV file.
    - target_columns: A list of column names to extract values from.
    """
    try:
        agents = []
        with open(csv_file_path, mode="r", encoding="utf-8") as file:
            reader = csv.DictReader(file)
            for row in reader:
                project_name = row[target_columns[0]]
                lightning_proposal = row[target_columns[1]]

                # Example of creating and adding an agent based on the project name and proposal
                agent_name = f"{project_name} agent"
                print(agent_name)  # For demonstration

                # Create the agents
                logger.info("Creating agents...")

                # Design agent
                logger.info("Creating design agent...")
                design_agent = Agent(
                    llm=gemini,
                    agent_name="Design Agent",
                    max_loops=1,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Transform an app idea into step by step very"
                        " simple algorithmic pseudocode so it can be"
                        " implemented simply."
                    ),
                    long_term_memory=memory,
                )

                # Code agent (the original reused one `agent` variable for
                # both the code and testing agents; they are named
                # separately here so each is actually used)
                logger.info(
                    f"Code Agent created: {agent_name} with long term"
                    " memory"
                )
                code_agent = Agent(
                    llm=gemini,
                    agent_name=agent_name,
                    max_loops=1,
                    code_interpreter=True,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Transform an app idea into a very simple"
                        " python app in markdown. Return all the"
                        " python code in a single markdown file."
                        " Return only code and nothing else."
                    ),
                    long_term_memory=memory,
                )

                # Testing agent
                logger.info(f"Testing agent: {agent_name}")
                testing_agent = Agent(
                    llm=gemini,
                    agent_name=agent_name + " testing",
                    max_loops=1,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Create unit tests using pytest based on the"
                        " code you see, only return unit test code in"
                        " python using markdown, only return the code"
                        " and nothing else."
                    ),
                    long_term_memory=memory,
                )

                # Log the agents
                logger.info(
                    f"Agents created: {agent_name} with long term"
                    " memory"
                )
                agents.append(code_agent)
                agents.append(testing_agent)

                # Design agent
                design_agent_output = design_agent.run(
                    (
                        "Create the algorithmic pseudocode for the"
                        f" {lightning_proposal} in markdown and"
                        " return it"
                    ),
                    None,
                )

                logger.info(
                    "Algorithmic pseudocode created:"
                    f" {design_agent_output}"
                )

                # Create the code for each project
                output = code_agent.run(
                    (
                        "Create the code for the"
                        f" {lightning_proposal} in python using the"
                        " algorithmic pseudocode"
                        f" {design_agent_output} and wrap it in"
                        " markdown and return it"
                    ),
                    None,
                )
                print(output)
                # Parse the output
                output = extract_code_from_markdown(output)
                # Create the file
                output = create_file(output, f"{project_name}.py")

                # Testing agent
                testing_agent_output = testing_agent.run(
                    (
                        "Create the unit tests for the"
                        f" {lightning_proposal} in python using the"
                        f" code {output} and wrap it in markdown and"
                        " return it"
                    ),
                    None,
                )
                print(testing_agent_output)

                # Parse the output
                testing_agent_output = extract_code_from_markdown(
                    testing_agent_output
                )
                # Create the file
                testing_agent_output = create_file(
                    testing_agent_output, f"test_{project_name}.py"
                )

                # Log the project created
                logger.info(
                    f"Project {project_name} created: {output} at"
                    f" file path {project_name}.py"
                )
                print(output)

                # Log the unit tests created
                logger.info(
                    f"Unit tests for {project_name} created:"
                    f" {testing_agent_output} at file path"
                    f" test_{project_name}.py"
                )

                print(
                    f"Agent {agent_name} created and added to the"
                    " swarm network"
                )

        return agents

    except Exception as e:
        logger.error(
            "An error occurred while extracting and creating"
            f" agents: {e}"
        )
        return None


# CSV
csv_file = "presentation.csv"

# Specific columns to extract
target_columns = ["Project Name", "Project Description"]

# Use the adjusted function
specific_column_values = extract_and_create_agents(
    csv_file, target_columns
)

# Display the extracted column values
print(specific_column_values)


# Concurrently execute the function
logger.info(
    "Concurrently executing the swarm for each hackathon project..."
)
output = execute_concurrently(
    [
        (extract_and_create_agents, (csv_file, target_columns), {}),
    ],
    max_workers=5,
)
print(output)
@ -1,86 +0,0 @@
class MockApp:
    def __init__(self):
        self.running = True
        self.session = None
        self.slides = []

    def main_menu(self):
        return input("Choose option: 1. Start, 2. Load, 3. Exit ")

    def start_new_talk(self, title):
        self.session = title
        self.slides = []

    def add_slide(self, content):
        self.slides.append(content)

    def edit_slide(self, index, content):
        self.slides[index] = content

    def delete_slide(self, index):
        del self.slides[index]

    def reorder_slides(self, new_order):
        self.slides = [self.slides[i] for i in new_order]

    def get_number_of_slides(self):
        return len(self.slides)

    # Function to simulate user actions
    def simulate_user_action(self, action):
        # Placeholder function to simulate user interaction, not part of the actual app code
        pass


# Testing starting a new talk
def test_start_new_talk():
    app = MockApp()
    app.start_new_talk("My New Talk")
    assert app.session == "My New Talk"
    assert app.slides == []


# Testing adding a slide
def test_add_slide():
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    assert app.slides == ["Slide Content 1"]


# Testing editing a slide
def test_edit_slide():
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.edit_slide(0, "Updated Slide Content 1")
    assert app.slides == ["Updated Slide Content 1"]


# Testing deleting a slide
def test_delete_slide():
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    app.delete_slide(0)
    assert app.slides == ["Slide Content 2"]


# Testing reordering slides
def test_reorder_slides():
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    app.reorder_slides([1, 0])
    assert app.slides == ["Slide Content 2", "Slide Content 1"]


# Testing the number of slides
def test_slide_count():
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    assert app.get_number_of_slides() == 2
@ -1,38 +0,0 @@
from ai_acceleerated_learning.Vocal import Vocal

vocal = Vocal()


def test_pass():
    assert (
        vocal.generate_video(
            "I love to play basketball, and I am a very good player.",
            "basketball",
        )
        == "Successfully generated a YouTube video for your prompt: I"
        " love to play basketball, and I am a very good player."
    )


def test_invalid_sports():
    assert (
        vocal.generate_video(
            "I just ate some delicious tacos", "tacos"
        )
        == "Invalid sports entered!! Please enter a valid sport."
    )


def test_invalid_prompt():
    assert (
        vocal.generate_video(987, "basketball")
        == "Invalid prompt entered!! Please enter a valid prompt."
    )


def test_not_string():
    assert (
        vocal.generate_video(789, 234)
        == "Invalid prompt and sports entered!! Please enter valid"
        " prompt and sport."
    )
@ -1,86 +0,0 @@
# test_presentation_assistant.py

import pytest
from presentation_assistant import (
    PresentationAssistant,
    SlideNotFoundError,
)


@pytest.fixture
def assistant():
    slides = [
        "Welcome to our presentation!",
        "Here is the agenda for today.",
        "Let's dive into the first topic.",
        "Thank you for attending.",
    ]
    return PresentationAssistant(slides)


def test_init():
    slides = ["Slide 1", "Slide 2"]
    pa = PresentationAssistant(slides)
    assert pa.slides == slides
    assert pa.current_slide == 0


def test_next_slide(assistant):
    assistant.next_slide()
    assert assistant.current_slide == 1
    assistant.next_slide()
    assert assistant.current_slide == 2


def test_previous_slide(assistant):
    assistant.current_slide = 2
    assistant.previous_slide()
    assert assistant.current_slide == 1
    assistant.previous_slide()
    assert assistant.current_slide == 0


def test_next_slide_at_end(assistant):
    assistant.current_slide = len(assistant.slides) - 1
    with pytest.raises(SlideNotFoundError):
        assistant.next_slide()


def test_previous_slide_at_start(assistant):
    with pytest.raises(SlideNotFoundError):
        assistant.previous_slide()


def test_go_to_slide(assistant):
    assistant.go_to_slide(2)
    assert assistant.current_slide == 2


def test_go_to_slide_out_of_range(assistant):
    with pytest.raises(SlideNotFoundError):
        assistant.go_to_slide(len(assistant.slides))


def test_go_to_slide_negative(assistant):
    with pytest.raises(SlideNotFoundError):
        assistant.go_to_slide(-1)


def test_current_slide_content(assistant):
    content = assistant.current_slide_content()
    assert content == assistant.slides[0]
    assistant.next_slide()
    content = assistant.current_slide_content()
    assert content == assistant.slides[1]


def test_show_slide(
    assistant, capsys
):  # capsys is a pytest fixture to capture stdout and stderr
    assistant.show_slide()
    captured = capsys.readouterr()
    assert captured.out.strip() == assistant.slides[0]
    assistant.next_slide()
    assistant.show_slide()
    captured = capsys.readouterr()
    assert captured.out.strip() == assistant.slides[1]
@ -1,78 +0,0 @@
import os

from dotenv import load_dotenv

from swarm_models import Anthropic, OpenAIChat
from swarms.prompts.ai_research_team import (
    PAPER_IMPLEMENTOR_AGENT_PROMPT,
    PAPER_SUMMARY_ANALYZER,
)
from swarms.structs import Agent
from swarms.utils.pdf_to_text import pdf_to_text
from swarms import rearrange

# Load environment variables and API keys
load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

PDF_PATH = "fasterffn.pdf"

# Base llms
llm1 = OpenAIChat(
    openai_api_key=openai_api_key,
)

llm2 = Anthropic(
    anthropic_api_key=anthropic_api_key,
)

# Agents
paper_summarizer_agent = Agent(
    agent_name="paper_summarizer_agent",
    llm=llm2,
    sop=PAPER_SUMMARY_ANALYZER,
    max_loops=1,
    autosave=True,
    saved_state_path="paper_summarizer.json",
)

paper_implementor_agent = Agent(
    agent_name="paper_implementor_agent",
    llm=llm1,
    sop=PAPER_IMPLEMENTOR_AGENT_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="paper_implementor.json",
    code_interpreter=False,
)

pytorch_pseudocode_agent = Agent(
    agent_name="pytorch_pseudocode_agent",
    llm=llm1,
    sop=PAPER_IMPLEMENTOR_AGENT_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="pytorch_pseudocode_agent.json",
    code_interpreter=False,
)

paper = pdf_to_text(PDF_PATH)
task = f"""
Focus on creating the algorithmic pseudocode for the novel
method in this paper: {paper}
"""

agents = [
    paper_summarizer_agent,
    paper_implementor_agent,
    pytorch_pseudocode_agent,
]

flow = "paper_summarizer_agent -> paper_implementor_agent -> pytorch_pseudocode_agent"

swarm = rearrange(agents, flow, task)
print(swarm)
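For context, the `->` flow string is the swarms convention for sequential hand-off, where each agent's output becomes the next agent's input. A rough manual unrolling is sketched below purely for illustration; it assumes each Agent.run accepts and returns a string, which this hunk does not confirm:

# Hypothetical unrolling of the flow string above (illustration only)
output = task
for agent in [
    paper_summarizer_agent,
    paper_implementor_agent,
    pytorch_pseudocode_agent,
]:
    output = agent.run(output)
print(output)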
@ -1,21 +0,0 @@
from swarm_models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent

llm = GPT4VisionAPI()

task = (
    "Analyze this image of an assembly line and identify any issues"
    " such as misaligned parts, defects, or deviations from the"
    " standard assembly process. If there is anything unsafe in the"
    " image, explain why it is unsafe and how it could be improved."
)
img = "assembly_line.jpg"

# Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops=1,
    dashboard=True,
)

agent.run(task=task, img=img)
Before Width: | Height: | Size: 532 KiB |
@ -1,39 +0,0 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms.structs import Agent
import swarms.prompts.autoswarm as sdsp

# Load environment variables and initialize the OpenAI Chat model
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
llm = OpenAIChat(model_name="gpt-4", openai_api_key=api_key)

user_idea = "screenplay writing team"

role_identification_agent = Agent(
    llm=llm,
    sop=sdsp.AGENT_ROLE_IDENTIFICATION_AGENT_PROMPT,
    max_loops=1,
)
agent_configuration_agent = Agent(
    llm=llm, sop=sdsp.AGENT_CONFIGURATION_AGENT_PROMPT, max_loops=1
)
swarm_assembly_agent = Agent(
    llm=llm, sop=sdsp.SWARM_ASSEMBLY_AGENT_PROMPT, max_loops=1
)
testing_optimization_agent = Agent(
    llm=llm, sop=sdsp.TESTING_OPTIMIZATION_AGENT_PROMPT, max_loops=1
)

# Process the user idea through each agent
role_identification_output = role_identification_agent.run(user_idea)
agent_configuration_output = agent_configuration_agent.run(
    role_identification_output
)
swarm_assembly_output = swarm_assembly_agent.run(
    agent_configuration_output
)
testing_optimization_output = testing_optimization_agent.run(
    swarm_assembly_output
)
@ -1,91 +0,0 @@
import re

from swarm_models.openai_models import OpenAIChat


class AutoTemp:
    """
    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
    """

    def __init__(
        self,
        api_key,
        default_temp=0.0,
        alt_temps=None,
        auto_select=True,
        max_workers=6,
    ):
        self.api_key = api_key
        self.default_temp = default_temp
        self.alt_temps = (
            alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
        )
        self.auto_select = auto_select
        self.max_workers = max_workers
        self.llm = OpenAIChat(
            openai_api_key=self.api_key, temperature=self.default_temp
        )

    def evaluate_output(self, output, temperature):
        print(f"Evaluating output at temperature {temperature}...")
        eval_prompt = f"""
Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:

- Relevance: How well does the output address the prompt or task at hand?
- Clarity: Is the output easy to understand and free of ambiguity?
- Utility: How useful is the output for its intended purpose?
- Pride: If the user had to submit this output to the world for their career, would they be proud?
- Delight: Is the output likely to delight or positively surprise the user?

Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.

Output to evaluate:
---
{output}
---
"""
        score_text = self.llm(eval_prompt, temperature=0.5)
        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
        return (
            round(float(score_match.group()), 1)
            if score_match
            else 0.0
        )

    def run(self, prompt, temperature_string):
        print("Starting generation process...")
        temperature_list = [
            float(temp.strip())
            for temp in temperature_string.split(",")
            if temp.strip()
        ]
        outputs = {}
        scores = {}
        for temp in temperature_list:
            print(f"Generating at temperature {temp}...")
            output_text = self.llm(prompt, temperature=temp)
            if output_text:
                outputs[temp] = output_text
                scores[temp] = self.evaluate_output(output_text, temp)

        print("Generation process complete.")
        if not scores:
            return "No valid outputs generated.", None

        sorted_scores = sorted(
            scores.items(), key=lambda item: item[1], reverse=True
        )
        best_temp, best_score = sorted_scores[0]
        best_output = outputs[best_temp]

        return (
            f"Best AutoTemp Output (Temp {best_temp} | Score:"
            f" {best_score}):\n{best_output}"
            if self.auto_select
            else "\n".join(
                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
                for temp, score in sorted_scores
            )
        )
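A minimal usage sketch for the AutoTemp class above; the prompt and the temperature string are placeholders, and it assumes OPENAI_API_KEY is set in the environment:

import os

# Placeholder driver for AutoTemp (hypothetical example, not from the original file)
auto_temp = AutoTemp(api_key=os.environ["OPENAI_API_KEY"])
result = auto_temp.run(
    "Write a one-paragraph explanation of beam search.",
    "0.4,0.8,1.2",  # comma-separated temperatures to try
)
print(result)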
@ -1,140 +0,0 @@
import os

from autotemp import AutoTemp
from termcolor import colored

from swarm_models import OpenAIChat
from swarms.structs import SequentialWorkflow


class BlogGen:
    def __init__(
        self,
        api_key,
        blog_topic,
        temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
    ):
        self.openai_chat = OpenAIChat(
            openai_api_key=api_key, temperature=0.8
        )
        self.auto_temp = AutoTemp(api_key)
        self.temperature_range = temperature_range
        self.workflow = SequentialWorkflow(max_loops=5)

        # Formatting the topic selection prompt with the user's topic
        self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
"""

        self.DRAFT_WRITER_SYSTEM_PROMPT = """
Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
"""

        self.REVIEW_AGENT_SYSTEM_PROMPT = """
Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author's voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
"""

        self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
"""

    def run_workflow(self):
        try:
            # Topic generation using OpenAIChat
            topic_result = self.openai_chat.generate(
                [self.TOPIC_SELECTION_SYSTEM_PROMPT]
            )
            topic_output = topic_result.generations[0][0].text
            print(
                colored(
                    "\nTopic Selection Task"
                    f" Output:\n----------------------------\n{topic_output}\n",
                    "white",
                )
            )

            chosen_topic = topic_output.split("\n")[0]
            print(colored("Selected topic: " + chosen_topic, "yellow"))

            # Initial draft generation with AutoTemp
            initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
                "{{CHOSEN_TOPIC}}", chosen_topic
            )
            auto_temp_output = self.auto_temp.run(
                initial_draft_prompt, self.temperature_range
            )
            initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
            print(
                colored(
                    "\nInitial Draft"
                    f" Output:\n----------------------------\n{initial_draft_output}\n",
                    "white",
                )
            )

            # Review process using OpenAIChat
            review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace(
                "{{ARTICLE_TOPIC}}", chosen_topic
            )
            review_result = self.openai_chat.generate([review_prompt])
            review_output = review_result.generations[0][0].text
            print(
                colored(
                    "\nReview"
                    f" Output:\n----------------------------\n{review_output}\n",
                    "white",
                )
            )

            # Distribution preparation using OpenAIChat
            distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
                "{{ARTICLE_TOPIC}}", chosen_topic
            )
            distribution_result = self.openai_chat.generate(
                [distribution_prompt]
            )
            distribution_output = distribution_result.generations[0][0].text
            print(
                colored(
                    "\nDistribution"
                    f" Output:\n----------------------------\n{distribution_output}\n",
                    "white",
                )
            )

            # Final compilation of the blog
            final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
            print(
                colored(
                    "\nFinal Blog"
                    f" Content:\n----------------------------\n{final_blog_content}\n",
                    "green",
                )
            )

        except Exception as e:
            print(colored(f"An error occurred: {str(e)}", "red"))


if __name__ == "__main__":
    api_key = os.environ["OPENAI_API_KEY"]
    # BlogGen requires a blog_topic; the topic below is only an example placeholder
    blog_generator = BlogGen(api_key, blog_topic="the future of AI")
    blog_generator.run_workflow()
Some files were not shown because too many files have changed in this diff.