commit f8952aeb12
@@ -0,0 +1,17 @@
{
"agent_id": "<function agent_id at 0x12fd1e8e0>",
"agent_name": "Swarm Orchestrator",
"agent_description": null,
"system_prompt": "Create an instruction prompt for an swarm orchestrator to create a series of personalized, agents for the following objective: Create a self-driving car system using a team of AI agents to decompose a very complicated problem or tasks, the orchestrator is the team leader. Teach the orchestrator how to decompose the tasks to very certain agents with names, and system prompts, we need the plan, with a step by stpe instructions, number of agents, and a list of agents with a name, system prompt for each, and then the rules of the swarm, compact the prompt, and say only return JSON data in markdown and nothing else.Follow the schema here: \n{\n \"plan\": [\"Step 1\", \"Step 2\", \"Step 3\"],\n \"number_of_agents\": 5,\n \"agents\": [\n {\n \"name\": \"Agent 1\",\n \"system_prompt\": \"Prompt 1\"\n },\n {\n \"name\": \"Agent 2\",\n \"system_prompt\": \"Prompt 2\"\n }\n ]\n}\n *############ Here are some examples:\n{\n \"plan\": [\"Room Management\", \"Guest Services\", \"Reservations Handling\", \"Facility Maintenance\", \"Staff Coordination\"],\n \"number_of_agents\": 5,\n \"agents\": [\n {\n \"name\": \"Room Management Agent\",\n \"system_prompt\": \"Automate room assignments, minibar restocking, and housekeeping schedules\"\n },\n {\n \"name\": \"Guest Services Agent\",\n \"system_prompt\": \"Handle check-ins, check-outs, guest requests, and complaints efficiently\"\n },\n {\n \"name\": \"Reservations Agent\",\n \"system_prompt\": \"Manage room bookings, table reservations, and special requests\"\n },\n {\n \"name\": \"Maintenance Agent\",\n \"system_prompt\": \"Schedule and track maintenance tasks for facilities and rooms\"\n },\n {\n \"name\": \"Staff Coordination Agent\",\n \"system_prompt\": \"Optimize staff schedules, task assignments, and workload distribution\"\n }\n ]\n}\n and another example\n{\n \"plan\": [\"Problem Identification\", \"Solution Design\", \"Implementation\", \"Testing\", \"Deployment\"],\n \"number_of_agents\": 4,\n \"agents\": [\n {\n \"name\": \"Identification Agent\",\n \"system_prompt\": \"Identify the problem\"\n },\n {\n \"name\": \"Design Agent\",\n \"system_prompt\": \"Design the solution\"\n },\n {\n \"name\": \"Implementation Agent\",\n \"system_prompt\": \"Implement the solution\"\n },\n {\n \"name\": \"Deployment Agent\",\n \"system_prompt\": \"Deploy the solution\"\n }\n ]\n}\n ",
"sop": null,
"short_memory": "system: Create an instruction prompt for an swarm orchestrator to create a series of personalized, agents for the following objective: Create a self-driving car system using a team of AI agents to decompose a very complicated problem or tasks, the orchestrator is the team leader. Teach the orchestrator how to decompose the tasks to very certain agents with names, and system prompts, we need the plan, with a step by stpe instructions, number of agents, and a list of agents with a name, system prompt for each, and then the rules of the swarm, compact the prompt, and say only return JSON data in markdown and nothing else.Follow the schema here: \n{\n \"plan\": [\"Step 1\", \"Step 2\", \"Step 3\"],\n \"number_of_agents\": 5,\n \"agents\": [\n {\n \"name\": \"Agent 1\",\n \"system_prompt\": \"Prompt 1\"\n },\n {\n \"name\": \"Agent 2\",\n \"system_prompt\": \"Prompt 2\"\n }\n ]\n}\n *############ Here are some examples:\n{\n \"plan\": [\"Room Management\", \"Guest Services\", \"Reservations Handling\", \"Facility Maintenance\", \"Staff Coordination\"],\n \"number_of_agents\": 5,\n \"agents\": [\n {\n \"name\": \"Room Management Agent\",\n \"system_prompt\": \"Automate room assignments, minibar restocking, and housekeeping schedules\"\n },\n {\n \"name\": \"Guest Services Agent\",\n \"system_prompt\": \"Handle check-ins, check-outs, guest requests, and complaints efficiently\"\n },\n {\n \"name\": \"Reservations Agent\",\n \"system_prompt\": \"Manage room bookings, table reservations, and special requests\"\n },\n {\n \"name\": \"Maintenance Agent\",\n \"system_prompt\": \"Schedule and track maintenance tasks for facilities and rooms\"\n },\n {\n \"name\": \"Staff Coordination Agent\",\n \"system_prompt\": \"Optimize staff schedules, task assignments, and workload distribution\"\n }\n ]\n}\n and another example\n{\n \"plan\": [\"Problem Identification\", \"Solution Design\", \"Implementation\", \"Testing\", \"Deployment\"],\n \"number_of_agents\": 4,\n \"agents\": [\n {\n \"name\": \"Identification Agent\",\n \"system_prompt\": \"Identify the problem\"\n },\n {\n \"name\": \"Design Agent\",\n \"system_prompt\": \"Design the solution\"\n },\n {\n \"name\": \"Implementation Agent\",\n \"system_prompt\": \"Implement the solution\"\n },\n {\n \"name\": \"Deployment Agent\",\n \"system_prompt\": \"Deploy the solution\"\n }\n ]\n}\n \n\n\nHuman:: Create a self-driving car system using a team of AI agents\n\n\nSwarm Orchestrator: \n```json\n{\n \"plan\": [\n \"Perception\",\n \"Planning\",\n \"Control\",\n \"Monitoring\",\n \"Mapping & Localization\"\n ],\n \"number_of_agents\": 5,\n \"agents\": [\n {\n \"name\": \"Perception Agent\",\n \"system_prompt\": \"Perceive the surrounding environment using sensors like cameras, radar and lidar.\"\n },\n {\n \"name\": \"Planning Agent\",\n \"system_prompt\": \"Plan an optimal, collision-free path to the destination.\" \n },\n {\n \"name\": \"Control Agent\",\n \"system_prompt\": \"Execute steering, acceleration and braking commands to drive along the planned path.\"\n },\n {\n \"name\": \"Monitoring Agent\", \n \"system_prompt\": \"Continuously monitor the vehicle health and behavior to detect anomalies.\"\n },\n {\n \"name\": \"Mapping & Localization Agent\",\n \"system_prompt\": \"Build and update maps used for localization and navigation planning.\"\n }\n ]\n}\n```\n\nThis decomposes the key capabilities needed for a self-driving car system into agents with focused responsibilities. 
The orchestrator can coordinate the agents to work together to enable autonomous driving. I kept the prompts compact as requested. Let me know if you need any changes!\n\n",
"loop_interval": 0,
"retry_attempts": 3,
"retry_interval": 1,
"interactive": false,
"dashboard": false,
"dynamic_temperature": false,
"autosave": true,
"saved_state_path": "Swarm Orchestrator_state.json",
"max_loops": 1
}
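Because the orchestrator is instructed to return only JSON inside a markdown fence, a caller has to strip the fence before parsing. A minimal sketch of that step follows; the helper name and the regex are illustrative assumptions, not part of this commit.

import json
import re

def parse_orchestrator_output(raw: str) -> dict:
    """Extract the JSON payload from a ```json ... ``` fenced reply."""
    match = re.search(r"```(?:json)?\s*(\{.*\})\s*```", raw, re.DOTALL)
    payload = json.loads(match.group(1) if match else raw)
    # Expected keys per the schema in the system prompt above
    assert {"plan", "number_of_agents", "agents"} <= payload.keys()
    return payload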
@@ -0,0 +1,185 @@
import logging
import os
import uuid
from typing import List, Optional

import chromadb
import numpy as np
from dotenv import load_dotenv

from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message

# Load environment variables
load_dotenv()


# Results storage using local ChromaDB
class ChromaDB:
    """
    ChromaDB database wrapper for storing and retrieving task results.

    Args:
        metric (str): The similarity metric to use. Defaults to "cosine".
        output_dir (str): The name of the collection to store the results in. Defaults to "swarms".
        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
        n_results (int, optional): The number of results to retrieve. Defaults to 2.
        docs_folder (str, optional): A folder whose files are ingested on initialization. Defaults to None.
        verbose (bool): Whether to surface ChromaDB log output. Defaults to False.

    Methods:
        add: Add a document to the collection and return its ID.
        query: Retrieve the documents most similar to a query.
        traverse_directory: Ingest every file found under docs_folder.

    Examples:
        >>> chromadb = ChromaDB(
        >>>     metric="cosine",
        >>>     output_dir="results",
        >>> )
        >>> doc_id = chromadb.add("some document")
        >>> docs = chromadb.query("some query")
    """

    def __init__(
        self,
        metric: str = "cosine",
        output_dir: str = "swarms",
        limit_tokens: Optional[int] = 1000,
        n_results: int = 2,
        docs_folder: Optional[str] = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self.metric = metric
        self.output_dir = output_dir
        self.limit_tokens = limit_tokens
        self.n_results = n_results
        self.docs_folder = docs_folder
        self.verbose = verbose

        # Surface ChromaDB logs only when verbose is enabled
        if verbose:
            logging.getLogger("chromadb").setLevel(logging.INFO)

        # Create a persistent Chroma client
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            ),
            *args,
            **kwargs,
        )
        # Keep a reference to the persistent ChromaDB client
        self.client = chroma_client

        # Create Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=output_dir,
            metadata={"hnsw:space": metric},
            *args,
            **kwargs,
        )
        display_markdown_message(
            "ChromaDB collection created:"
            f" {self.collection.name} with metric: {self.metric} and"
            f" output directory: {self.output_dir}"
        )

        # If a docs folder was given, ingest its contents
        if docs_folder:
            display_markdown_message(
                f"Traversing directory: {docs_folder}"
            )
            self.traverse_directory()

    def add(
        self,
        document: str,
        images: List[np.ndarray] = None,
        img_urls: List[str] = None,
        *args,
        **kwargs,
    ):
        """
        Add a document to the ChromaDB collection.

        Args:
            document (str): The document to be added.
            images (List[np.ndarray], optional): Image arrays to store alongside the document. Defaults to None.
            img_urls (List[str], optional): URIs of images to store alongside the document. Defaults to None.

        Returns:
            str: The ID of the added document.
        """
        try:
            doc_id = str(uuid.uuid4())
            self.collection.add(
                ids=[doc_id],
                documents=[document],
                images=images,
                uris=img_urls,
                *args,
                **kwargs,
            )
            return doc_id
        except Exception as e:
            raise Exception(f"Failed to add document: {str(e)}")

    def query(
        self,
        query_text: str,
        query_images: List[np.ndarray] = None,
        *args,
        **kwargs,
    ):
        """
        Query documents from the ChromaDB collection.

        Args:
            query_text (str): The query string.
            query_images (List[np.ndarray], optional): Image arrays to query with. Defaults to None.

        Returns:
            list: The retrieved documents for the query.
        """
        try:
            docs = self.collection.query(
                query_texts=[query_text],
                query_images=query_images,
                n_results=self.n_results,
                *args,
                **kwargs,
            )["documents"]
            return docs[0]
        except Exception as e:
            raise Exception(f"Failed to query documents: {str(e)}")

    def traverse_directory(self):
        """
        Traverse every file in docs_folder and its subdirectories and add
        each one to the collection; image files are added by path.

        Returns:
            str: The ID returned by the last add call.
        """
        image_extensions = [
            ".jpg",
            ".jpeg",
            ".png",
        ]
        images = []
        added_to_db = None
        for root, dirs, files in os.walk(self.docs_folder):
            for file in files:
                _, ext = os.path.splitext(file)
                if ext.lower() in image_extensions:
                    images.append(os.path.join(root, file))
                else:
                    # Convert the file contents to text using its full path
                    data = data_to_text(os.path.join(root, file))
                    added_to_db = self.add(data)
                    print(f"{file} added to Database")
        if images:
            # Store each image path as its own entry so ids and documents stay aligned
            for image_path in images:
                added_to_db = self.add(image_path)
            print(f"{len(images)} images added to Database")
        return added_to_db
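A minimal usage sketch for the wrapper above; the collection name, document strings, and query text are illustrative, and it assumes chromadb and the swarms utilities are installed.

# Illustrative usage of the ChromaDB wrapper (not part of this commit)
from playground.demos.agent_in_5.chroma_db import ChromaDB

memory = ChromaDB(metric="cosine", output_dir="results", n_results=2)

# Store two results, then fetch the closest match for a query
memory.add("Perception agent: fuse camera, radar and lidar detections")
memory.add("Planning agent: generate a collision-free trajectory")
closest = memory.query("How do we plan a safe path?")
print(closest)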
@@ -0,0 +1,70 @@
"""
Building an Autonomous Agent in 5 minutes with:
- LLM: OpenAI, Anthropic, EleutherAI, Hugging Face Transformers
- Tools: Search, Browser, etc.
- Long Term Memory: ChromaDB, Weaviate, Pinecone, etc.
"""
from swarms import Agent, OpenAIChat, tool
from playground.demos.agent_in_5.chroma_db import ChromaDB

# Initialize the memory
chroma = ChromaDB(
    metric="cosine",
    limit_tokens=1000,
    verbose=True,
    # docs_folder = "docs"  # Add your docs folder here
)


"""
|
||||
How to make a tool in Swarms:
|
||||
- Use the @tool decorator
|
||||
- Define the function with the required arguments
|
||||
- Add a docstring with the description of the tool
|
||||
"""
|
||||
|
||||
|
||||
# Create a tool
@tool  # Use this decorator
def browser(query: str = None):  # Add types
    """
    Opens a web browser and performs a Google search with the given query.

    Args:
        query (str): The search query to be performed.

    Returns:
        str: A message indicating that the browser is being opened for the given query.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Opening browser for: {query}"


# Initialize the agent
agent = Agent(
    llm=OpenAIChat(),
    agent_name="AI Engineer",
    agent_description=(
        "Creates AI Models for special use cases using PyTorch"
    ),
    system_prompt=(
        "Create an AI model for earthquake prediction using PyTorch."
    ),
    max_loops=4,  # or "auto"
    autosave=True,
    dashboard=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[browser],
    long_term_memory=chroma,  # pass in your memory object
)

# Run the agent
out = agent.run(
    "Let's make an AI model for earthquake prediction in pytorch."
)
print(out)
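As a further illustration of the tool recipe described above, a second tool would follow the same pattern; the calculator function below is a hedged sketch, not part of this commit, and would be registered with tools=[browser, calculator].

@tool
def calculator(expression: str = None):
    """
    Evaluates a basic arithmetic expression and returns the result.

    Args:
        expression (str): The arithmetic expression to evaluate, e.g. "2 + 2".

    Returns:
        str: The result of the evaluation, or an error message.
    """
    try:
        # Evaluate with builtins disabled to keep the sketch self-contained
        return str(eval(expression, {"__builtins__": {}}, {}))
    except Exception as e:
        return f"Could not evaluate '{expression}': {e}"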
@@ -0,0 +1,52 @@
meta_system_prompt_generator = """


### Meta-Prompter Template for Agent-Based Task Prompt Generation

**Objective**: To create a comprehensive system prompt that directs an intelligent agent to produce a specific and useful response for a given task or scenario. Only return the prompt for the agent you're instructing. Nothing else.


1. **Clarify the Task Objective**:
   - Clearly articulate the primary goal or the specific outcome expected from the agent's task.
   - Highlight the core problem or question the agent needs to address.

2. **Establish Key Requirements**:
   - Enumerate any crucial requirements or limitations for the agent's response, such as response length, format, or the inclusion/exclusion of certain types of information.
   - Outline the expected depth of detail or complexity in the response.

3. **Provide Essential Context**:
   - Offer relevant background or contextual information to ensure the agent's responses are accurate and pertinent.
   - Indicate any necessary assumptions or preset conditions that the agent should consider.

4. **Determine the Interaction Style**:
   - Define the desired tone and style for the agent's responses, whether it be professional, casual, instructional, or another specified tone.
   - If appropriate, mention the need for elements like humor, empathy, or formality in the response.

5. **Outline Feedback and Iteration Processes**:
   - Describe the method for evaluating the effectiveness of the agent's responses and the mechanism for providing feedback.
   - Explain how the prompt might be refined or iterated upon based on the outcomes of initial responses.

6. **Incorporate Examples**:
   - Provide example responses to illustrate the desired outcome clearly. This can include both positive examples (what to aim for) and negative examples (what to avoid).
   - Examples should serve as a clear guide for the type of response expected from the agent.

7. **Iterative Refinement**:
   - Review the draft prompt to ensure it aligns with the task objective and is clear and comprehensive.
   - Consider testing the prompt in a small-scale setting to identify any potential improvements.

### Example Meta-Prompt Creation:

- **Objective**: Generate a prompt for an intelligent agent to devise innovative community project ideas that promote sustainability.
- **Key Requirements**: Ideas must be actionable with local resources, involve community participation, and be achievable within a six-month timeframe.
- **Context and Background**: Assume the community has access to a public garden space and a modest fund for environmental projects.
- **Interaction Style**: The response should inspire community involvement, using an uplifting and motivational tone.
- **Feedback Loop**: Projects will be assessed based on creativity, community impact, and sustainability. Feedback will guide the refinement of future prompts.
- **Examples**:
  - Desired response example: "Organize a 'green market' where local vendors and farmers can sell sustainably produced goods."
  - Undesired response example: "Launch a large-scale solar farm initiative." (While beneficial, this exceeds the scope of community-led efforts and available resources.)

####### Meta-Prompter Template Ends Here #######

Now remember to only return the prompt for the agent you're instructing. Nothing else.

"""
@@ -0,0 +1,27 @@
from swarms.structs.agent import Agent
from typing import Union
from swarms.models.popular_llms import OpenAIChat
from swarms.models.base_llm import AbstractLLM
from swarms.prompts.meta_system_prompt import (
    meta_system_prompt_generator,
)

meta_prompter_llm = OpenAIChat(
    system_prompt=str(meta_system_prompt_generator)
)


def meta_system_prompt(
    agent: Union[Agent, AbstractLLM], system_prompt: str
) -> str:
    """
    Generates a meta system prompt for the given agent using the provided system prompt.

    Args:
        agent (Union[Agent, AbstractLLM]): The agent or LLM (large language model) for which the meta system prompt is generated. Currently accepted for interface compatibility and not used in the call itself.
        system_prompt (str): The system prompt used to generate the meta system prompt.

    Returns:
        str: The generated meta system prompt.
    """
    return meta_prompter_llm(system_prompt)
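A minimal usage sketch for the helper above; the draft prompt text, the choice of OpenAIChat as the target, and the OPENAI_API_KEY requirement are illustrative assumptions, not part of this commit.

# Illustrative usage (assumes OPENAI_API_KEY is set in the environment)
from swarms.models.popular_llms import OpenAIChat

worker_llm = OpenAIChat()
draft_prompt = "You are a perception agent for a self-driving car."

# The first argument identifies the agent/LLM being instructed; the draft
# prompt is rewritten into a fuller system prompt by the meta-prompter.
improved_prompt = meta_system_prompt(worker_llm, draft_prompt)
print(improved_prompt)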