[DEMO][Swarm Mechanic]

pull/480/head
Kye Gomez 8 months ago
parent 2cf86acd6d
commit 36492c1e0f

.gitignore (vendored)

@@ -24,6 +24,7 @@ venv
 .DS_Store
 Cargo.lock
 .DS_STORE
+artifacts_logs
 Cargo.lock
 Medical Treatment Recommendation Agent_state.json
 swarms/agents/.DS_Store

@@ -0,0 +1,88 @@
"""
pip3 install -U swarms
pip3 install -U chromadb

Flow: task -> Understanding Agent [understands the problem better] -> summary of the conversation -> research agent that has access to the internet via Perplexity -> final RAG agent

# Todo
- Use a better LLM -- GPT-4, Claude, Gemini
- Write a better system prompt
- Populate the vector database with Q/A pairs from past history
"""

from typing import Optional

from pydantic import BaseModel

from playground.memory.chromadb_example import ChromaDB
from swarms import Agent, AgentRearrange, llama3Hosted

# Initialize the hosted Llama 3 language model
llm = llama3Hosted(max_tokens=3000)

# Initialize memory
memory = ChromaDB(output_dir="swarm_mechanic", n_results=2, verbose=True)


# Perplexity Agent
# def webbrowser(query: str):
#     # put your logic here
#     return query


# Output schema
class EvaluatorOutputSchema(BaseModel):
    evaluation: Optional[str] = None
    question_for_user: Optional[str] = None


# Initialize agents for individual tasks
agent1 = Agent(
    agent_name="Summary ++ Highlighter Agent",
    system_prompt="Generate a simple, direct, and reliable summary of the input task alongside the highlights",
    llm=llm,
    max_loops=1,
)

# Point out if there are details that can be added,
# e.g., "What do you mean? Which lights do you have turned on?"
agent2 = Agent(
    agent_name="Evaluator",
    system_prompt="Summarize and evaluate the summary and the user's demand; always be interested in learning more about the situation with extreme precision.",
    llm=llm,
    max_loops=1,
    list_base_models=[EvaluatorOutputSchema],
)

# research_agent = Agent(
#     agent_name="Research Agent",
#     system_prompt="Summarize and evaluate the summary and the user's demand; always be interested in learning more about the situation with extreme precision.",
#     llm=llm,
#     max_loops=1,
#     tools=[webbrowser],
# )

agent3 = Agent(
    agent_name="Summarizer Agent",
    system_prompt="Summarize the entire history of the interaction",
    llm=llm,
    max_loops=1,
    long_term_memory=memory,
)

# Task
task = "Car Model: S-Class, Car Year: 2020, Car Mileage: 10000, all my service lights are on, what should I do?"

# Swarm
swarm = AgentRearrange(
    agents=[agent1, agent2, agent3],
    flow=f"{agent1.agent_name} -> {agent2.agent_name} -> {agent3.agent_name}",
    memory_system=memory,
)

# Run the task through the swarm
out = swarm.run(task)
print(out)
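Note: the flow string above resolves to "Summary ++ Highlighter Agent -> Evaluator -> Summarizer Agent", so the three agents run one after another, each receiving the previous agent's output. A minimal sketch of that handoff, assuming each Agent's run() returns a string (an illustration only, not the actual AgentRearrange internals):

# Hypothetical plain-Python equivalent of the sequential flow above
output = task
for agent in (agent1, agent2, agent3):
    output = agent.run(output)
print(output)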

@@ -1,15 +0,0 @@
from swarms.memory import ChromaDB

# Initialize the memory
chroma = ChromaDB(
    metric="cosine",
    limit_tokens=1000,
    verbose=True,
)

# Add text
text = "This is a test"
chroma.add(text)

# Search for similar text
similar_text = chroma.query(text)

@@ -1,15 +1,14 @@
 import logging
 import os
 import uuid
-from typing import Callable, List, Optional
+from typing import Callable, Optional

 import chromadb
-import numpy as np
 from dotenv import load_dotenv

-from swarms.memory.base_vectordb import BaseVectorDatabase
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.markdown_message import display_markdown_message
+from swarms.memory.base_vectordb import BaseVectorDatabase

 # Load environment variables
 load_dotenv()
@@ -46,7 +45,7 @@ class ChromaDB(BaseVectorDatabase):
         metric: str = "cosine",
         output_dir: str = "swarms",
         limit_tokens: Optional[int] = 1000,
-        n_results: int = 2,
+        n_results: int = 3,
         embedding_function: Callable = None,
         docs_folder: str = None,
         verbose: bool = False,
@@ -108,8 +107,6 @@ class ChromaDB(BaseVectorDatabase):
     def add(
         self,
         document: str,
-        images: List[np.ndarray] = None,
-        img_urls: List[str] = None,
         *args,
         **kwargs,
     ):
@@ -128,11 +125,12 @@ class ChromaDB(BaseVectorDatabase):
             self.collection.add(
                 ids=[doc_id],
                 documents=[document],
-                images=images,
-                uris=img_urls,
                 *args,
                 **kwargs,
             )
+            print("-----------------")
+            print("Document added successfully")
+            print("-----------------")
             return doc_id
         except Exception as e:
             raise Exception(f"Failed to add document: {str(e)}")
@@ -140,7 +138,6 @@ class ChromaDB(BaseVectorDatabase):
     def query(
         self,
         query_text: str,
-        query_images: List[np.ndarray],
         *args,
         **kwargs,
     ):
@@ -157,8 +154,7 @@
         try:
             docs = self.collection.query(
                 query_texts=[query_text],
-                query_images=query_images,
-                n_results=self.n_docs,
+                n_results=self.n_results,
                 *args,
                 **kwargs,
             )["documents"]
@@ -177,23 +173,12 @@
         """
         added_to_db = False

-        image_extensions = [
-            ".jpg",
-            ".jpeg",
-            ".png",
-        ]
-        images = []
         for root, dirs, files in os.walk(self.docs_folder):
             for file in files:
+                file = os.path.join(self.docs_folder, file)
                 _, ext = os.path.splitext(file)
-                if ext.lower() in image_extensions:
-                    images.append(os.path.join(root, file))
-                else:
-                    data = data_to_text(file)
-                    added_to_db = self.add([data])
-                    print(f"{file} added to Database")
-        if images:
-            added_to_db = self.add(img_urls=[images])
-            print(f"{len(images)} images added to Database ")
+                data = data_to_text(file)
+                added_to_db = self.add([data])
+                print(f"{file} added to Database")

         return added_to_db
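After this change the ChromaDB wrapper is text-only: add() takes a single document string, query() takes a single query string, and the number of returned documents is controlled by n_results. A minimal usage sketch under that assumption (the import path follows the demo above; if the wrapper lives elsewhere, e.g., swarms.memory, adjust the import; the document text is illustrative):

from playground.memory.chromadb_example import ChromaDB

# Text-only vector memory, matching the simplified add()/query() signatures above
memory = ChromaDB(metric="cosine", output_dir="swarm_mechanic", n_results=3, verbose=True)
doc_id = memory.add("S-Class, 2020, 10000 miles: all service lights are on.")
docs = memory.query("service lights are on")
print(doc_id, docs)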

@@ -11,7 +11,6 @@ agent1 = Agent(
     llm=llm,
     max_loops=1,
     dashboard=False,
-    tools=[],
 )
 agent2 = Agent(
     agent_name="Summarizer",
@@ -19,7 +18,6 @@ agent2 = Agent(
     llm=llm,
     max_loops=1,
     dashboard=False,
-    tools=[],
 )

 # Create the Sequential workflow

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "5.0.7"
+version = "5.0.8"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -31,4 +31,4 @@ def cleanup_json_logs(name: str = None):


 # Call the function
-cleanup_json_logs("json_logs")
+cleanup_json_logs("artifacts_logs")

@@ -1,7 +1,7 @@
 import json
 from typing import List

-from beartype import beartype
+# from beartype import beartype

 from swarms.structs.agent import Agent
 from swarms.structs.base_swarm import BaseSwarm
@@ -214,7 +214,6 @@ class HiearchicalSwarm(BaseSwarm):
             logger.error("Invalid JSON data, try again.")
             raise json.JSONDecodeError

-    @beartype
     def select_agent_and_send_task(
         self, name: str = None, task: str = None, *args, **kwargs
     ):
@@ -246,7 +245,6 @@ class HiearchicalSwarm(BaseSwarm):
             logger.error(f"Error: {e}")
             raise e

-    @beartype
     def run(self, task: str = None, *args, **kwargs):
         """
         Run the hierarchical swarm.

@@ -0,0 +1,73 @@
import timeit
from typing import Callable, Iterable, List, Optional, TypeVar

T = TypeVar("T")
R = TypeVar("R")


def optimized_loop(
    data: Iterable[T],
    operation: Callable[[T], R],
    condition: Optional[Callable[[T], bool]] = None,
) -> List[R]:
    """
    Perform an optimized loop over the input data, applying an operation to each element.
    Optionally, filter elements based on a condition before applying the operation.

    Args:
        data (Iterable[T]): The input data to be processed. Can be any iterable type.
        operation (Callable[[T], R]): The operation to be applied to each element.
        condition (Optional[Callable[[T], bool]]): An optional condition to filter elements before applying the operation.

    Returns:
        List[R]: The result of applying the operation to the filtered elements.
    """
    if condition is not None:
        return [operation(x) for x in data if condition(x)]
    else:
        return [operation(x) for x in data]


# Sample data, operation, and condition for benchmarking
data = list(range(1000000))
operation = lambda x: x * x
condition = lambda x: x % 2 == 0


# Define a traditional loop for comparison
def traditional_loop(data: Iterable[int]) -> List[int]:
    result = []
    for x in data:
        if x % 2 == 0:
            result.append(x * x)
    return result


# Define a benchmarking function
def benchmark():
    # Time the execution of the optimized loop
    optimized_time = timeit.timeit(
        stmt="optimized_loop(data, operation, condition)",
        setup="from __main__ import optimized_loop, data, operation, condition",
        globals=globals(),
        number=10,
    )
    print(f"Optimized loop execution time: {optimized_time:.4f} seconds")

    # Time the execution of the traditional loop for comparison
    traditional_time = timeit.timeit(
        stmt="traditional_loop(data)",
        setup="from __main__ import traditional_loop, data",
        globals=globals(),
        number=10,
    )
    print(
        f"Traditional loop execution time: {traditional_time:.4f} seconds"
    )


# Run the benchmark
if __name__ == "__main__":
    benchmark()
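As a quick sanity check of the new helper, applying optimized_loop to a small range with the same operation and condition yields the squares of the even numbers (a tiny illustrative example, separate from the benchmark above):

# Squares of the even numbers in 0..9
print(optimized_loop(range(10), lambda x: x * x, lambda x: x % 2 == 0))
# -> [0, 4, 16, 36, 64]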