parent 842068cb5c
commit 53fc90d9ed
@@ -1,161 +0,0 @@
from typing import List

from httpx import RequestError

from swarms.memory.base_vectordb import BaseVectorDatabase

try:
    from sentence_transformers import SentenceTransformer
except ImportError:
    print("Please install the sentence-transformers package")
    print("pip install sentence-transformers")

try:
    from qdrant_client import QdrantClient
    from qdrant_client.http.models import (
        Distance,
        PointStruct,
        VectorParams,
    )
except ImportError:
    print("Please install the qdrant-client package")
    print("pip install qdrant-client")


class Qdrant(BaseVectorDatabase):
    """
    Qdrant class for managing collections and performing vector operations using QdrantClient.

    Attributes:
        client (QdrantClient): The Qdrant client for interacting with the Qdrant server.
        collection_name (str): Name of the collection to be managed in Qdrant.
        model (SentenceTransformer): The model used for generating sentence embeddings.

    Args:
        api_key (str): API key for authenticating with Qdrant.
        host (str): Host address of the Qdrant server.
        port (int): Port number of the Qdrant server. Defaults to 6333.
        collection_name (str): Name of the collection to be used or created. Defaults to "qdrant".
        model_name (str): Name of the model to be used for embeddings. Defaults to "BAAI/bge-small-en-v1.5".
        https (bool): Flag to indicate whether HTTPS should be used. Defaults to True.
    """

    def __init__(
        self,
        api_key: str,
        host: str,
        port: int = 6333,
        collection_name: str = "qdrant",
        model_name: str = "BAAI/bge-small-en-v1.5",
        https: bool = True,
    ):
        try:
            # Pass https through so the documented flag actually takes effect.
            self.client = QdrantClient(
                url=host, port=port, api_key=api_key, https=https
            )
            self.collection_name = collection_name
            self._load_embedding_model(model_name)
            self._setup_collection()
        except RequestError as e:
            print(f"Error setting up QdrantClient: {e}")

    def _load_embedding_model(self, model_name: str):
        """
        Loads the sentence embedding model specified by the model name.

        Args:
            model_name (str): The name of the model to load for generating embeddings.
        """
        try:
            self.model = SentenceTransformer(model_name)
        except Exception as e:
            print(f"Error loading embedding model: {e}")

    def _setup_collection(self):
        # get_collection raises if the collection does not exist, so the
        # except branch creates it, sized to the embedding model's dimension.
        try:
            exists = self.client.get_collection(self.collection_name)
            if exists:
                print(
                    f"Collection '{self.collection_name}' already"
                    " exists."
                )
        except Exception:
            self.client.create_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(
                    size=self.model.get_sentence_embedding_dimension(),
                    distance=Distance.DOT,
                ),
            )
            print(f"Collection '{self.collection_name}' created.")

    def add(self, docs: List[dict], *args, **kwargs):
        """
        Adds vector representations of documents to the Qdrant collection.

        Args:
            docs (List[dict]): A list of documents where each document is a dictionary with at least a 'page_content' key.

        Returns:
            OperationResponse or None: Returns the operation information if successful, otherwise None.
        """
        points = []
        for i, doc in enumerate(docs):
            try:
                if "page_content" in doc:
                    embedding = self.model.encode(
                        doc["page_content"], normalize_embeddings=True
                    )
                    # NOTE: ids restart at 1 on every call, so repeated calls
                    # upsert over (overwrite) earlier points.
                    points.append(
                        PointStruct(
                            id=i + 1,
                            vector=embedding.tolist(),
                            payload={"content": doc["page_content"]},
                        )
                    )
                else:
                    print(
                        f"Document at index {i} is missing"
                        " 'page_content' key"
                    )
            except Exception as e:
                print(f"Error processing document at index {i}: {e}")

        try:
            operation_info = self.client.upsert(
                collection_name=self.collection_name,
                wait=True,
                points=points,
                *args,
                **kwargs,
            )
            return operation_info
        except Exception as e:
            print(f"Error adding vectors: {e}")
            return None

    def query(self, query: str, limit: int = 3, *args, **kwargs):
        """
        Searches the collection for vectors similar to the query vector.

        Args:
            query (str): The query string to be converted into a vector and used for searching.
            limit (int): The number of search results to return. Defaults to 3.

        Returns:
            SearchResult or None: Returns the search results if successful, otherwise None.
        """
        try:
            # Forward extra options only to search; encode has its own
            # signature and would choke on search kwargs.
            query_vector = self.model.encode(
                query, normalize_embeddings=True
            )
            search_result = self.client.search(
                collection_name=self.collection_name,
                query_vector=query_vector.tolist(),
                limit=limit,
                *args,
                **kwargs,
            )
            return search_result
        except Exception as e:
            print(f"Error searching vectors: {e}")
            return None
@@ -1,90 +0,0 @@
from swarms.structs.base_structure import BaseStructure
from swarms.structs.task import Task
from swarms.utils.logger import logger  # noqa: F401


class NonlinearWorkflow(BaseStructure):
    """
    Represents a Directed Acyclic Graph (DAG) workflow.

    Attributes:
        tasks (dict): A dictionary mapping task names to Task objects.
        edges (dict): A dictionary mapping task names to a list of dependencies.

    Methods:
        add(task: Task, *dependencies: str): Adds a task to the workflow with its dependencies.
        run(): Executes the workflow by running tasks in topological order.

    Examples:
    >>> from swarms.models import OpenAIChat
    >>> from swarms.structs import NonlinearWorkflow, Task
    >>> llm = OpenAIChat(openai_api_key="")
    >>> task = Task(llm, "What's the weather in miami")
    >>> workflow = NonlinearWorkflow()
    >>> workflow.add(task)
    >>> workflow.run()

    """

    def __init__(self, stopping_token: str = "<DONE>"):
        self.tasks = {}
        self.edges = {}
        self.stopping_token = stopping_token

    def add(self, task: Task, *dependencies: str):
        """
        Adds a task to the workflow with its dependencies.

        Args:
            task (Task): The task to be added.
            dependencies (str): Variable number of dependency task names.

        Raises:
            AssertionError: If the task is None.

        Returns:
            None
        """
        assert task is not None, "Task cannot be None"
        self.tasks[task.name] = task
        self.edges[task.name] = list(dependencies)
        logger.info(f"[NonlinearWorkflow] [Added task {task.name}]")

    def run(self):
        """
        Executes the workflow by running tasks in topological order.

        Raises:
            Exception: If a circular dependency is detected.

        Returns:
            None
        """
        try:
            # Copy the adjacency lists so repeated runs don't mutate
            # self.edges (a plain .copy() would share the inner lists,
            # which deps.remove() below would then empty out).
            edges = {task: list(deps) for task, deps in self.edges.items()}

            while edges:
                # Get all tasks with no outstanding dependencies
                ready_tasks = [
                    task for task, deps in edges.items() if not deps
                ]

                if not ready_tasks:
                    raise Exception("Circular dependency detected")

                # Run all ready tasks
                for task in ready_tasks:
                    result = self.tasks[task].execute()
                    if result == self.stopping_token:
                        return
                    del edges[task]

                # Remove dependencies on the ready tasks
                for deps in edges.values():
                    for task in ready_tasks:
                        if task in deps:
                            deps.remove(task)
        except Exception as error:
            logger.error(f"[ERROR][NonlinearWorkflow] {error}")
            raise error
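To make the dependency semantics concrete, a hypothetical sketch: the three Task objects are assumed to already exist and, as add() requires, to expose a .name attribute.

workflow = NonlinearWorkflow()
workflow.add(research)               # no dependencies: runs in the first pass
workflow.add(draft, research.name)   # runs once research has executed
workflow.add(review, draft.name)     # runs once draft has executed
workflow.run()                       # executes research -> draft -> review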
@@ -1,32 +1,10 @@
 from typing import List

+from pydantic import BaseModel
+
 from swarms.structs.step import Step


-class Plan:
-    def __init__(self, steps: List[Step]):
-        """
-        Initializes a Plan object.
-
-        Args:
-            steps (List[Step]): A list of Step objects representing the steps in the plan.
-        """
-        self.steps = steps
-
-    def __str__(self) -> str:
-        """
-        Returns a string representation of the Plan object.
-
-        Returns:
-            str: A string representation of the Plan object.
-        """
-        return str([str(step) for step in self.steps])
-
-    def __repr__(self) -> str:
-        """
-        Returns a string representation of the Plan object.
-
-        Returns:
-            str: A string representation of the Plan object.
-        """
-        return str(self)
+class Plan(BaseModel):
+    steps: List[Step]
+
+    class Config:
+        orm_mode = True
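A minimal sketch of how the pydantic rewrite behaves; the Step field used here is an assumption for illustration, not taken from swarms.structs.step:

# Hypothetical Step field; the real Step model may differ.
plan = Plan(steps=[Step(task="research"), Step(task="write")])
print(plan.steps)  # a validated List[Step]
# With orm_mode = True (pydantic v1), Plan.from_orm(obj) can also build a
# Plan from any object exposing a compatible `steps` attribute.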
@@ -1,89 +0,0 @@
from typing import List

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.utils.logger import logger


class StackOverflowSwarm(BaseSwarm):
    """
    Represents a swarm of agents that work together to solve a problem or answer a question on Stack Overflow.

    Attributes:
        agents (List[Agent]): The list of agents in the swarm.
        autosave (bool): Flag indicating whether to automatically save the conversation.
        verbose (bool): Flag indicating whether to display verbose output.
        save_filepath (str): The filepath to save the conversation.
        conversation (Conversation): The conversation object for storing the interactions.

    Examples:
    >>> from swarms.structs.agent import Agent
    >>> from swarms.structs.stack_overflow_swarm import StackOverflowSwarm
    """

    def __init__(
        self,
        agents: List[Agent],
        autosave: bool = False,
        verbose: bool = False,
        save_filepath: str = "stack_overflow_swarm.json",
        eval_agent: Agent = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.agents = agents
        self.autosave = autosave
        self.verbose = verbose
        self.save_filepath = save_filepath
        self.eval_agent = eval_agent

        # Configure conversation
        self.conversation = Conversation(
            time_enabled=True,
            autosave=autosave,
            save_filepath=save_filepath,
            *args,
            **kwargs,
        )

        # Counter for the number of upvotes per post
        self.upvotes = 0

        # Counter for the number of downvotes per post
        self.downvotes = 0

        # Forum for the agents to interact
        self.forum = []

    def run(self, task: str, *args, **kwargs):
        """
        Run the swarm to solve a problem or answer a question, Stack Overflow-style.

        Args:
            task (str): The task to be performed by the agents.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            str: The conversation history as a single string.
        """
        # Add the task to the conversation
        self.conversation.add("Human", task)
        logger.info(f"Task: {task} Added to the Forum.")

        # Run each agent on the full history so far and append its response
        # to the conversation
        for agent in self.agents:
            response = agent.run(
                self.conversation.return_history_as_string(),
                *args,
                **kwargs,
            )
            # Add to the conversation
            self.conversation.add(agent.ai_name, f"{response}")
            logger.info(f"[{agent.ai_name}]: [{response}]")

        return self.conversation.return_history_as_string()
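A hypothetical run, assuming two Agent instances have already been constructed (their setup is omitted here):

qa_swarm = StackOverflowSwarm(agents=[answerer, reviewer])
history = qa_swarm.run("How do I reverse a linked list in Python?")
print(history)  # the task plus one response per agent, as a single string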