pull/453/head
Kye 9 months ago
parent 842068cb5c
commit 53fc90d9ed

@ -9,6 +9,7 @@ from swarms.models.popular_llms import Anthropic, OpenAIChat
from swarms.models.base_llm import BaseLLM
from swarms.memory.base_vectordb import BaseVectorDatabase
boss_sys_prompt = (
"You're the Swarm Orchestrator, like a project manager of a"
" bustling hive. When a task arises, you tap into your network of"

@ -4,7 +4,7 @@ from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from swarms.agents.multion_agent import MultiOnAgent
-from swarms.memory.chroma_db import ChromaDB
+from playground.memory.chroma_db import ChromaDB
from swarms.tools.tool import tool
from swarms.tools.code_interpreter import SubprocessCodeInterpreter

@ -4,7 +4,7 @@ import os
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
-from swarms.memory.chroma_db import ChromaDB
+from playground.memory.chroma_db import ChromaDB
from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
from swarms.tools.tool import tool

@ -1,161 +0,0 @@
from typing import List
from httpx import RequestError
from swarms.memory.base_vectordb import BaseVectorDatabase
try:
from sentence_transformers import SentenceTransformer
except ImportError:
print("Please install the sentence-transformers package")
print("pip install sentence-transformers")
try:
from qdrant_client import QdrantClient
from qdrant_client.http.models import (
Distance,
PointStruct,
VectorParams,
)
except ImportError:
print("Please install the qdrant-client package")
print("pip install qdrant-client")
class Qdrant(BaseVectorDatabase):
"""
Qdrant class for managing collections and performing vector operations using QdrantClient.
Attributes:
client (QdrantClient): The Qdrant client for interacting with the Qdrant server.
collection_name (str): Name of the collection to be managed in Qdrant.
model (SentenceTransformer): The model used for generating sentence embeddings.
Args:
api_key (str): API key for authenticating with Qdrant.
host (str): Host address of the Qdrant server.
port (int): Port number of the Qdrant server. Defaults to 6333.
collection_name (str): Name of the collection to be used or created. Defaults to "qdrant".
model_name (str): Name of the model to be used for embeddings. Defaults to "BAAI/bge-small-en-v1.5".
https (bool): Flag to indicate if HTTPS should be used. Defaults to True.
"""
def __init__(
self,
api_key: str,
host: str,
port: int = 6333,
collection_name: str = "qdrant",
model_name: str = "BAAI/bge-small-en-v1.5",
https: bool = True,
):
try:
            self.client = QdrantClient(
                url=host, port=port, api_key=api_key, https=https
            )
self.collection_name = collection_name
self._load_embedding_model(model_name)
self._setup_collection()
except RequestError as e:
print(f"Error setting up QdrantClient: {e}")
def _load_embedding_model(self, model_name: str):
"""
Loads the sentence embedding model specified by the model name.
Args:
model_name (str): The name of the model to load for generating embeddings.
"""
try:
self.model = SentenceTransformer(model_name)
except Exception as e:
print(f"Error loading embedding model: {e}")
def _setup_collection(self):
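        """
        Creates the Qdrant collection if it does not already exist,
        sized to the embedding model's output dimension.
        """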
try:
exists = self.client.get_collection(self.collection_name)
if exists:
print(
f"Collection '{self.collection_name}' already"
" exists."
)
except Exception:
self.client.create_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(
size=self.model.get_sentence_embedding_dimension(),
distance=Distance.DOT,
),
)
print(f"Collection '{self.collection_name}' created.")
def add(self, docs: List[dict], *args, **kwargs):
"""
Adds vector representations of documents to the Qdrant collection.
Args:
docs (List[dict]): A list of documents where each document is a dictionary with at least a 'page_content' key.
Returns:
OperationResponse or None: Returns the operation information if successful, otherwise None.
"""
points = []
for i, doc in enumerate(docs):
try:
if "page_content" in doc:
embedding = self.model.encode(
doc["page_content"], normalize_embeddings=True
)
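                    # Note: point ids restart at 1 on every call, so a
                    # second add() call overwrites earlier points.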
points.append(
PointStruct(
id=i + 1,
vector=embedding,
payload={"content": doc["page_content"]},
)
)
else:
print(
f"Document at index {i} is missing"
" 'page_content' key"
)
except Exception as e:
print(f"Error processing document at index {i}: {e}")
try:
operation_info = self.client.upsert(
collection_name=self.collection_name,
wait=True,
points=points,
*args,
**kwargs,
)
return operation_info
except Exception as e:
print(f"Error adding vectors: {e}")
return None
def query(self, query: str, limit: int = 3, *args, **kwargs):
"""
Searches the collection for vectors similar to the query vector.
Args:
query (str): The query string to be converted into a vector and used for searching.
limit (int): The number of search results to return. Defaults to 3.
Returns:
SearchResult or None: Returns the search results if successful, otherwise None.
"""
        try:
            # Forward extra args/kwargs only to the search call below;
            # SentenceTransformer.encode does not accept them here.
            query_vector = self.model.encode(
                query, normalize_embeddings=True
            )
search_result = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
limit=limit,
*args,
**kwargs,
)
return search_result
except Exception as e:
print(f"Error searching vectors: {e}")
return None
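For reference, a minimal usage sketch of the removed wrapper, assuming a reachable Qdrant endpoint; the host, API key, and collection name below are placeholders:

from swarms.memory.qdrant import Qdrant  # import path prior to this commit

db = Qdrant(
    api_key="YOUR_QDRANT_API_KEY",  # placeholder
    host="your-cluster.qdrant.io",  # placeholder
    collection_name="docs",
)
db.add([{"page_content": "Qdrant is a vector database."}])
print(db.query("What is Qdrant?", limit=1))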

@ -27,7 +27,6 @@ from swarms.structs.multi_process_workflow import (
from swarms.structs.multi_threaded_workflow import (
MultiThreadedWorkflow,
)
-from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.plan import Plan
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.schemas import (

@ -1,16 +1,16 @@
import asyncio
import json
import logging
-from typing import Union
import os
import random
import sys
import time
import uuid
-from typing import Any, Callable, Dict, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import yaml
from loguru import logger
from pydantic import BaseModel
from termcolor import colored
from swarms.memory.base_vectordb import BaseVectorDatabase
@ -18,23 +18,21 @@ from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)
+from swarms.prompts.worker_prompt import tool_usage_worker_prompt
from swarms.structs.conversation import Conversation
+from swarms.tools.tool import BaseTool
+from swarms.structs.schemas import ManySteps, Step
+from swarms.structs.yaml_model import YamlModel
+from swarms.telemetry.user_utils import get_user_device_data
+from swarms.tools.code_interpreter import SubprocessCodeInterpreter
+from swarms.utils.data_to_text import data_to_text
+from swarms.utils.parse_code import extract_code_from_markdown
+from swarms.utils.pdf_to_text import pdf_to_text
from swarms.tools.exec_tool import execute_tool_by_name
-from swarms.prompts.worker_prompt import tool_usage_worker_prompt
-from pydantic import BaseModel
from swarms.tools.pydantic_to_json import (
base_model_to_openai_function,
multi_base_model_to_openai_function,
)
-from swarms.structs.schemas import Step, ManySteps
-from swarms.telemetry.user_utils import get_user_device_data
-from swarms.structs.yaml_model import YamlModel
-from swarms.tools.code_interpreter import SubprocessCodeInterpreter
-from swarms.tools.tool import BaseTool
-from swarms.utils.data_to_text import data_to_text
-from swarms.utils.parse_code import extract_code_from_markdown
-from swarms.utils.pdf_to_text import pdf_to_text
# Utils

@ -1,90 +0,0 @@
from swarms.structs.base_structure import BaseStructure
from swarms.structs.task import Task
from swarms.utils.logger import logger # noqa: F401
class NonlinearWorkflow(BaseStructure):
"""
Represents a Directed Acyclic Graph (DAG) workflow.
Attributes:
tasks (dict): A dictionary mapping task names to Task objects.
edges (dict): A dictionary mapping task names to a list of dependencies.
Methods:
add(task: Task, *dependencies: str): Adds a task to the workflow with its dependencies.
run(): Executes the workflow by running tasks in topological order.
Examples:
>>> from swarms.models import OpenAIChat
>>> from swarms.structs import NonlinearWorkflow, Task
>>> llm = OpenAIChat(openai_api_key="")
>>> task = Task(llm, "What's the weather in miami")
>>> workflow = NonlinearWorkflow()
>>> workflow.add(task)
>>> workflow.run()
"""
def __init__(self, stopping_token: str = "<DONE>"):
self.tasks = {}
self.edges = {}
self.stopping_token = stopping_token
def add(self, task: Task, *dependencies: str):
"""
Adds a task to the workflow with its dependencies.
Args:
task (Task): The task to be added.
dependencies (str): Variable number of dependency task names.
Raises:
AssertionError: If the task is None.
Returns:
None
"""
assert task is not None, "Task cannot be None"
self.tasks[task.name] = task
self.edges[task.name] = list(dependencies)
logger.info(f"[NonlinearWorkflow] [Added task {task.name}]")
def run(self):
"""
Executes the workflow by running tasks in topological order.
Raises:
Exception: If a circular dependency is detected.
Returns:
None
"""
try:
# Create a copy of the edges
edges = self.edges.copy()
while edges:
# Get all tasks with no dependencies
ready_tasks = [
task for task, deps in edges.items() if not deps
]
if not ready_tasks:
raise Exception("Circular dependency detected")
# Run all ready tasks
for task in ready_tasks:
result = self.tasks[task].execute()
if result == self.stopping_token:
return
del edges[task]
# Remove dependencies on the ready tasks
for deps in edges.values():
for task in ready_tasks:
if task in deps:
deps.remove(task)
except Exception as error:
logger.error(f"[ERROR][NonlinearWorkflow] {error}")
raise error
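For reference, a sketch of how the removed workflow was driven, following the class docstring and assuming Task exposes name and execute() as run() requires:

from swarms.models import OpenAIChat
from swarms.structs import Task
from swarms.structs.nonlinear_workflow import NonlinearWorkflow  # path prior to this commit

llm = OpenAIChat(openai_api_key="")  # key elided as in the docstring
fetch = Task(llm, "Summarize the weather in Miami")
report = Task(llm, "Draft a short report from the summary")

workflow = NonlinearWorkflow()
workflow.add(fetch)               # no dependencies, runs first
workflow.add(report, fetch.name)  # runs only after fetch completes
workflow.run()                    # executes in topological order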

@ -1,32 +1,10 @@
from typing import List
+from pydantic import BaseModel
from swarms.structs.step import Step
-class Plan:
-    def __init__(self, steps: List[Step]):
-        """
-        Initializes a Plan object.
-        Args:
-            steps (List[Step]): A list of Step objects representing the steps in the plan.
-        """
-        self.steps = steps
-    def __str__(self) -> str:
-        """
-        Returns a string representation of the Plan object.
-        Returns:
-            str: A string representation of the Plan object.
-        """
-        return str([str(step) for step in self.steps])
-    def __repr(self) -> str:
-        """
-        Returns a string representation of the Plan object.
-        Returns:
-            str: A string representation of the Plan object.
-        """
-        return str(self)
+class Plan(BaseModel):
+    steps: List[Step]
+
+    class Config:
+        orm_mode = True
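A minimal sketch of the new Pydantic-based Plan, using a stand-in Step model since the real fields live in swarms.structs.step (Pydantic v1 conventions, matching orm_mode):

from typing import List
from pydantic import BaseModel

class Step(BaseModel):  # stand-in for swarms.structs.step.Step
    task: str

class Plan(BaseModel):
    steps: List[Step]

    class Config:
        orm_mode = True  # allows Plan.from_orm(...) on attribute-style objects

plan = Plan(steps=[Step(task="research"), Step(task="draft")])
print(plan.json())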

@ -1,89 +0,0 @@
from typing import List
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import (
BaseSwarm,
)
from swarms.structs.conversation import Conversation
from swarms.utils.logger import logger
class StackOverflowSwarm(BaseSwarm):
"""
Represents a swarm of agents that work together to solve a problem or answer a question on Stack Overflow.
Attributes:
agents (List[Agent]): The list of agents in the swarm.
autosave (bool): Flag indicating whether to automatically save the conversation.
verbose (bool): Flag indicating whether to display verbose output.
save_filepath (str): The filepath to save the conversation.
conversation (Conversation): The conversation object for storing the interactions.
Examples:
>>> from swarms.structs.agent import Agent
>>> from swarms.structs.stack_overflow_swarm import StackOverflowSwarm
"""
def __init__(
self,
agents: List[Agent],
autosave: bool = False,
verbose: bool = False,
save_filepath: str = "stack_overflow_swarm.json",
eval_agent: Agent = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.agents = agents
self.autosave = autosave
self.verbose = verbose
self.save_filepath = save_filepath
self.eval_agent = eval_agent
# Configure conversation
self.conversation = Conversation(
time_enabled=True,
autosave=autosave,
save_filepath=save_filepath,
*args,
**kwargs,
)
# Counter for the number of upvotes per post
self.upvotes = 0
# Counter for the number of downvotes per post
self.downvotes = 0
# Forum for the agents to interact
self.forum = []
def run(self, task: str, *args, **kwargs):
"""
        Run the swarm to solve a problem or answer a question, Stack Overflow style.
Args:
task (str): The task to be performed by the agents.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
List[str]: The conversation history.
"""
# Add the task to the conversation
self.conversation.add("Human", task)
logger.info(f"Task: {task} Added to the Forum.")
# Run the agents and get their responses and append to the conversation
for agent in self.agents:
response = agent.run(
self.conversation.return_history_as_string(),
*args,
**kwargs,
)
# Add to the conversation
self.conversation.add(agent.ai_name, f"{response}")
logger.info(f"[{agent.ai_name}]: [{response}]")
return self.conversation.return_history_as_string()
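For reference, a usage sketch of the removed swarm; the model setup is a placeholder, and since run() reads each agent's ai_name, the agents are assumed to define it:

from swarms import Agent, OpenAIChat
from swarms.structs.stack_overflow_swarm import StackOverflowSwarm  # path prior to this commit

llm = OpenAIChat(openai_api_key="")  # placeholder key
answerer = Agent(llm=llm, agent_name="Answerer")
reviewer = Agent(llm=llm, agent_name="Reviewer")

swarm = StackOverflowSwarm(agents=[answerer, reviewer], verbose=True)
print(swarm.run("How do I reverse a linked list in Python?"))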

@ -1,7 +1,7 @@
import os
from unittest.mock import patch
-from swarms.memory.pinecone import PineconeDB
+from playground.memory.pinecone import PineconeDB
api_key = os.getenv("PINECONE_API_KEY") or ""

@ -3,7 +3,7 @@ from unittest.mock import patch
from dotenv import load_dotenv
-from swarms.memory.pg import PostgresDB
+from playground.memory.pg import PostgresDB
load_dotenv()
