pull/546/head
Kye Gomez 6 months ago
parent 471996cf1e
commit da7ba6477f

@@ -74,19 +74,19 @@ Features:
✅ Long term memory database with RAG (ChromaDB, Pinecone, Qdrant)
```python
import os
from swarms import Agent, Anthropic
from swarms.prompts.finance_agent_sys_prompt import FINANCIAL_AGENT_SYS_PROMPT
from swarms.utils.data_to_text import data_to_text
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
model = OpenAIChat()
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=Anthropic(
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
),
max_loops="auto",
llm=model,
max_loops=1,
autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False,
@@ -114,12 +114,9 @@ agent = Agent(
)
contract = data_to_text("your_contract_pdf.pdf")
agent.run(
f"Analyze the following contract and give me a full summary: {contract}"
"What are the components of a startup's stock incentive equity plan?"
)
```
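The switch to `OpenAIChat` drops the explicit key wiring the Anthropic version had. A minimal sketch of the setup this example now assumes (hedged: `OpenAIChat` is expected to read `OPENAI_API_KEY` from the environment, or to accept an explicit `openai_api_key` argument):

```python
import os

from swarms import OpenAIChat

# Assumption: the key is exported as OPENAI_API_KEY (e.g. via a .env file)
model = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
```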
-----

@@ -123,7 +123,6 @@ markdown_extensions:
- pymdownx.tilde
nav:
- Home:
- Overview: "index.md"
- Install: "swarms/install/install.md"
- Docker Setup: "swarms/install/docker_setup.md"
- Contributing: "contributing.md"

@@ -1,16 +1,16 @@
import os
from swarms import Agent, Anthropic
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.utils.data_to_text import data_to_text
model = OpenAIChat()
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")),
max_loops="auto",
llm=model,
max_loops=1,
autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False,
@@ -33,13 +33,13 @@ agent = Agent(
# context_length=1000,
# tool_schema = dict
context_length=200000,
# tool_schema=
# tools
# agent_ops_on=True,
# long_term_memory=ChromaDB(docs_folder="artifacts"),
)
contract = data_to_text("your_contract_pdf.pdf")
agent.run(
f"Analyze the following contract and give me a full summary: {contract}"
"What are the components of a startup's stock incentive equity plan?"
)

@@ -0,0 +1,49 @@
from swarms import Agent, HuggingfaceLLM
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
model = HuggingfaceLLM(
model_id="meta-llama/Meta-Llama-3.1-8B",
max_tokens=4000,
temperature=0.1,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
# tools=[], # Add your functions here
# stopping_token="Stop!",
# interactive=True,
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp",
# # docs=
# # docs_folder="docs",
retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=200000,
# tool_schema=
# tools
# agent_ops_on=True,
# long_term_memory=ChromaDB(docs_folder="artifacts"),
)
agent.run(
"What are the components of a startup's stock incentive equity plan?"
)
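A note on the new Hugging Face example: `meta-llama/Meta-Llama-3.1-8B` is a gated repository, so the first download generally requires an authenticated token. A minimal sketch, assuming the token is exported as `HF_TOKEN`:

```python
import os

from huggingface_hub import login

# Assumption: a Hugging Face access token with access to the gated Llama repo
login(token=os.getenv("HF_TOKEN"))
```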

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "5.4.0"
version = "5.4.1"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@@ -3,7 +3,6 @@ from swarms.models.base_llm import BaseLLM # noqa: E402
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
from swarms.models.gpt_o import GPT4o
from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
from swarms.models.idefics import Idefics # noqa: E402
from swarms.models.kosmos_two import Kosmos # noqa: E402
@@ -75,6 +74,5 @@ __all__ = [
"Vilt",
"OpenAIEmbeddings",
"llama3Hosted",
"GPT4o",
"FireWorksAI",
]

@@ -1,106 +0,0 @@
import os
import base64
from dotenv import load_dotenv
from openai import OpenAI
from swarms.models.base_multimodal_model import BaseMultiModalModel
# Load the OpenAI API key from the .env file
load_dotenv()
# Initialize the OpenAI API key
api_key = os.environ.get("OPENAI_API_KEY")
# Function to encode the image
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
class GPT4o(BaseMultiModalModel):
"""
GPT4o is a class that represents a multi-modal conversational model based on GPT-4.
It extends the BaseMultiModalModel class.
Args:
system_prompt (str): The system prompt to be used in the conversation.
temperature (float): The temperature parameter for generating diverse responses.
max_tokens (int): The maximum number of tokens in the generated response.
openai_api_key (str): The API key for accessing the OpenAI GPT-4 API.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Attributes:
system_prompt (str): The system prompt to be used in the conversation.
temperature (float): The temperature parameter for generating diverse responses.
max_tokens (int): The maximum number of tokens in the generated response.
client (OpenAI): The OpenAI client for making API requests.
Methods:
run(task, local_img=None, img=None, *args, **kwargs):
Runs the GPT-4o model to generate a response based on the given task and image.
"""
def __init__(
self,
system_prompt: str = None,
temperature: float = 0.1,
max_tokens: int = 300,
openai_api_key: str = None,
*args,
**kwargs,
):
super().__init__()
self.system_prompt = system_prompt
self.temperature = temperature
self.max_tokens = max_tokens
self.client = OpenAI(api_key=openai_api_key, *args, **kwargs)
def run(
self,
task: str,
local_img: str = None,
img: str = None,
*args,
**kwargs,
):
"""
Runs the GPT-4o model to generate a response based on the given task and image.
Args:
task (str): The task or user prompt for the conversation.
local_img (str): The local path to the image file.
img (str): The URL of the image.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
str: The generated response from the GPT-4o model.
"""
img = encode_image(local_img)
response = self.client.chat.completions.create(
model="gpt-4o",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": task},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img}"
},
},
],
}
],
max_tokens=self.max_tokens,
temperature=self.temperature,
)
return response.choices[0].message.content
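With `GPT4o` removed from `swarms.models`, the same request can be made directly against the OpenAI client; the deleted class was a thin wrapper around this call. A minimal sketch based on the removed code (the image path is hypothetical):

```python
import base64

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

with open("chart.jpg", "rb") as image_file:
    b64 = base64.b64encode(image_file.read()).decode("utf-8")

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this chart"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                },
            ],
        }
    ],
    max_tokens=300,
)
print(response.choices[0].message.content)
```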

@@ -261,6 +261,11 @@ class Agent(BaseStructure):
log_directory: str = None,
project_path: str = None,
tool_system_prompt: str = tool_sop_prompt(),
max_tokens: int = 4096,
top_p: float = 0.9,
top_k: int = None,
frequency_penalty: float = 0.0,
presence_penalty: float = 0.0,
*args,
**kwargs,
):
@@ -346,6 +351,11 @@ class Agent(BaseStructure):
self.log_directory = log_directory
self.project_path = project_path
self.tool_system_prompt = tool_system_prompt
self.max_tokens = max_tokens
self.top_p = top_p
self.top_k = top_k
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
# Name
self.name = agent_name
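The new constructor parameters (`max_tokens`, `top_p`, `top_k`, `frequency_penalty`, `presence_penalty`) expose sampling controls at the agent level. A hedged sketch of passing them; whether each knob is honored depends on the underlying LLM (OpenAI's chat API, for instance, exposes no `top_k`):

```python
from swarms import Agent, OpenAIChat

agent = Agent(
    agent_name="Sampling-Demo-Agent",
    llm=OpenAIChat(),
    max_loops=1,
    max_tokens=4096,
    top_p=0.9,
    top_k=40,  # ignored by providers without top-k sampling
    frequency_penalty=0.2,
    presence_penalty=0.1,
)
```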
@@ -473,6 +483,20 @@ class Agent(BaseStructure):
self.feedback.append(feedback)
logging.info(f"Feedback received: {feedback}")
def initialize_llm(self, llm: Any) -> None:
return llm(
system_prompt=self.system_prompt,
max_tokens=self.max_tokens,
context_length=self.context_length,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty,
stop=self.stopping_token,
engine=self.engine,
)
def agent_initialization(self):
try:
print(
@@ -692,6 +716,7 @@ class Agent(BaseStructure):
# Clear the short memory
response = None
all_responses = []
if self.tokenizer is not None:
self.check_available_tokens()
@@ -728,9 +753,11 @@ class Agent(BaseStructure):
)
print(response)
# Add to memory
self.short_memory.add(
role=self.agent_name, content=response
)
all_responses.append(response)
else:
@@ -853,10 +880,12 @@ class Agent(BaseStructure):
if self.agent_ops_on is True:
self.check_end_session_agentops()
final_response = " ".join(all_responses)
if self.return_history:
return self.short_memory.return_history_as_string()
else:
return response
return final_response
except Exception as error:
logger.info(
@@ -1578,47 +1607,52 @@ class Agent(BaseStructure):
)
def convert_tool_into_openai_schema(self):
# Transform the tools into an openai schema
logger.info("Converting tools into OpenAI function calling schema")
try:
if all(callable(tool) for tool in self.tools):
for tool in self.tools:
# Transform the tool into a openai function calling schema
name = tool.__name__
description = tool.__doc__
logger.info(
f"Converting tool: {name} into an OpenAI function calling schema; documentation and type hints are required."
)
tool_schema_list = get_openai_function_schema_from_func(
tool, name=name, description=description
)
try:
logger.info(
f"Converting tool: {name} into an OpenAI function calling schema; documentation and type hints are required."
)
tool_schema_list = (
get_openai_function_schema_from_func(
tool, name=name, description=description
)
)
# Transform the dictionary to a string
tool_schema_list = json.dumps(tool_schema_list, indent=4)
# Transform the dictionary to a string
tool_schema_list = json.dumps(
tool_schema_list, indent=4
)
# Add the tool schema to the short memory
self.short_memory.add(
role="System", content=tool_schema_list
)
# Add the tool schema to the short memory
self.short_memory.add(
role="System", content=tool_schema_list
)
logger.info(
f"Conversion successful: the tool {name} has been integrated with the agent."
)
logger.info(
f"Conversion successful: the tool {name} has been integrated with the agent."
)
except Exception as error:
logger.info(
f"Failed to convert the tool into an OpenAI function calling schema; add documentation and type hints: {error}"
)
raise error
except Exception as error:
logger.info(
f"Error detected: {error}. Make sure you passed in a callable and that it has docstrings for documentation."
)
raise error
else:
for tool in self.tools:
# Parse the json for the name of the function
name = tool["name"]
description = tool["description"]
# Transform the dict into a string
tool_schema_list = json.dumps(tool, indent=4)
# Add the tool schema to the short memory
self.short_memory.add(
role="System", content=tool_schema_list
)
logger.info(
f"Conversion successful: the tool {name} has been integrated with the agent."
)
return None
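For the callable path above to work, each tool must expose a name, a docstring, and type hints, since those feed the generated schema. A minimal sketch of a tool shaped the way the converter expects (`fetch_stock_price` is a made-up illustration, not part of this commit):

```python
def fetch_stock_price(ticker: str) -> str:
    """Fetch the latest stock price for a given ticker symbol."""
    return f"Price lookup for {ticker} goes here"

# agent = Agent(..., tools=[fetch_stock_price])  # hypothetical wiring
```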
def memory_query(self, task: str = None, *args, **kwargs):
try:

@@ -1,146 +0,0 @@
A high-level pseudocode for creating the classes and functions for your desired system:
1. **Swarms**
- The main class. It initializes the swarm with a specified number of worker nodes and sets up self-scaling if required.
- Methods include `add_worker`, `remove_worker`, `execute`, and `scale`.
2. **WorkerNode**
- Class for each worker node in the swarm. It has a `task_queue` and a `completed_tasks` queue.
- Methods include `receive_task`, `complete_task`, and `communicate`.
3. **HierarchicalSwarms**
- Inherits from Swarms and overrides the `execute` method to execute tasks in a hierarchical manner.
4. **CollaborativeSwarms**
- Inherits from Swarms and overrides the `execute` method to execute tasks in a collaborative manner.
5. **CompetitiveSwarms**
- Inherits from Swarms and overrides the `execute` method to execute tasks in a competitive manner.
6. **MultiAgentDebate**
- Inherits from Swarms and overrides the `execute` method to execute tasks in a debating manner.
To implement this in Python, you would start by setting up the base `Swarm` class and `WorkerNode` class. Here's a simplified Python example:
```python
from collections import deque

from swarms.models.base_llm import BaseLLM

class WorkerNode:
def __init__(self, llm: BaseLLM):
self.llm = llm
self.task_queue = deque()
self.completed_tasks = deque()
def receive_task(self, task):
self.task_queue.append(task)
def complete_task(self):
task = self.task_queue.popleft()
result = self.llm.execute(task)
self.completed_tasks.append(result)
return result
def communicate(self, other_node):
# Placeholder for communication method
pass
class Swarms:
def __init__(self, num_nodes: int, llm: BaseLLM, self_scaling: bool):
self.nodes = [WorkerNode(llm) for _ in range(num_nodes)]
self.self_scaling = self_scaling
def add_worker(self, llm: BaseLLM):
self.nodes.append(WorkerNode(llm))
def remove_worker(self, index: int):
self.nodes.pop(index)
def execute(self, task):
# Placeholder for main execution logic
pass
def scale(self):
# Placeholder for self-scaling logic
pass
```
Then, you would build out the specialized classes for each type of swarm:
```python
class HierarchicalSwarms(Swarms):
def execute(self, task):
# Implement hierarchical task execution
pass
class CollaborativeSwarms(Swarms):
def execute(self, task):
# Implement collaborative task execution
pass
class CompetitiveSwarms(Swarms):
def execute(self, task):
# Implement competitive task execution
pass
class MultiAgentDebate(Swarms):
def execute(self, task):
# Implement debate-style task execution
pass
```
# WorkerNode class
Here's the pseudocode algorithm for a `WorkerNode` class that includes a vector embedding database for communication:
1. **WorkerNode**
- Initialize a worker node with an LLM and a connection to the vector embedding database.
- The worker node maintains a `task_queue` and `completed_tasks` queue. It also keeps track of the status of tasks (e.g., "pending", "completed").
- The `receive_task` method accepts a task and adds it to the `task_queue`.
- The `complete_task` method takes the oldest task from the `task_queue`, executes it, and then stores the result in the `completed_tasks` queue. It also updates the task status in the vector embedding database to "completed".
- The `communicate` method uses the vector embedding database to share information with other nodes. It inserts the task result into the vector database and also queries for tasks marked as "completed".
In Python, this could look something like:
```python
from collections import deque
from typing import Any, Dict
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from swarms.workers.auto_agent import AutoGPT
class WorkerNode:
def __init__(self, llm: AutoGPT, vectorstore: FAISS):
self.llm = llm
self.vectorstore = vectorstore
self.task_queue = deque()
self.completed_tasks = deque()
self.task_status: Dict[Any, str] = {}
def receive_task(self, task):
self.task_queue.append(task)
self.task_status[task] = "pending"
def complete_task(self):
task = self.task_queue.popleft()
result = self.llm.run(task)
self.completed_tasks.append(result)
self.task_status[task] = "completed"
# Insert task result into the vectorstore
self.vectorstore.insert(task, result)
return result
def communicate(self):
# Share task results and status through vectorstore
completed_tasks = [
(task, self.task_status[task])
for task in self.task_queue
if self.task_status[task] == "completed"
]
for task, status in completed_tasks:
self.vectorstore.insert(task, status)
```
This example assumes that tasks are hashable and can be used as dictionary keys. `vectorstore.insert` stands in for whatever write method your vector store exposes (langchain's FAISS wrapper, for instance, uses `add_texts`), and retrieval would go through a method such as `similarity_search`. Remember this is a simplified implementation and will need changes to match your exact requirements.
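A minimal sketch of what `communicate` could look like against langchain's actual FAISS wrapper (assumptions: `add_texts` for writes, `similarity_search` for reads; the embedding model and texts are illustrative):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["bootstrap"], OpenAIEmbeddings())

# Write: share a completed task with other nodes
vectorstore.add_texts(["task-42 -> summary written [completed]"])

# Read: retrieve tasks other nodes have marked completed
completed = vectorstore.similarity_search("completed tasks", k=5)
```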

@@ -1,4 +1,5 @@
import sys
from swarms.utils.try_except_wrapper import try_except_wrapper
try:
import pypdf
@@ -10,7 +11,8 @@ except ImportError:
sys.exit(1)
def pdf_to_text(pdf_path):
@try_except_wrapper()
def pdf_to_text(pdf_path: str) -> str:
"""
Converts a PDF file to a string of text.
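With `@try_except_wrapper()` applied, a failing parse is logged and the call returns `None` instead of raising. A minimal usage sketch (the path is hypothetical):

```python
text = pdf_to_text("docs/finance_agent.pdf")
if text is not None:
    print(text[:200])
```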

@@ -1,24 +1,33 @@
import datetime
import os
import platform
import sys
import traceback
from loguru import logger
# Configuring loguru to log to both the console and a file
logger.remove() # Remove default logger configuration
logger.add(
sys.stderr,
level="ERROR",
format="<red>{time}</red> - <level>{level}</level> - <level>{message}</level>",
)
logger.add(
"error.log", level="ERROR", format="{time} - {level} - {message}"
)
# Remove default logger configuration
logger.remove()
# Define the path for the log folder
log_folder = os.path.join(os.getcwd(), "errors")
def report_error(error: Exception) -> None:
try:
# Create the log folder if it doesn't exist
os.makedirs(log_folder, exist_ok=True)
except PermissionError:
logger.error(f"Permission denied: '{log_folder}'")
except Exception as e:
logger.error(f"An error occurred while creating the log folder: {e}")
else:
# If the folder was created successfully, add a new logger
logger.add(
os.path.join(log_folder, "error_{time}.log"),
level="ERROR",
format="<red>{time}</red> - <level>{level}</level> - <level>{message}</level>",
)
def report_error(error: Exception):
"""
Logs an error message and provides instructions for reporting the issue on Swarms GitHub
or joining the community on Discord for real-time support.
@@ -48,13 +57,14 @@ def report_error(error: Exception) -> None:
error_message = (
f"\n"
f"------------------Error: {error}-----------------------\n"
f"#########################################\n"
f"#                                       #\n"
f"#            ERROR DETECTED!            #\n"
f"#                                       #\n"
f"#                                       #\n"
f"#                                       #\n"
f"# {error} #\n"
f"#                                       #\n"
f"#########################################\n"
f"\n"
f"Error Message: {context_info['exception_message']} ({context_info['exception_type']})\n"
@@ -85,9 +95,7 @@ def report_error(error: Exception) -> None:
f"-----------------------------------------\n"
)
logger.error(error_message)
return None
return logger.error(error_message)
# # Example usage:
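A minimal sketch of that usage (the raised error is illustrative):

```python
try:
    raise ValueError("demo failure")
except ValueError as error:
    report_error(error)  # logs to stderr and to errors/error_<time>.log
```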

@@ -1,7 +1,84 @@
from functools import wraps
from time import time
from typing import Any, Callable
from swarms.utils.loguru_logger import logger
from swarms.utils.report_error_loguru import report_error
def try_except_wrapper(func, verbose: bool = False):
def retry(
max_retries: int = 3,
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
A decorator that retries a function a specified number of times if an exception occurs.
Args:
max_retries (int): The maximum number of retries. Default is 3.
Returns:
Callable[[Callable[..., Any]], Callable[..., Any]]: The decorator function.
"""
def decorator_retry(func: Callable[..., Any]) -> Callable[..., Any]:
@wraps(func)
def wrapper_retry(*args, **kwargs) -> Any:
"""
The wrapper function that retries the decorated function.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
Any: The result of the decorated function.
"""
for _ in range(max_retries):
try:
return func(*args, **kwargs)
except Exception as e:
logger.error(f"Error: {e}, retrying...")
# Retries exhausted: one final, unguarded attempt whose exception propagates
return func(*args, **kwargs)
return wrapper_retry
return decorator_retry
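A hedged usage sketch for `retry` (the flaky function is illustrative); a persistent failure still propagates from the final, unguarded attempt:

```python
import random

@retry(max_retries=3)
def flaky_fetch() -> str:
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"

print(flaky_fetch())
```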
def log_execution_time(func: Callable[..., Any]) -> Callable[..., Any]:
"""
A decorator that logs the execution time of a function.
Args:
func (Callable[..., Any]): The function to be decorated.
Returns:
Callable[..., Any]: The decorated function.
"""
@wraps(func)
def wrapper(*args, **kwargs) -> Any:
"""
The wrapper function that logs the execution time and calls the decorated function.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
Any: The result of the decorated function.
"""
start = time()
result = func(*args, **kwargs)
end = time()
logger.info(
f"Execution time for {func.__name__}: {end - start} seconds"
)
return result
return wrapper
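And a usage sketch for `log_execution_time` (the workload is illustrative):

```python
@log_execution_time
def slow_sum(n: int) -> int:
    return sum(range(n))

slow_sum(10_000_000)  # logs: Execution time for slow_sum: ... seconds
```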
def try_except_wrapper(verbose: bool = False):
"""
A decorator that wraps a function with a try-except block.
It catches any exception that occurs during the execution of the function,
@@ -23,23 +100,40 @@ def try_except_wrapper(func, verbose: bool = False):
Exiting function: divide
"""
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as error:
if verbose:
logger.error(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
else:
print(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
return None
finally:
print(f"Exiting function: {func.__name__}")
def decorator(func: Callable[..., Any]):
@wraps(func)
@retry()
@log_execution_time
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as error:
if verbose:
report_error(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
else:
report_error(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
return None
finally:
print(f"Exiting function: {func.__name__}")
return wrapper
return wrapper
return decorator
# @try_except_wrapper(verbose=True)
# def divide(a, b):
# """Divide two numbers."""
# return a / b
# # Dividing by zero raises an exception; the wrapper catches it and returns None
# result = divide(2, 0)
# print(result) # Output: None
