parent 471996cf1e
commit da7ba6477f
@@ -0,0 +1,49 @@
from swarms import Agent, HuggingfaceLLM
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

model = HuggingfaceLLM(
    model_id="meta-llama/Meta-Llama-3.1-8B",
    max_tokens=4000,
    temperature=0.1,
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
    # tool_schema=dict,
    # agent_ops_on=True,
    # long_term_memory=ChromaDB(docs_folder="artifacts"),
)

agent.run(
    "What are the components of a startup's stock incentive equity plan?"
)
@@ -1,106 +0,0 @@
import base64
import os

from dotenv import load_dotenv
from openai import OpenAI

from swarms.models.base_multimodal_model import BaseMultiModalModel

# Load the OpenAI API key from the .env file
load_dotenv()

# Initialize the OpenAI API key
api_key = os.environ.get("OPENAI_API_KEY")


# Function to encode a local image file as a base64 string
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")


class GPT4o(BaseMultiModalModel):
    """
    GPT4o is a class that represents a multi-modal conversational model based on GPT-4o.
    It extends the BaseMultiModalModel class.

    Args:
        system_prompt (str): The system prompt to be used in the conversation.
        temperature (float): The temperature parameter for generating diverse responses.
        max_tokens (int): The maximum number of tokens in the generated response.
        openai_api_key (str): The API key for accessing the OpenAI API.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.

    Attributes:
        system_prompt (str): The system prompt to be used in the conversation.
        temperature (float): The temperature parameter for generating diverse responses.
        max_tokens (int): The maximum number of tokens in the generated response.
        client (OpenAI): The OpenAI client for making API requests.

    Methods:
        run(task, local_img=None, img=None, *args, **kwargs):
            Runs the GPT-4o model to generate a response based on the given task and image.
    """

    def __init__(
        self,
        system_prompt: str = None,
        temperature: float = 0.1,
        max_tokens: int = 300,
        openai_api_key: str = None,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens = max_tokens

        self.client = OpenAI(api_key=openai_api_key, *args, **kwargs)

    def run(
        self,
        task: str,
        local_img: str = None,
        img: str = None,
        *args,
        **kwargs,
    ):
        """
        Runs the GPT-4o model to generate a response based on the given task and image.

        Args:
            task (str): The task or user prompt for the conversation.
            local_img (str): The local path to the image file.
            img (str): The URL of the image.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The generated response from the GPT-4o model.
        """
        # Prefer a local image if one is provided: encode it as a base64
        # data URL; otherwise fall back to the image URL passed via `img`.
        if local_img is not None:
            img = f"data:image/jpeg;base64,{encode_image(local_img)}"

        response = self.client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": task},
                        {
                            "type": "image_url",
                            "image_url": {"url": img},
                        },
                    ],
                }
            ],
            max_tokens=self.max_tokens,
            temperature=self.temperature,
        )

        return response.choices[0].message.content
@@ -1,146 +0,0 @@
High-level pseudocode for creating the classes and functions for your desired system:

1. **Swarms**
   - The main class. It initializes the swarm with a specified number of worker nodes and sets up self-scaling if required.
   - Methods include `add_worker`, `remove_worker`, `execute`, and `scale`.
2. **WorkerNode**
   - Class for each worker node in the swarm. It has a `task_queue` and a `completed_tasks` queue.
   - Methods include `receive_task`, `complete_task`, and `communicate`.
3. **HierarchicalSwarms**
   - Inherits from Swarms and overrides the `execute` method to execute tasks in a hierarchical manner.
4. **CollaborativeSwarms**
   - Inherits from Swarms and overrides the `execute` method to execute tasks in a collaborative manner.
5. **CompetitiveSwarms**
   - Inherits from Swarms and overrides the `execute` method to execute tasks in a competitive manner.
6. **MultiAgentDebate**
   - Inherits from Swarms and overrides the `execute` method to execute tasks in a debating manner.

To implement this in Python, you would start by setting up the base `Swarms` class and `WorkerNode` class. Here's a simplified Python example:

```python
from collections import deque

from langchain.llms.base import BaseLLM


class WorkerNode:
    def __init__(self, llm: BaseLLM):
        self.llm = llm
        self.task_queue = deque()
        self.completed_tasks = deque()

    def receive_task(self, task):
        self.task_queue.append(task)

    def complete_task(self):
        task = self.task_queue.popleft()
        result = self.llm.execute(task)
        self.completed_tasks.append(result)
        return result

    def communicate(self, other_node):
        # Placeholder for communication method
        pass


class Swarms:
    def __init__(self, num_nodes: int, llm: BaseLLM, self_scaling: bool):
        self.nodes = [WorkerNode(llm) for _ in range(num_nodes)]
        self.self_scaling = self_scaling

    def add_worker(self, llm: BaseLLM):
        self.nodes.append(WorkerNode(llm))

    def remove_worker(self, index: int):
        self.nodes.pop(index)

    def execute(self, task):
        # Placeholder for main execution logic
        pass

    def scale(self):
        # Placeholder for self-scaling logic
        pass
```
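The `scale` method above is left as a placeholder. As a minimal sketch of what self-scaling could look like — assuming growth is triggered by total queue depth and that the new worker reuses the first node's LLM, both of which are assumptions rather than part of the original design — the method body might be:

```python
def scale(self, tasks_per_node: int = 10):
    # Assumed policy: add a worker whenever pending tasks exceed
    # tasks_per_node per existing node. The threshold is hypothetical.
    if not self.self_scaling or not self.nodes:
        return
    pending = sum(len(node.task_queue) for node in self.nodes)
    if pending > tasks_per_node * len(self.nodes):
        # Reuse the first node's LLM for the new worker (an assumption)
        self.add_worker(self.nodes[0].llm)
```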
Then, you would build out the specialized classes for each type of swarm:

```python
class HierarchicalSwarms(Swarms):
    def execute(self, task):
        # Implement hierarchical task execution
        pass


class CollaborativeSwarms(Swarms):
    def execute(self, task):
        # Implement collaborative task execution
        pass


class CompetitiveSwarms(Swarms):
    def execute(self, task):
        # Implement competitive task execution
        pass


class MultiAgentDebate(Swarms):
    def execute(self, task):
        # Implement debate-style task execution
        pass
```
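Once the `execute` methods are filled in, usage could look like the following sketch; the `llm` object and the task string are placeholders, assuming any object satisfying `BaseLLM`:

```python
# Hypothetical usage; `llm` is any object satisfying BaseLLM
swarm = HierarchicalSwarms(num_nodes=4, llm=llm, self_scaling=True)
result = swarm.execute("Draft a summary of the quarterly report")
```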

# WorkerNode class

Here's the pseudocode algorithm for a `WorkerNode` class that includes a vector embedding database for communication:

1. **WorkerNode**
   - Initialize a worker node with an LLM and a connection to the vector embedding database.
   - The worker node maintains a `task_queue` and a `completed_tasks` queue. It also keeps track of the status of tasks (e.g., "pending", "completed").
   - The `receive_task` method accepts a task and adds it to the `task_queue`.
   - The `complete_task` method takes the oldest task from the `task_queue`, executes it, and then stores the result in the `completed_tasks` queue. It also updates the task status in the vector embedding database to "completed".
   - The `communicate` method uses the vector embedding database to share information with other nodes. It inserts the task result into the vector database and also queries for tasks marked as "completed".

In Python, this could look something like:
```python
from collections import deque
from typing import Any, Dict

# faiss, InMemoryDocstore, and OpenAIEmbeddings are used to construct the
# FAISS vectorstore that gets passed into WorkerNode.
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

from swarms.workers.auto_agent import AutoGPT


class WorkerNode:
    def __init__(self, llm: AutoGPT, vectorstore: FAISS):
        self.llm = llm
        self.vectorstore = vectorstore
        self.task_queue = deque()
        self.completed_tasks = deque()
        self.task_status: Dict[Any, str] = {}

    def receive_task(self, task):
        self.task_queue.append(task)
        self.task_status[task] = "pending"

    def complete_task(self):
        task = self.task_queue.popleft()
        result = self.llm.run(task)
        self.completed_tasks.append(result)
        self.task_status[task] = "completed"
        # Insert the task result into the vectorstore
        self.vectorstore.add_texts([result], metadatas=[{"task": task}])
        return result

    def communicate(self):
        # Share completed task statuses through the vectorstore. Note we
        # iterate over task_status, since completed tasks have already been
        # popped from the task_queue.
        completed_tasks = [
            (task, status)
            for task, status in self.task_status.items()
            if status == "completed"
        ]
        for task, status in completed_tasks:
            self.vectorstore.add_texts(
                [status], metadatas=[{"task": task}]
            )
```

This example assumes that tasks are hashable and can be used as dictionary keys. The `vectorstore.add_texts` method is used to share task results and status with other nodes, and you can use methods like `vectorstore.similarity_search` to retrieve this information. Please remember this is a simplified implementation and might need changes according to your exact requirements.
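As a sketch of the retrieval side — assuming the FAISS store is built with `OpenAIEmbeddings` as the imports above suggest, and using the classic langchain `FAISS(embedding_function, index, docstore, index_to_docstore_id)` constructor — another node could query for shared results like this:

```python
# Hypothetical construction and query of the shared vectorstore
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 = OpenAI embedding dimension (assumption)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

# Retrieve the results most relevant to a task description
docs = vectorstore.similarity_search("profit calculation", k=3)
for doc in docs:
    print(doc.metadata.get("task"), "->", doc.page_content)
```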