parent 56857ae74d
commit 995bb7d317
@@ -0,0 +1,110 @@
import os
import threading
from dataclasses import dataclass, field
from typing import Any, Callable, List, Optional

from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.base_workflow import BaseWorkflow
from swarms.utils.logger import logger


@dataclass
class ConcurrentWorkflow(BaseWorkflow):
    """
    ConcurrentWorkflow runs a task across a set of autonomous agents concurrently,
    using one thread per agent.

    Args:
        max_loops (int): The number of passes to make over the agent list. Default is 1.
        max_workers (int): The maximum number of worker threads. Default is 5.
        autosave (bool): Whether to save the state of the workflow to a file. Default is False.
        agents (List[Agent]): The agents to run the task on.
        saved_state_filepath (str): The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
        print_results (bool): Whether to log the result of each task. Default is True.
        return_results (bool): Whether to return the results of each task. Default is False.
        stopping_condition (Optional[Callable]): A predicate over the accumulated results; when it returns True, the loop stops early.

    Examples:
    >>> from swarms.models import OpenAIChat
    >>> from swarms.structs import ConcurrentWorkflow
    >>> llm = OpenAIChat(openai_api_key="")
    >>> workflow = ConcurrentWorkflow(max_workers=5, agents=[Agent(llm=llm)])
    >>> workflow.run("Summarize the quarterly report.")
    """

    max_loops: int = 1
    max_workers: int = 5
    autosave: bool = False
    agents: List[Agent] = field(default_factory=list)
    saved_state_filepath: Optional[str] = "runs/concurrent_workflow.json"
    print_results: bool = True
    return_results: bool = False
    stopping_condition: Optional[Callable] = None

    def run(
        self, task: Optional[str] = None, *args, **kwargs
    ) -> Optional[List[Any]]:
        """
        Executes the task on every agent in parallel, one thread per agent.

        Args:
            task (Optional[str]): A task description, if applicable.
            *args: Additional arguments passed through to each agent.
            **kwargs: Additional keyword arguments passed through to each agent.

        Returns:
            Optional[List[Any]]: A list of the results of each task if return_results is True; otherwise None.
        """
        loop = 0
        results = []

        while loop < self.max_loops:
            if not self.agents:
                logger.warning("No agents found in the workflow.")
                break

            # threading.Thread does not expose a return value, so each worker
            # appends its agent's result to a shared, lock-guarded list.
            lock = threading.Lock()

            def worker(agent: Agent) -> None:
                result = self.execute_agent(agent, task, *args, **kwargs)
                if result is not None:
                    with lock:
                        results.append(result)

            threads = [
                threading.Thread(target=worker, args=(agent,))
                for agent in self.agents
            ]

            for thread in threads:
                thread.start()

            for thread in threads:
                thread.join()

            loop += 1

            if self.stopping_condition and self.stopping_condition(results):
                break

        return results if self.return_results else None

    def list_agents(self):
        """Logs each agent in the workflow."""
        for agent in self.agents:
            logger.info(agent)

    def save(self):
        """Saves the state of the workflow to a file."""
        self.save_state(self.saved_state_filepath)

    def execute_agent(
        self, agent: Agent, task: Optional[str] = None, *args, **kwargs
    ) -> Optional[Any]:
        """Runs a single agent on the task; returns its result, or None on failure."""
        try:
            result = agent.run(task, *args, **kwargs)
            if self.print_results:
                logger.info(f"Agent {agent}: {result}")
            return result
        except Exception as e:
            logger.error(f"Agent {agent} generated an exception: {e}")
            return None


api_key = os.environ["OPENAI_API_KEY"]

# Build the workflow with a single OpenAI-backed agent
swarm = ConcurrentWorkflow(
    agents=[
        Agent(
            llm=OpenAIChat(openai_api_key=api_key, max_tokens=4000),
            max_loops=4,
            dashboard=False,
        )
    ],
)

# Run the workflow
swarm.run(
    "Generate a report on the top 3 biggest expenses for small businesses "
    "and how businesses can save 20%"
)
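
# A minimal sketch of the stopping_condition hook, kept commented out: the
# predicate sees the accumulated results list after each loop, so the workflow
# can stop early once enough answers have come back. The lambda and task below
# are illustrative assumptions, not part of the original example.
# early_stop_swarm = ConcurrentWorkflow(
#     agents=[Agent(llm=OpenAIChat(openai_api_key=api_key), max_loops=1)],
#     max_loops=10,
#     return_results=True,
#     stopping_condition=lambda results: len(results) >= 1,
# )
# print(early_stop_swarm.run("Summarize this quarter's expenses."))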
@@ -0,0 +1,101 @@

import os

from swarms import Agent, Anthropic, MixtureOfAgents, OpenAIChat

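# A minimal sketch, assuming python-dotenv is available (it is used elsewhere
# in this repo): load OPENAI_API_KEY and ANTHROPIC_API_KEY from a local .env
# file instead of hardcoding secrets in source.
# from dotenv import load_dotenv
# load_dotenv()
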
GEO_EXPERT_SYSTEM_PROMPT = """
|
||||
|
||||
You are GeoExpert AI, a sophisticated agent specialized in the fields of geo-economic fragmentation and foreign direct investment (FDI).
|
||||
|
||||
|
||||
|
||||
Your goals are:
|
||||
1. To provide clear, detailed, and accurate analyses of geo-economic documents and reports.
|
||||
2. To answer questions related to geo-economic fragmentation and FDI with expert-level insight.
|
||||
3. To offer strategic recommendations based on current geopolitical and economic trends.
|
||||
4. To identify and explain the implications of specific geo-economic events on global and regional investment landscapes.
|
||||
|
||||
You will achieve these goals by:
|
||||
1. Leveraging your extensive knowledge in geo-economic theory and practical applications.
|
||||
2. Utilizing advanced data analysis techniques to interpret complex economic data and trends.
|
||||
3. Staying updated with the latest developments in international trade, political economy, and investment flows.
|
||||
4. Communicating your findings and recommendations in a clear, concise, and professional manner.
|
||||
|
||||
Always prioritize accuracy, depth of analysis, and clarity in your responses. Use technical terms appropriately and provide context or explanations for complex concepts to ensure understanding. Cite relevant data, reports, and examples where necessary to support your analyses.
|
||||
|
||||
---
|
||||
"""


# Initialize the geo-economics expert agent
agent = Agent(
    agent_name="Geo Expert AI",
    system_prompt=GEO_EXPERT_SYSTEM_PROMPT,
    llm=OpenAIChat(max_tokens=4000),
    max_loops=1,
    autosave=True,
    dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="geo_expert_agent.json",
    docs_folder="heinz_docs",
    context_length=100000,
)


# Initialize the forecasting agent
forecaster_agent = Agent(
    agent_name="Forecaster Agent",
    system_prompt=(
        "You are the forecaster agent. Your purpose is to predict the future "
        "of a company. Give concrete numbers; do not summarize without them."
    ),
    llm=Anthropic(
        max_tokens=4000,
        anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
    ),
    max_loops=1,
    autosave=True,
    dynamic_temperature_enabled=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="forecaster_agent.json",
    docs_folder="heinz_docs",
    context_length=100000,
)


# Initialize the swarm
swarm = MixtureOfAgents(
    agents=[agent, forecaster_agent],
    final_agent=forecaster_agent,
    layers=1,
)

# Run the swarm
out = swarm.run(
    "What is the economic impact on China of technology decoupling, and how "
    "is that impact measured? What is the economic forecast? Give some numbers."
)
print(out)
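
# A minimal sketch, assuming the same constructor arguments shown above: a
# deeper mixture that routes agent outputs through two layers before the
# final agent synthesizes an answer. Kept commented out as an illustration.
# deep_swarm = MixtureOfAgents(
#     agents=[agent, forecaster_agent],
#     final_agent=forecaster_agent,
#     layers=2,
# )
# print(deep_swarm.run("Forecast FDI flows into Southeast Asia for 2025."))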
@@ -0,0 +1,84 @@

envs:
  # MODEL_NAME: meta-llama/Meta-Llama-3-70B-Instruct
  MODEL_NAME: meta-llama/Meta-Llama-3-8B
  HF_TOKEN: <your-huggingface-token>  # Change to your own Hugging Face token, or use --env to pass it.
  HF_HUB_ENABLE_HF_TRANSFER: True

# Service configuration
service:
  readiness_probe:
    path: /v1/chat/completions  # Path for the readiness probe
    post_data:
      model: $MODEL_NAME  # Specify the model name
      messages:
        - role: user
          content: Hello! What is your name?  # Specify the initial message
      max_tokens: 1  # Maximum number of tokens
  # readiness_probe: /v1/health  # Simpler alternative probe; the
  # readiness_probe key cannot appear twice, so this stays commented out.
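
  # For reference, the probe above amounts to roughly this request (a sketch;
  # SkyServe fills in the real replica host and port):
  #   curl http://<replica>:8081/v1/chat/completions \
  #     -H 'Content-Type: application/json' \
  #     -d '{"model": "meta-llama/Meta-Llama-3-8B",
  #          "messages": [{"role": "user", "content": "Hello! What is your name?"}],
  #          "max_tokens": 1}'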

# Replica Policy
replica_policy:
  min_replicas: 1                # Minimum number of replicas
  max_replicas: 10               # Maximum number of replicas
  target_qps_per_replica: 2.5    # Target queries per second per replica
  upscale_delay_seconds: 200     # Delay before upscaling replicas
  downscale_delay_seconds: 1200  # Delay before downscaling replicas
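
# Worked example: the autoscaler sizes the fleet to roughly
# observed_qps / target_qps_per_replica, so ~25 QPS of sustained load would
# push the service toward its max of 10 replicas (25 / 2.5 = 10), while idle
# traffic decays it back toward 1 after the 1200-second downscale delay.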

resources:
  # accelerators: {L4:8, A10g:8, A10:8, A100:4, A100:8, A100-80GB:2, A100-80GB:4, A100-80GB:8}
  accelerators: {A10g, A10, L40, A40}  # Cheaper accelerators suffice for the 8B model.
  # cpus: 32+
  use_spot: True
  disk_size: 100  # Ensure model checkpoints can fit.
  # disk_tier: best
  ports: 8081  # Expose to internet traffic.

setup: |
  # Install vLLM into a dedicated conda env, creating it on first use.
  conda activate vllm
  if [ $? -ne 0 ]; then
    conda create -n vllm python=3.10 -y
    conda activate vllm
  fi

  pip install vllm==0.4.0.post1

  # Install Gradio for the web UI, plus supporting packages.
  pip install gradio openai
  pip install flash-attn==2.5.7
  pip install hf_transfer

run: |
|
||||
# Serve VLM
|
||||
|
||||
conda activate vllm
|
||||
echo 'Starting vllm api server...'
|
||||
# https://github.com/vllm-project/vllm/issues/3098
|
||||
export PATH=$PATH:/sbin
|
||||
|
||||
# NOTE: --gpu-memory-utilization 0.95 needed for 4-GPU nodes.
|
||||
python3 -u -m vllm.entrypoints.openai.api_server \
|
||||
--port 8090 \
|
||||
--model meta-llama/Meta-Llama-3-8B \
|
||||
--trust-remote-code --tensor-parallel-size 4 \
|
||||
--gpu-memory-utilization 0.95 \
|
||||
--max-num-seqs 64 \
|
||||
|
||||
# Serve Gradio
|
||||
|
||||
# echo 'Starting gradio server...'
|
||||
# git clone https://github.com/vllm-project/vllm.git || true
|
||||
# python vllm/examples/gradio_openai_chatbot_webserver.py \
|
||||
# -m $MODEL_NAME \
|
||||
# --port 8811 \
|
||||
# --model-url http://localhost:8081/v1 \
|
||||
# --stop-token-ids 128009,128001
|
||||
# --share
|
||||
|
||||
echo 'Starting gradio server...'
|
||||
git clone https://github.com/vllm-project/vllm.git || true
|
||||
python3 vllm/examples/gradio_openai_chatbot_webserver.py \
|
||||
-m meta-llama/Meta-Llama-3-8B\
|
||||
--port 8811 \
|
||||
--model-url http://localhost:8081/v1 \
|
||||
--stop-token-ids 128009,128001
|
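
# Client-side sketch (not run by this task), assuming the `openai` v1 Python
# package installed in setup: the deployment speaks the OpenAI chat API, so
# it can be queried like this once the endpoint is up:
#   from openai import OpenAI
#   client = OpenAI(base_url="http://<endpoint>:8081/v1", api_key="EMPTY")
#   reply = client.chat.completions.create(
#       model="meta-llama/Meta-Llama-3-8B",
#       messages=[{"role": "user", "content": "Hello!"}],
#   )
#   print(reply.choices[0].message.content)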
@@ -1,6 +1,6 @@
import os
from dotenv import load_dotenv
-from swarms.structs import Agent, OpenAIChat, Task
+from swarms import Agent, OpenAIChat, Task

# Load the environment variables
load_dotenv()